// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the
 * semantics expectations being validated here. All future changes here
 * or in the documentation need to be kept in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry's type. But these bits might affect the ability to
 * clear entries with pxx_clear() because of how dynamic page table
 * folding works on s390. So while loading up the entries, do not change
 * the lower 4 bits. This does not affect any other platform. Also avoid
 * the 62nd bit on ppc64, which is used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)
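/*
 * For example, on a 64-bit build ARCH_SKIP_MASK works out to
 * 0x400000000000000f, so RANDOM_ORVALUE becomes 0xbffffffffffffff0:
 * every bit set except the s390 type bits (3:0) and the ppc64 marker
 * bit (62).
 */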

static void __init pte_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	/*
	 * Architectures optimize set_pte_at() by avoiding a TLB flush.
	 * This requires that set_pte_at() not be used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 */

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

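	/*
	 * Start with a clean, write protected entry and then upgrade it
	 * via ptep_set_access_flags(), which must honour the more
	 * permissive (writable, dirty) version of the entry.
	 */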
	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

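	/*
	 * ptep_test_and_clear_young() must clear the young bit that was
	 * set with pte_mkyoung() before the entry was installed.
	 */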
	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
}

static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

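	/*
	 * As in the pte case above, upgrade a clean, write protected
	 * entry via pmdp_set_access_flags() and verify that the writable
	 * and dirty bits stick.
	 */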
	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited pgtable */
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(pfn, prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_ioremap_pmd_supported())
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
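	/*
	 * Same access flags exercise as the pte/pmd variants above, this
	 * time through pudp_set_access_flags().
	 */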
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(pfn, prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_ioremap_pud_supported())
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, int idx) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long pfn, unsigned long vaddr,
				   pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
		!IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(pfn, prot);
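	/*
	 * Round trip the pte through a swp_entry_t and back; the arch
	 * encoding must preserve the pfn for swap entries to be usable.
	 */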
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be
	 * really problematic. Let's allocate a dedicated page explicitly
	 * for this purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_migration_entry() expects the given page to be
	 * locked, otherwise it trips a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_migration_entry(page, 1);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_write_migration_entry(swp));

	make_migration_entry_read(&swp);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));

	swp = make_migration_entry(page, 0);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(pfn);
	pte = mk_huge_pte(page, prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge()
	 * must be true if pmd_page() returns a valid THP to avoid
	 * taking the pmd_lock when others walk over non-transhuge pmds
	 * (i.e. there are no THPs allocated). Especially when splitting
	 * a THP and removing the present bit from the pmd,
	 * pmd_trans_huge() still needs to return true. pmd_present()
	 * should be true whenever pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

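	/*
	 * These checks only run when the architecture does not provide
	 * its own pmdp_invalidate(), since the generic pmd_mkinvalid()
	 * semantics may not hold there.
	 */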
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable these
	 * tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

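	/*
	 * Pick a page aligned virtual address uniformly at random from
	 * the user address range. For example, with 4K pages and a 128TB
	 * TASK_SIZE starting at FIRST_USER_ADDRESS == 0, there would be
	 * 2^35 candidate pages to choose from.
	 */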
	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static int __init debug_vm_pgtable(void)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *ptl = NULL;
	int idx;

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking the lower bits of this real pfn. These derived pfns might
	 * not exist on the platform, but that does not really matter as the
	 * pfn_pxx() helpers will still create appropriate entries for the
	 * test. This avoids having to allocate large memory blocks just for
	 * mappings at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

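	/*
	 * For example, assuming 4K base pages and a 2M PMD size, a paddr
	 * of 0x40201000 yields pte_aligned == 0x40201, while pmd_aligned
	 * masks off the low 9 pfn bits to give 0x40200. The higher level
	 * pfns follow the same pattern with progressively wider masks.
	 */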
	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	/*
	 * Allocate pgtable_t
	 */
	if (pte_alloc(mm, pmdp)) {
		pr_err("pgtable allocation failed\n");
		return 1;
	}

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	/*
	 * Iterate over protection_map[] to make sure that all the basic
	 * page table transformation validations hold true irrespective
	 * of the starting protection value for a given page table entry.
	 */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
		pte_basic_tests(pte_aligned, idx);
		pmd_basic_tests(pmd_aligned, idx);
		pud_basic_tests(mm, pud_aligned, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of the
	 * above iteration for now to save some test execution time.
	 */
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, protnone);
	pmd_savedwrite_tests(pmd_aligned, protnone);

	pte_special_tests(pte_aligned, prot);
	pte_protnone_tests(pte_aligned, protnone);
	pmd_protnone_tests(pmd_aligned, protnone);

	pte_devmap_tests(pte_aligned, prot);
	pmd_devmap_tests(pmd_aligned, prot);
	pud_devmap_tests(pud_aligned, prot);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	hugetlb_basic_tests(pte_aligned, prot);

	/*
	 * Page table modifying tests. They need to hold the
	 * proper page table lock.
	 */

	ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
	pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pte_unmap_unlock(ptep, ptl);

	ptl = pmd_lock(mm, pmdp);
	pmd_clear_tests(mm, pmdp);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pmd_populate_tests(mm, pmdp, saved_ptep);
	spin_unlock(ptl);

	ptl = pud_lock(mm, pudp);
	pud_clear_tests(mm, pudp);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	pud_huge_tests(pudp, pud_aligned, prot);
	pud_populate_tests(mm, pudp, saved_pmdp);
	spin_unlock(ptl);

	spin_lock(&mm->page_table_lock);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);
	spin_unlock(&mm->page_table_lock);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
late_initcall(debug_vm_pgtable);
1148