// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains common routines for dealing with the freeing of page
 * tables, along with common page table handling code.
 *
 *  Derived from arch/powerpc/mm/tlb_64.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_PPC64
#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
#else
#define PGD_ALIGN PAGE_SIZE
#endif

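/*
 * swapper_pg_dir is the kernel's static top-level page directory (the pgd
 * used by init_mm). It lives in .bss..page_aligned; PGD_ALIGN aligns it to
 * the full table size on 64-bit and to a single page elsewhere.
 */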
pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);

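/* Exec faults are reported as an instruction storage/access interrupt, i.e. trap 0x400. */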
static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * we avoid _PAGE_SPECIAL and cache-inhibited PTEs, and we only do it
 * for userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte)
{
	if (pte_present(pte) && !pte_special(pte)) {
		if (pte_ci(pte))
			return 0;
		if (pte_user(pte))
			return 1;
	}
	return 0;
}

static struct page *maybe_pte_to_page(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page;
}

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMUs handle coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, we always
 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */

static pte_t set_pte_filter_hash(pte_t pte)
{
	if (radix_enabled())
		return pte;

	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);

		if (!pg)
			return pte;
		if (!test_bit(PG_dcache_clean, &pg->flags)) {
			flush_dcache_icache_page(pg);
			set_bit(PG_dcache_clean, &pg->flags);
		}
	}
	return pte;
}

#else /* CONFIG_PPC_BOOK3S */

static pte_t set_pte_filter_hash(pte_t pte) { return pte; }

#endif /* CONFIG_PPC_BOOK3S */

/* Embedded-type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 */
static inline pte_t set_pte_filter(pte_t pte)
{
	struct page *pg;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return set_pte_filter_hash(pte);

	/* No exec permission in the first place, move on */
	if (!pte_exec(pte) || !pte_looks_normal(pte))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	/* If the page is clean, we move on */
	if (test_bit(PG_dcache_clean, &pg->flags))
		return pte;

	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_dcache_clean, &pg->flags);
		return pte;
	}

	/* Else, we filter out _PAGE_EXEC */
	return pte_exprotect(pte);
}

static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct page *pg;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return pte;

	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
	 * we just bail out.
	 */
	if (dirty || pte_exec(pte) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
	 * an error we would have bailed out earlier in do_page_fault(),
	 * but let's make sure of it.
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_dcache_clean, &pg->flags))
		goto bail;

	/* Clean the page and set PG_dcache_clean */
	flush_dcache_icache_page(pg);
	set_bit(PG_dcache_clean, &pg->flags);

 bail:
	return pte_mkexec(pte);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called.
	 */
	pte = set_pte_filter(pte);

	/* Perform the setting of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}

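/*
 * Tear down the kernel mapping of a single page: clear its PTE in the
 * kernel page table and flush the corresponding TLB entry.
 */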
void unmap_kernel_page(unsigned long va)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep = pte_offset_kernel(pmdp, va);

	pte_clear(&init_mm, va, ptep);
	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;

	entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(vma, ptep, entry,
					address, mmu_virtual_psize);
	}
	return changed;
}

#ifdef CONFIG_HUGETLB_PAGE
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {

#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif

#else
		/*
		 * Not used on non book3s64 platforms.
		 * 8xx compares it with mmu_virtual_psize to
		 * know if it is a huge page or not.
		 */
		psize = MMU_PAGE_COUNT;
#endif
		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}

#if defined(CONFIG_PPC_8xx)
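/*
 * On the 8xx a huge mapping is backed by several consecutive 4k PTE cells
 * (number_of_cells_per_pte() says how many). Replicate the filtered PTE
 * value into each cell, stepping it by 4k so that successive cells
 * reference successive 4k frames of the area.
 */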
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	pmd_t *pmd = pmd_off(mm, addr);
	pte_basic_t val;
	pte_basic_t *entry = &ptep->pte;
	int num, i;

	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	pte = set_pte_filter(pte);

	val = pte_val(pte);

	num = number_of_cells_per_pte(pmd, val, 1);

	for (i = 0; i < num; i++, entry++, val += SZ_4K)
		*entry = val;
}
#endif
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_DEBUG_VM
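/*
 * Debug-only sanity check: assert that the page table lock protecting the
 * PTE page that maps 'addr' is currently held.
 */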
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, addr);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * When khugepaged collapses normal pages into a hugepage, it first
	 * sets the pmd to none to force page fault/gup to take mmap_lock.
	 * After the pmd is set to none, it does a pte_clear which reaches
	 * this assertion, so if we find the pmd none, just return.
	 */
	if (pmd_none(*pmd))
		return;
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
#endif /* CONFIG_DEBUG_VM */

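/*
 * Resolve a vmalloc (or other page-table mapped) kernel virtual address to
 * its physical address by looking up the backing page frame.
 */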
unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);

	BUG_ON(!pfn);
	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against teardown,
 * and we can follow the address down to the page and take a ref on it.
 * This function needs to be called with interrupts disabled. We use this variant
 * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t *pgdp;
	p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift;

	if (hpage_shift)
		*hpage_shift = 0;

	if (is_thp)
		*is_thp = false;

	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or a page unmap. The returned pte_t * is still not
	 * stable, so the caller must re-check it for those conditions.
	 * The top level is an exception because it is folded into p4d.
	 */
	pgdp = pgdir + pgd_index(ea);
	p4dp = p4d_offset(pgdp, ea);
	p4d  = READ_ONCE(*p4dp);
	pdshift = P4D_SHIFT;

	if (p4d_none(p4d))
		return NULL;

	if (p4d_is_leaf(p4d)) {
		ret_pte = (pte_t *)p4dp;
		goto out;
	}

	if (is_hugepd(__hugepd(p4d_val(p4d)))) {
		hpdp = (hugepd_t *)&p4d;
		goto out_huge;
	}

	/*
	 * Even if we end up with an unmap, the pgtable will not
	 * be freed, because we do an RCU free and here we have
	 * interrupts disabled.
	 */
	pdshift = PUD_SHIFT;
	pudp = pud_offset(&p4d, ea);
	pud  = READ_ONCE(*pudp);

	if (pud_none(pud))
		return NULL;

	if (pud_is_leaf(pud)) {
		ret_pte = (pte_t *)pudp;
		goto out;
	}

	if (is_hugepd(__hugepd(pud_val(pud)))) {
		hpdp = (hugepd_t *)&pud;
		goto out_huge;
	}

	pdshift = PMD_SHIFT;
	pmdp = pmd_offset(&pud, ea);
	pmd  = READ_ONCE(*pmdp);

	/*
	 * A hugepage collapse is captured by this condition, see
	 * pmdp_collapse_flush.
	 */
	if (pmd_none(pmd))
		return NULL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * A hugepage split is captured by this condition, see
	 * pmdp_invalidate.
	 *
	 * Huge page modification can be caught here too.
	 */
	if (pmd_is_serializing(pmd))
		return NULL;
#endif

	if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
		if (is_thp)
			*is_thp = true;
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	if (pmd_is_leaf(pmd)) {
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	if (is_hugepd(__hugepd(pmd_val(pmd)))) {
		hpdp = (hugepd_t *)&pmd;
		goto out_huge;
	}

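	/* None of the leaf/hugepd cases matched: this is a regular page
	 * table, so return a pointer to the PTE that maps 'ea'.
	 */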
	return pte_offset_kernel(&pmd, ea);

out_huge:
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);