Lines matching refs: pmd — each entry gives the source line number, the matching code, and the enclosing function; "argument" and "local" mark whether pmd is a function parameter or a local variable at that site.
245 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, in free_pte_range() argument
248 pgtable_t token = pmd_pgtable(*pmd); in free_pte_range()
256 spinlock_t *ptl = pmd_lock(tlb->mm, pmd); in free_pte_range()
259 pmd_clear(pmd); in free_pte_range()
268 pmd_t *pmd; in free_pmd_range() local
273 pmd = pmd_offset(pud, addr); in free_pmd_range()
276 if (pmd_none_or_clear_bad(pmd)) in free_pmd_range()
278 free_pte_range(tlb, pmd, addr); in free_pmd_range()
279 } while (pmd++, addr = next, addr != end); in free_pmd_range()
292 pmd = pmd_offset(pud, start); in free_pmd_range()
294 pmd_free_tlb(tlb, pmd, start); in free_pmd_range()
469 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd) in __pte_alloc() argument
491 ptl = pmd_lock(mm, pmd); in __pte_alloc()
492 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ in __pte_alloc()
494 pmd_populate(mm, pmd, new); in __pte_alloc()
503 int __pte_alloc_kernel(pmd_t *pmd) in __pte_alloc_kernel() argument
512 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ in __pte_alloc_kernel()
513 pmd_populate_kernel(&init_mm, pmd, new); in __pte_alloc_kernel()
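__pte_alloc() and __pte_alloc_kernel() both follow an allocate-then-recheck pattern: the new pte page is allocated with no locks held, and it is only installed with pmd_populate() if the pmd is still empty once pmd_lock() is taken, since another thread may have won the race. A simplified sketch of the user-mm variant (error paths and memory barriers trimmed):

    static int pte_alloc_sketch(struct mm_struct *mm, pmd_t *pmd)
    {
        spinlock_t *ptl;
        pgtable_t new = pte_alloc_one(mm);      /* allocate outside any lock */

        if (!new)
            return -ENOMEM;

        ptl = pmd_lock(mm, pmd);
        if (likely(pmd_none(*pmd))) {           /* has another thread populated it? */
            mm_inc_nr_ptes(mm);
            pmd_populate(mm, pmd, new);
            new = NULL;
        }
        spin_unlock(ptl);
        if (new)                                /* lost the race: drop the spare page */
            pte_free(mm, new);
        return 0;
    }

The kernel-side variant has the same shape but runs against &init_mm and installs the table with pmd_populate_kernel().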
551 pmd_t *pmd = pmd_offset(pud, addr); in print_bad_pte() local
582 (long long)pte_val(pte), (long long)pmd_val(*pmd)); in print_bad_pte()
700 pmd_t pmd) in vm_normal_page_pmd() argument
702 unsigned long pfn = pmd_pfn(pmd); in vm_normal_page_pmd()
724 if (pmd_devmap(pmd)) in vm_normal_page_pmd()
726 if (is_huge_zero_pmd(pmd)) in vm_normal_page_pmd()
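vm_normal_page_pmd() resolves a pmd to its struct page while filtering out mappings that have no "normal" page behind them: devmap (device) pmds and the shared huge zero page. A rough sketch of that filtering, assuming the CONFIG_TRANSPARENT_HUGEPAGE helpers are available:

    static struct page *normal_page_from_pmd_sketch(pmd_t pmd)
    {
        unsigned long pfn = pmd_pfn(pmd);

        if (pmd_devmap(pmd))            /* device memory: not a normal page */
            return NULL;
        if (is_huge_zero_pmd(pmd))      /* shared huge zero page: skip it too */
            return NULL;
        return pfn_to_page(pfn);        /* the real code also range-checks the pfn */
    }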
1279 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1294 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1421 pmd_t *pmd; in zap_pmd_range() local
1424 pmd = pmd_offset(pud, addr); in zap_pmd_range()
1427 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { in zap_pmd_range()
1429 __split_huge_pmd(vma, pmd, addr, false, NULL); in zap_pmd_range()
1430 else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1435 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) { in zap_pmd_range()
1436 spinlock_t *ptl = pmd_lock(tlb->mm, pmd); in zap_pmd_range()
1452 if (pmd_none_or_trans_huge_or_clear_bad(pmd)) in zap_pmd_range()
1454 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1457 } while (pmd++, addr = next, addr != end); in zap_pmd_range()
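zap_pmd_range() is the THP-aware version of the same walk: a huge, devmap, or migrating pmd is handled (or split) as a whole before the code ever descends to individual ptes. A trimmed sketch of that dispatch, built from the calls visible above; details handling and cond_resched() are omitted:

    static void zap_pmd_walk_sketch(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                    pud_t *pud, unsigned long addr, unsigned long end,
                                    struct zap_details *details)
    {
        pmd_t *pmd = pmd_offset(pud, addr);
        unsigned long next;

        do {
            next = pmd_addr_end(addr, end);
            if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                if (next - addr != HPAGE_PMD_SIZE)
                    /* only part of the huge range is being zapped: split it first */
                    __split_huge_pmd(vma, pmd, addr, false, NULL);
                else if (zap_huge_pmd(tlb, vma, pmd, addr))
                    continue;   /* the whole huge entry is gone in one shot */
                /* otherwise it was split underneath us: fall through to the ptes */
            }
            if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                continue;
            next = zap_pte_range(tlb, vma, pmd, addr, next, details);
        } while (pmd++, addr = next, addr != end);
    }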
1685 pmd_t *pmd; in walk_to_pmd() local
1694 pmd = pmd_alloc(mm, pud, addr); in walk_to_pmd()
1695 if (!pmd) in walk_to_pmd()
1698 VM_BUG_ON(pmd_trans_huge(*pmd)); in walk_to_pmd()
1699 return pmd; in walk_to_pmd()
1705 pmd_t *pmd = walk_to_pmd(mm, addr); in __get_locked_pte() local
1707 if (!pmd) in __get_locked_pte()
1709 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
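walk_to_pmd() and __get_locked_pte() show the allocating walk: missing intermediate levels are created on the way down and the caller ends up with a mapped, locked pte. A condensed sketch of the full descent (the real code splits this across the two helpers above):

    static pte_t *get_locked_pte_sketch(struct mm_struct *mm, unsigned long addr,
                                        spinlock_t **ptlp)
    {
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_alloc(mm, pgd, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (!p4d)
            return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
            return NULL;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
            return NULL;
        VM_BUG_ON(pmd_trans_huge(*pmd));        /* callers never hit THP-mapped ranges */
        /* allocates the pte table if needed, then returns the pte mapped and locked */
        return pte_alloc_map_lock(mm, pmd, addr, ptlp);
    }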
1781 pmd_t *pmd = NULL; in insert_pages() local
1791 pmd = walk_to_pmd(mm, addr); in insert_pages()
1792 if (!pmd) in insert_pages()
1800 if (pte_alloc(mm, pmd)) in insert_pages()
1807 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); in insert_pages()
2251 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, in remap_pte_range() argument
2259 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
2281 pmd_t *pmd; in remap_pmd_range() local
2286 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
2287 if (!pmd) in remap_pmd_range()
2289 VM_BUG_ON(pmd_trans_huge(*pmd)); in remap_pmd_range()
2292 err = remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
2296 } while (pmd++, addr = next, addr != end); in remap_pmd_range()
2467 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, in apply_to_pte_range() argument
2478 pte_alloc_kernel_track(pmd, addr, mask) : in apply_to_pte_range()
2479 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
2484 pte_offset_kernel(pmd, addr) : in apply_to_pte_range()
2485 pte_offset_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
2488 BUG_ON(pmd_huge(*pmd)); in apply_to_pte_range()
2515 pmd_t *pmd; in apply_to_pmd_range() local
2522 pmd = pmd_alloc_track(mm, pud, addr, mask); in apply_to_pmd_range()
2523 if (!pmd) in apply_to_pmd_range()
2526 pmd = pmd_offset(pud, addr); in apply_to_pmd_range()
2530 if (create || !pmd_none_or_clear_bad(pmd)) { in apply_to_pmd_range()
2531 err = apply_to_pte_range(mm, pmd, addr, next, fn, data, in apply_to_pmd_range()
2536 } while (pmd++, addr = next, addr != end); in apply_to_pmd_range()
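apply_to_pte_range()/apply_to_pmd_range() serve both kernel and user address spaces, which is why every lookup above appears twice: kernel mappings (mm == &init_mm) use the *_kernel helpers with no pte lock, user mappings go through the locked map variants, and the create flag decides between allocating and merely looking up. A sketch of just that branch, assuming the pgtbl_mod_mask tracking used by the *_track helpers:

    static int apply_pte_level_sketch(struct mm_struct *mm, pmd_t *pmd,
                                      unsigned long addr, bool create,
                                      pgtbl_mod_mask *mask)
    {
        pte_t *pte;
        spinlock_t *ptl = NULL;                 /* only taken for user mms */

        if (mm == &init_mm)
            pte = create ? pte_alloc_kernel_track(pmd, addr, mask) :
                           pte_offset_kernel(pmd, addr);
        else
            pte = create ? pte_alloc_map_lock(mm, pmd, addr, &ptl) :
                           pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
            return -ENOMEM;

        /* ... the caller-supplied fn(pte, addr, data) runs for each entry here ... */

        if (mm != &init_mm)
            pte_unmap_unlock(pte, ptl);
        return 0;
    }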
2644 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_spinlock()
2661 pmdval = READ_ONCE(*vmf->pmd); in pte_spinlock()
2713 pmdval = READ_ONCE(*vmf->pmd); in __pte_map_lock_speculative()
2755 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock()
2766 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock_addr()
2830 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_spinlock()
2837 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock()
2844 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock_addr()
2933 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
2961 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
3657 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3917 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
3921 if (unlikely(pmd_trans_unstable(vmf->pmd))) in do_anonymous_page()
4051 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
4094 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4130 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4131 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
4149 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4151 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
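do_set_pmd() maps an entire PMD-sized page with a single huge entry during a file fault: the pmd lock is taken, the slot is rechecked for emptiness, and the entry is installed with set_pmd_at() plus update_mmu_cache_pmd(). A stripped-down sketch; accounting, rmap, and the deposit_prealloc_pte() step are omitted, and pmd_mkhuge()/mk_pmd() stand in for the exact entry construction:

    static vm_fault_t set_huge_pmd_sketch(struct vm_fault *vmf, struct page *page)
    {
        struct vm_area_struct *vma = vmf->vma;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        pmd_t entry;

        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_none(*vmf->pmd))) {   /* raced: let the pte path handle it */
            spin_unlock(vmf->ptl);
            return VM_FAULT_FALLBACK;
        }

        entry = pmd_mkhuge(mk_pmd(page, vma->vm_page_prot));
        set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
        update_mmu_cache_pmd(vma, haddr, vmf->pmd);
        spin_unlock(vmf->ptl);
        return 0;
    }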
4238 if (pmd_none(*vmf->pmd)) { in finish_fault()
4246 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in finish_fault()
4247 if (likely(pmd_none(*vmf->pmd))) { in finish_fault()
4249 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in finish_fault()
4253 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { in finish_fault()
4262 if (pmd_devmap_trans_unstable(vmf->pmd)) in finish_fault()
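finish_fault() applies the same install-under-lock discipline to the page table itself: if a pte table was preallocated earlier (vmf->prealloc_pte, set up while no locks were held), it is only hooked in with pmd_populate() after rechecking pmd_none() under pmd_lock(); otherwise a plain pte_alloc() runs. A trimmed sketch of that step, using the vm_fault fields from the listing:

    static vm_fault_t install_pte_table_sketch(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;

        if (pmd_none(*vmf->pmd) && vmf->prealloc_pte) {
            vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
            if (likely(pmd_none(*vmf->pmd))) {  /* recheck now that the lock is held */
                mm_inc_nr_ptes(vma->vm_mm);
                pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
                vmf->prealloc_pte = NULL;
            }
            spin_unlock(vmf->ptl);
        } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
            return VM_FAULT_OOM;
        }
        return 0;
    }

The pmd_devmap_trans_unstable() check at line 4262 then bails out if the pmd has meanwhile become a devmap or transparent huge entry, since mapping ptes underneath it would be unsafe.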
4366 pmd_none(*vmf->pmd)) { in do_fault_around()
4500 if (unlikely(!pmd_present(*vmf->pmd))) in do_fault()
4504 vmf->pmd, in do_fault()
4666 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
4728 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
4749 if (pmd_devmap_trans_unstable(vmf->pmd)) in handle_pte_fault()
4760 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); in handle_pte_fault()
4900 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
4901 if (!vmf.pmd) in __handle_mm_fault()
4911 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4916 pmd_t orig_pmd = *vmf.pmd; in __handle_mm_fault()
4923 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
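__handle_mm_fault() does the pmd-level dispatch for the normal, mmap_lock-protected path: allocate the pmd if needed, try a transparent huge page fault when the slot is empty and THP is enabled, wait on pmd migration entries, and only then fall through to the pte-level handler. A condensed fragment showing the shape of that dispatch; create_huge_pmd() is the memory.c helper that forwards to the THP fault code, and the trans-huge write/NUMA cases are left out:

    vmf.pmd = pmd_alloc(mm, vmf.pud, address);
    if (!vmf.pmd)
        return VM_FAULT_OOM;

    if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
        ret = create_huge_pmd(&vmf);            /* try to map a whole huge pmd */
        if (!(ret & VM_FAULT_FALLBACK))
            return ret;
    } else {
        pmd_t orig_pmd = *vmf.pmd;

        if (unlikely(is_swap_pmd(orig_pmd))) {
            /* a pmd migration entry: wait for migration to finish, then retry */
            pmd_migration_entry_wait(mm, vmf.pmd);
            return 0;
        }
        /* pmd_trans_huge()/pmd_devmap() write and NUMA handling would go here */
    }

    return handle_pte_fault(&vmf);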
5151 vmf.pmd = pmd_offset(vmf.pud, address); in ___handle_speculative_fault()
5154 vmf.orig_pmd = READ_ONCE(*vmf.pmd); in ___handle_speculative_fault()
5180 vmf.pte = pte_offset_map(vmf.pmd, address); in ___handle_speculative_fault()
5181 if (pmd_val(READ_ONCE(*vmf.pmd)) != pmd_val(vmf.orig_pmd)) { in ___handle_speculative_fault()
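___handle_speculative_fault() and the pte_map_lock()/pte_spinlock() helpers above come from the speculative page fault patches (not mainline): the pmd is snapshotted once with READ_ONCE() into vmf.orig_pmd, the walk proceeds without mmap_lock, and the mapped pte is thrown away if the pmd no longer matches the snapshot. A sketch of that revalidation step:

    static bool pte_map_if_pmd_stable_sketch(struct vm_fault *vmf, unsigned long address)
    {
        /* vmf->orig_pmd was snapshotted earlier in the lockless walk */
        vmf->pte = pte_offset_map(vmf->pmd, address);
        if (pmd_val(READ_ONCE(*vmf->pmd)) != pmd_val(vmf->orig_pmd)) {
            pte_unmap(vmf->pte);
            vmf->pte = NULL;
            return false;       /* pmd changed under us: fall back to the slow path */
        }
        return true;
    }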
5425 pmd_t *pmd; in follow_invalidate_pte() local
5440 pmd = pmd_offset(pud, address); in follow_invalidate_pte()
5441 VM_BUG_ON(pmd_trans_huge(*pmd)); in follow_invalidate_pte()
5443 if (pmd_huge(*pmd)) { in follow_invalidate_pte()
5453 *ptlp = pmd_lock(mm, pmd); in follow_invalidate_pte()
5454 if (pmd_huge(*pmd)) { in follow_invalidate_pte()
5455 *pmdpp = pmd; in follow_invalidate_pte()
5463 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) in follow_invalidate_pte()
5472 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in follow_invalidate_pte()
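follow_invalidate_pte() is the opposite of the allocating walks above: it only looks up. A huge pmd is handed back locked through *pmdpp instead of being descended into, a pmd that is none or bad ends the walk, and otherwise the caller gets the pte mapped and locked. A trimmed fragment showing the pmd-level decision (the pgd/p4d/pud checks and the mmu_notifier range setup are omitted):

    pmd = pmd_offset(pud, address);
    VM_BUG_ON(pmd_trans_huge(*pmd));            /* this path is never used on THP mappings */

    if (pmd_huge(*pmd)) {
        if (!pmdpp)
            return -EINVAL;                     /* caller cannot accept a huge pmd */
        *ptlp = pmd_lock(mm, pmd);
        if (pmd_huge(*pmd)) {                   /* still huge now that we hold the lock */
            *pmdpp = pmd;
            return 0;                           /* caller gets the locked huge pmd */
        }
        spin_unlock(*ptlp);                     /* raced with a split: keep walking */
    }

    if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
        return -EINVAL;                         /* nothing mapped at this address */

    ptep = pte_offset_map_lock(mm, pmd, address, ptlp);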