Lines Matching refs:pmd

214 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,  in free_pte_range()  argument
217 pgtable_t token = pmd_pgtable(*pmd); in free_pte_range()
218 pmd_clear(pmd); in free_pte_range()
227 pmd_t *pmd; in free_pmd_range() local
232 pmd = pmd_offset(pud, addr); in free_pmd_range()
235 if (pmd_none_or_clear_bad(pmd)) in free_pmd_range()
237 free_pte_range(tlb, pmd, addr); in free_pmd_range()
238 } while (pmd++, addr = next, addr != end); in free_pmd_range()
251 pmd = pmd_offset(pud, start); in free_pmd_range()
253 pmd_free_tlb(tlb, pmd, start); in free_pmd_range()
424 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd) in __pte_alloc() argument
446 ptl = pmd_lock(mm, pmd); in __pte_alloc()
447 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ in __pte_alloc()
449 pmd_populate(mm, pmd, new); in __pte_alloc()
458 int __pte_alloc_kernel(pmd_t *pmd) in __pte_alloc_kernel() argument
467 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ in __pte_alloc_kernel()
468 pmd_populate_kernel(&init_mm, pmd, new); in __pte_alloc_kernel()
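
A minimal sketch of the allocate-then-check pattern that __pte_alloc() above follows: the pte page is allocated without any lock held, and pmd_lock()/pmd_none() then decide whether this thread or a racing one gets to install it. The function name is hypothetical; accounting and memory barriers present in the real function are elided.

    static int example_pte_alloc(struct mm_struct *mm, pmd_t *pmd)
    {
            pgtable_t new = pte_alloc_one(mm);      /* may sleep, so done unlocked */
            spinlock_t *ptl;

            if (!new)
                    return -ENOMEM;

            ptl = pmd_lock(mm, pmd);                /* serialize against concurrent faults */
            if (likely(pmd_none(*pmd))) {           /* has another thread populated it? */
                    pmd_populate(mm, pmd, new);     /* install the freshly allocated pte page */
                    new = NULL;
            }
            spin_unlock(ptl);
            if (new)                                /* lost the race: discard our page */
                    pte_free(mm, new);
            return 0;
    }
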
506 pmd_t *pmd = pmd_offset(pud, addr); in print_bad_pte() local
537 (long long)pte_val(pte), (long long)pmd_val(*pmd)); in print_bad_pte()
650 pmd_t pmd) in vm_normal_page_pmd() argument
652 unsigned long pfn = pmd_pfn(pmd); in vm_normal_page_pmd()
674 if (pmd_devmap(pmd)) in vm_normal_page_pmd()
1024 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1039 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1163 pmd_t *pmd; in zap_pmd_range() local
1166 pmd = pmd_offset(pud, addr); in zap_pmd_range()
1169 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { in zap_pmd_range()
1171 __split_huge_pmd(vma, pmd, addr, false, NULL); in zap_pmd_range()
1172 else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1183 if (pmd_none_or_trans_huge_or_clear_bad(pmd)) in zap_pmd_range()
1185 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1188 } while (pmd++, addr = next, addr != end); in zap_pmd_range()
1417 pmd_t *pmd; in __get_locked_pte() local
1426 pmd = pmd_alloc(mm, pud, addr); in __get_locked_pte()
1427 if (!pmd) in __get_locked_pte()
1430 VM_BUG_ON(pmd_trans_huge(*pmd)); in __get_locked_pte()
1431 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
1810 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, in remap_pte_range() argument
1818 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
1840 pmd_t *pmd; in remap_pmd_range() local
1845 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
1846 if (!pmd) in remap_pmd_range()
1848 VM_BUG_ON(pmd_trans_huge(*pmd)); in remap_pmd_range()
1851 err = remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
1855 } while (pmd++, addr = next, addr != end); in remap_pmd_range()
2023 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, in apply_to_pte_range() argument
2032 pte_alloc_kernel(pmd, addr) : in apply_to_pte_range()
2033 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
2037 BUG_ON(pmd_huge(*pmd)); in apply_to_pte_range()
2058 pmd_t *pmd; in apply_to_pmd_range() local
2064 pmd = pmd_alloc(mm, pud, addr); in apply_to_pmd_range()
2065 if (!pmd) in apply_to_pmd_range()
2069 err = apply_to_pte_range(mm, pmd, addr, next, fn, data); in apply_to_pmd_range()
2072 } while (pmd++, addr = next, addr != end); in apply_to_pmd_range()
2151 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, in pte_unmap_same() argument
2157 spinlock_t *ptl = pte_lockptr(mm, pmd); in pte_unmap_same()
2380 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
2494 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
2617 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_wp_page()
2781 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) in do_swap_page()
2787 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
2832 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
2888 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3007 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
3011 if (unlikely(pmd_trans_unstable(vmf->pmd))) in do_anonymous_page()
3019 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
3056 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
3120 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
3154 static int pmd_devmap_trans_unstable(pmd_t *pmd) in pmd_devmap_trans_unstable() argument
3156 return pmd_devmap(*pmd) || pmd_trans_unstable(pmd); in pmd_devmap_trans_unstable()
3163 if (!pmd_none(*vmf->pmd)) in pte_alloc_one_map()
3166 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in pte_alloc_one_map()
3167 if (unlikely(!pmd_none(*vmf->pmd))) { in pte_alloc_one_map()
3173 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in pte_alloc_one_map()
3176 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { in pte_alloc_one_map()
3191 if (pmd_devmap_trans_unstable(vmf->pmd)) in pte_alloc_one_map()
3203 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in pte_alloc_one_map()
3213 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
3248 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
3249 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
3267 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
3269 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
3310 if (pmd_none(*vmf->pmd) && PageTransCompound(page) && in alloc_set_pte()
3478 if (pmd_none(*vmf->pmd)) { in do_fault_around()
3488 if (pmd_trans_huge(*vmf->pmd)) { in do_fault_around()
3631 if (unlikely(!pmd_present(*vmf->pmd))) in do_fault()
3635 vmf->pmd, in do_fault()
3699 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); in do_numa_page()
3791 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
3844 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
3854 if (pmd_devmap_trans_unstable(vmf->pmd)) in handle_pte_fault()
3862 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); in handle_pte_fault()
3893 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in handle_pte_fault()
3975 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
3976 if (!vmf.pmd) in __handle_mm_fault()
3978 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
3983 pmd_t orig_pmd = *vmf.pmd; in __handle_mm_fault()
3990 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
4158 pmd_t *pmd; in __follow_pte_pmd() local
4173 pmd = pmd_offset(pud, address); in __follow_pte_pmd()
4174 VM_BUG_ON(pmd_trans_huge(*pmd)); in __follow_pte_pmd()
4176 if (pmd_huge(*pmd)) { in __follow_pte_pmd()
4186 *ptlp = pmd_lock(mm, pmd); in __follow_pte_pmd()
4187 if (pmd_huge(*pmd)) { in __follow_pte_pmd()
4188 *pmdpp = pmd; in __follow_pte_pmd()
4196 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) in __follow_pte_pmd()
4205 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in __follow_pte_pmd()
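
A minimal sketch of the full page-table descent that __follow_pte_pmd() above performs before taking the pte lock, assuming a non-huge mapping; the function name is hypothetical, and the huge-pmd and devmap branches of the real function are elided.

    static pte_t *example_follow_pte(struct mm_struct *mm, unsigned long address,
                                     spinlock_t **ptlp)
    {
            pgd_t *pgd = pgd_offset(mm, address);
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                    return NULL;
            p4d = p4d_offset(pgd, address);
            if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
                    return NULL;
            pud = pud_offset(p4d, address);
            if (pud_none(*pud) || unlikely(pud_bad(*pud)))
                    return NULL;
            pmd = pmd_offset(pud, address);
            if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                    return NULL;
            /* map the pte for address and take its page-table lock */
            return pte_offset_map_lock(mm, pmd, address, ptlp);
    }
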