Lines Matching refs:pmd

390 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,  in free_pte_range()  argument
393 pgtable_t token = pmd_pgtable(*pmd); in free_pte_range()
394 pmd_clear(pmd); in free_pte_range()
403 pmd_t *pmd; in free_pmd_range() local
408 pmd = pmd_offset(pud, addr); in free_pmd_range()
411 if (pmd_none_or_clear_bad(pmd)) in free_pmd_range()
413 free_pte_range(tlb, pmd, addr); in free_pmd_range()
414 } while (pmd++, addr = next, addr != end); in free_pmd_range()
427 pmd = pmd_offset(pud, start); in free_pmd_range()
429 pmd_free_tlb(tlb, pmd, start); in free_pmd_range()
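
free_pte_range() and free_pmd_range() above follow the kernel's standard pmd-level walk idiom: offset into the pmd array, clamp each step with pmd_addr_end(), skip empty or bad entries, and advance until the range is covered. A minimal sketch of that loop shape (the helper name walk_pmd_range is illustrative, not from memory.c):

    static void walk_pmd_range(pud_t *pud, unsigned long addr,
                               unsigned long end)
    {
        pmd_t *pmd = pmd_offset(pud, addr);
        unsigned long next;

        do {
            next = pmd_addr_end(addr, end);     /* clamp to this pmd's span */
            if (pmd_none_or_clear_bad(pmd))
                continue;                       /* empty or corrupt: skip */
            /* ... act on the pte range [addr, next) under *pmd ... */
        } while (pmd++, addr = next, addr != end);
    }
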
562 pmd_t *pmd, unsigned long address) in __pte_alloc() argument
585 ptl = pmd_lock(mm, pmd); in __pte_alloc()
587 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ in __pte_alloc()
589 pmd_populate(mm, pmd, new); in __pte_alloc()
591 } else if (unlikely(pmd_trans_splitting(*pmd))) in __pte_alloc()
597 wait_split_huge_page(vma->anon_vma, pmd); in __pte_alloc()
601 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) in __pte_alloc_kernel() argument
610 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ in __pte_alloc_kernel()
611 pmd_populate_kernel(&init_mm, pmd, new); in __pte_alloc_kernel()
614 VM_BUG_ON(pmd_trans_splitting(*pmd)); in __pte_alloc_kernel()
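
__pte_alloc() and __pte_alloc_kernel() above share one race check: the new pte page is allocated first, and the pmd is then re-examined under its lock so that two threads faulting in the same range cannot both install a table. A simplified sketch of the user-space variant (the error path that frees the losing table and the THP split wait are omitted):

    spinlock_t *ptl = pmd_lock(mm, pmd);
    if (likely(pmd_none(*pmd)))         /* still unpopulated? */
        pmd_populate(mm, pmd, new);     /* install our freshly allocated table */
    /* else another thread won the race; the caller frees `new` */
    spin_unlock(ptl);
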
649 pmd_t *pmd = pmd_offset(pud, addr); in print_bad_pte() local
682 (long long)pte_val(pte), (long long)pmd_val(*pmd)); in print_bad_pte()
1074 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1087 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1213 pmd_t *pmd; in zap_pmd_range() local
1216 pmd = pmd_offset(pud, addr); in zap_pmd_range()
1219 if (pmd_trans_huge(*pmd)) { in zap_pmd_range()
1230 split_huge_page_pmd(vma, addr, pmd); in zap_pmd_range()
1231 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1242 if (pmd_none_or_trans_huge_or_clear_bad(pmd)) in zap_pmd_range()
1244 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1247 } while (pmd++, addr = next, addr != end); in zap_pmd_range()
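
zap_pmd_range() above adds a transparent-hugepage branch to the walk: a huge pmd is split back to ptes when only part of it is being unmapped, or zapped in one go otherwise, and only regular pmds descend into zap_pte_range(). The branch in sketch form (locking assertions omitted):

    if (pmd_trans_huge(*pmd)) {
        if (next - addr != HPAGE_PMD_SIZE)
            split_huge_page_pmd(vma, addr, pmd);    /* partial unmap: split */
        else if (zap_huge_pmd(tlb, vma, pmd, addr))
            goto next;                              /* whole pmd gone */
        /* otherwise fall through to the pte-level zap */
    }
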
1448 pmd_t * pmd = pmd_alloc(mm, pud, addr); in __get_locked_pte() local
1449 if (pmd) { in __get_locked_pte()
1450 VM_BUG_ON(pmd_trans_huge(*pmd)); in __get_locked_pte()
1451 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
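
__get_locked_pte() above shows the allocate-and-lock pattern: pmd_alloc() builds any missing intermediate levels and pte_alloc_map_lock() hands back the pte pointer with its page-table lock already held. The typical caller shape, in sketch form:

    pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
    if (!pte)
        return NULL;                    /* table allocation failed */
    /* ... read or modify *pte while ptl is held ... */
    pte_unmap_unlock(pte, ptl);
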
1645 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, in remap_pte_range() argument
1652 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
1670 pmd_t *pmd; in remap_pmd_range() local
1674 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
1675 if (!pmd) in remap_pmd_range()
1677 VM_BUG_ON(pmd_trans_huge(*pmd)); in remap_pmd_range()
1680 if (remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
1683 } while (pmd++, addr = next, addr != end); in remap_pmd_range()
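
remap_pte_range() and remap_pmd_range() above are the worker loops behind remap_pfn_range(), which drivers normally reach from an mmap handler. A hedged usage sketch (my_mmap and phys_base are hypothetical):

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
        unsigned long size = vma->vm_end - vma->vm_start;

        /* map phys_base..phys_base+size into the caller's address space */
        return remap_pfn_range(vma, vma->vm_start,
                               phys_base >> PAGE_SHIFT, size,
                               vma->vm_page_prot);
    }
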
1822 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, in apply_to_pte_range() argument
1832 pte_alloc_kernel(pmd, addr) : in apply_to_pte_range()
1833 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
1837 BUG_ON(pmd_huge(*pmd)); in apply_to_pte_range()
1841 token = pmd_pgtable(*pmd); in apply_to_pte_range()
1860 pmd_t *pmd; in apply_to_pmd_range() local
1866 pmd = pmd_alloc(mm, pud, addr); in apply_to_pmd_range()
1867 if (!pmd) in apply_to_pmd_range()
1871 err = apply_to_pte_range(mm, pmd, addr, next, fn, data); in apply_to_pmd_range()
1874 } while (pmd++, addr = next, addr != end); in apply_to_pmd_range()
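
apply_to_pte_range() and apply_to_pmd_range() above implement apply_to_page_range(), which runs a callback on every pte in a range, allocating page tables as needed. A usage sketch against the 3.x-era pte_fn_t signature (my_count_pte is hypothetical):

    static int my_count_pte(pte_t *pte, pgtable_t token,
                            unsigned long addr, void *data)
    {
        (*(unsigned long *)data)++;     /* e.g. count the ptes visited */
        return 0;                       /* non-zero would abort the walk */
    }

    /* caller: */
    unsigned long n = 0;
    apply_to_page_range(&init_mm, start, size, my_count_pte, &n);
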
1932 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, in pte_unmap_same() argument
1938 spinlock_t *ptl = pte_lockptr(mm, pmd); in pte_unmap_same()
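
pte_unmap_same() above captures a recurring check in the fault handlers: after the pte lock has been dropped, retake it and confirm the entry is unchanged before committing to a slow path. In sketch form (the !CONFIG_SMP shortcut is omitted):

    spinlock_t *ptl = pte_lockptr(mm, pmd);
    spin_lock(ptl);
    same = pte_same(*page_table, orig_pte); /* changed while unlocked? */
    spin_unlock(ptl);
    pte_unmap(page_table);
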
2027 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_wp_page() argument
2065 page_table = pte_offset_map_lock(mm, pmd, address, in do_wp_page()
2107 page_table = pte_offset_map_lock(mm, pmd, address, in do_wp_page()
2212 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_wp_page()
2405 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_swap_page() argument
2417 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) in do_swap_page()
2423 migration_entry_wait(mm, pmd, address); in do_swap_page()
2442 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2497 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2556 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); in do_swap_page()
2588 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_anonymous_page() argument
2606 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2632 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2841 unsigned long address, pmd_t *pmd, in do_read_fault() argument
2856 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2867 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2882 unsigned long address, pmd_t *pmd, in do_cow_fault() argument
2910 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_cow_fault()
2931 unsigned long address, pmd_t *pmd, in do_shared_fault() argument
2959 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_shared_fault()
2995 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_linear_fault() argument
3006 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_linear_fault()
3009 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_linear_fault()
3011 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_linear_fault()
3026 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_nonlinear_fault() argument
3033 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) in do_nonlinear_fault()
3046 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_nonlinear_fault()
3049 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_nonlinear_fault()
3051 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_nonlinear_fault()
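
do_linear_fault() and do_nonlinear_fault() above share the same three-way dispatch once the page offset is known, visible in the call sites listed:

    if (!(flags & FAULT_FLAG_WRITE))            /* read fault */
        return do_read_fault(mm, vma, address, pmd, pgoff, flags,
                             orig_pte);
    if (!(vma->vm_flags & VM_SHARED))           /* private write: copy */
        return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
                            orig_pte);
    return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
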
3070 unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) in do_numa_page() argument
3089 ptl = pte_lockptr(mm, pmd); in do_numa_page()
3162 pte_t *pte, pmd_t *pmd, unsigned int flags) in handle_pte_fault() argument
3172 pte, pmd, flags, entry); in handle_pte_fault()
3174 pte, pmd, flags); in handle_pte_fault()
3178 pte, pmd, flags, entry); in handle_pte_fault()
3180 pte, pmd, flags, entry); in handle_pte_fault()
3184 return do_numa_page(mm, vma, address, entry, pte, pmd); in handle_pte_fault()
3186 ptl = pte_lockptr(mm, pmd); in handle_pte_fault()
3193 pte, pmd, ptl, entry); in handle_pte_fault()
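
handle_pte_fault() above fans out on the saved pte value: an empty pte goes to do_linear_fault() or do_anonymous_page(), a non-present one to do_swap_page() or do_nonlinear_fault(), a NUMA-protected one to do_numa_page(), and a present pte is updated in place under pte_lockptr(). A condensed sketch of that dispatch for this kernel generation:

    entry = *pte;
    if (!pte_present(entry)) {
        if (pte_none(entry)) {
            if (vma->vm_ops && vma->vm_ops->fault)
                return do_linear_fault(mm, vma, address, pte, pmd,
                                       flags, entry);
            return do_anonymous_page(mm, vma, address, pte, pmd, flags);
        }
        if (pte_file(entry))
            return do_nonlinear_fault(mm, vma, address, pte, pmd,
                                      flags, entry);
        return do_swap_page(mm, vma, address, pte, pmd, flags, entry);
    }
    if (pte_numa(entry))
        return do_numa_page(mm, vma, address, entry, pte, pmd);
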
3225 pmd_t *pmd; in __handle_mm_fault() local
3235 pmd = pmd_alloc(mm, pud, address); in __handle_mm_fault()
3236 if (!pmd) in __handle_mm_fault()
3238 if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
3242 pmd, flags); in __handle_mm_fault()
3246 pmd_t orig_pmd = *pmd; in __handle_mm_fault()
3263 orig_pmd, pmd); in __handle_mm_fault()
3266 ret = do_huge_pmd_wp_page(mm, vma, address, pmd, in __handle_mm_fault()
3271 huge_pmd_set_accessed(mm, vma, address, pmd, in __handle_mm_fault()
3283 if (unlikely(pmd_none(*pmd)) && in __handle_mm_fault()
3284 unlikely(__pte_alloc(mm, vma, pmd, address))) in __handle_mm_fault()
3297 if (unlikely(pmd_trans_unstable(pmd))) in __handle_mm_fault()
3305 pte = pte_offset_map(pmd, address); in __handle_mm_fault()
3307 return handle_pte_fault(mm, vma, address, pte, pmd, flags); in __handle_mm_fault()
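
__handle_mm_fault() above builds the table spine top-down with the allocating variants (pud_alloc/pmd_alloc/__pte_alloc), handling huge-pmd faults before the pte level is touched. The descent in sketch form (THP branches and pmd_trans_unstable handling omitted):

    pgd = pgd_offset(mm, address);
    pud = pud_alloc(mm, pgd, address);
    if (!pud)
        return VM_FAULT_OOM;
    pmd = pmd_alloc(mm, pud, address);
    if (!pmd)
        return VM_FAULT_OOM;
    if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
        return VM_FAULT_OOM;
    pte = pte_offset_map(pmd, address);
    return handle_pte_fault(mm, vma, address, pte, pmd, flags);
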
3411 pmd_t *pmd; in __follow_pte() local
3422 pmd = pmd_offset(pud, address); in __follow_pte()
3423 VM_BUG_ON(pmd_trans_huge(*pmd)); in __follow_pte()
3424 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) in __follow_pte()
3428 if (pmd_huge(*pmd)) in __follow_pte()
3431 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in __follow_pte()
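
__follow_pte() above, by contrast, only walks tables that already exist: each level is checked with the *_none/*_bad predicates and the walk bails on huge entries, since the caller wants a regular pte returned mapped and locked. Sketch (the post-lock pte_present recheck is omitted):

    pgd = pgd_offset(mm, address);
    if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
        goto out;
    pud = pud_offset(pgd, address);
    if (pud_none(*pud) || unlikely(pud_bad(*pud)))
        goto out;
    pmd = pmd_offset(pud, address);
    if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)) || pmd_huge(*pmd))
        goto out;
    ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
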