
Lines Matching refs:vm_mm

548 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
851 struct mm_struct *src_mm = src_vma->vm_mm; in copy_present_page()
907 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_page()
920 struct mm_struct *src_mm = src_vma->vm_mm; in copy_present_pte()
959 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_pte()
987 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pte_range()
988 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pte_range()
1094 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pmd_range()
1095 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pmd_range()
1131 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pud_range()
1132 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pud_range()
1168 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_p4d_range()
1194 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_page_range()
1195 struct mm_struct *src_mm = src_vma->vm_mm; in copy_page_range()
1520 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1598 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, in unmap_vmas()
1621 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in zap_page_range()
1623 tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end); in zap_page_range()
1624 update_hiwater_rss(vma->vm_mm); in zap_page_range()
1648 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in zap_page_range_single()
1650 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end); in zap_page_range_single()
1651 update_hiwater_rss(vma->vm_mm); in zap_page_range_single()
1743 struct mm_struct *mm = vma->vm_mm; in insert_page()
1784 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
1857 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
1915 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
2007 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2362 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
2644 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_spinlock()
2667 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval); in pte_spinlock()
2726 ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval); in __pte_map_lock_speculative()
2755 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock()
2766 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock_addr()
2830 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_spinlock()
2837 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock()
2844 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock_addr()
2909 struct mm_struct *mm = vma->vm_mm; in cow_user_page()
3145 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3421 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3657 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3697 err = mem_cgroup_charge(page, vma->vm_mm, in do_swap_page()
3750 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
3761 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); in do_swap_page()
3814 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_swap_page()
3815 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); in do_swap_page()
3830 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
3831 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
3917 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
3927 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
3936 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
3962 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) in do_anonymous_page()
3988 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4000 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_anonymous_page()
4004 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
4052 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4094 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4099 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
4124 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4130 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4141 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); in do_set_pmd()
4149 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4186 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
4190 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); in do_set_pte()
4193 set_pte_at(vma->vm_mm, addr, vmf->pte, entry); in do_set_pte()
4229 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
4246 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in finish_fault()
4248 mm_inc_nr_ptes(vma->vm_mm); in finish_fault()
4249 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in finish_fault()
4253 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { in finish_fault()
4367 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
4417 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) { in do_cow_fault()
4489 struct mm_struct *vm_mm = vma->vm_mm; in do_fault() local
4503 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, in do_fault()
4530 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
4807 if (!mmu_notifier_trylock(vmf->vma->vm_mm)) { in handle_pte_fault()
4813 mmu_notifier_unlock(vmf->vma->vm_mm); in handle_pte_fault()
4863 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
5306 count_memcg_event_mm(vma->vm_mm, PGFAULT); in handle_mm_fault()
5324 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
5536 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
5557 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
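
All of the hits above share one pattern: a helper that only holds a vm_area_struct (or a vm_fault whose vma field points at one) reaches the owning address space through vma->vm_mm, and that mm_struct is what gets handed to the page-table, locking and RSS-accounting primitives. A minimal illustrative sketch of the pattern follows; example_set_pte() is a hypothetical helper, not a function from mm/memory.c, and it simplifies the error handling the real fault paths perform.

/*
 * Hypothetical helper illustrating the common vm_mm usage pattern in
 * the listing: resolve the mm_struct via vma->vm_mm, then use it for
 * page-table locking, RSS accounting and the PTE update itself.
 */
static vm_fault_t example_set_pte(struct vm_fault *vmf, pte_t entry)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;	/* owning address space */

	/* map and lock the PTE page of this mm at the faulting address */
	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (!pte_none(*vmf->pte)) {
		/* someone else populated the entry; back off */
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return VM_FAULT_NOPAGE;
	}

	inc_mm_counter(mm, MM_ANONPAGES);	/* account the page to this mm */
	set_pte_at(mm, vmf->address, vmf->pte, entry);

	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return 0;
}

The vma itself carries no page-table lock or counters; everything that must be serialized or accounted per process goes through the mm_struct, which is why nearly every hit above dereferences vm_mm near the top of its function.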