
Search results for refs:vm_mm (results 1 – 25 of 25), sorted by relevance
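For context: throughout mm/, vm_mm is the back-pointer from a virtual memory area (VMA) to the address space that owns it, so nearly every hit below is the same idiom of recovering the owning struct mm_struct from a struct vm_area_struct. A minimal sketch of the relevant fields, abridged from include/linux/mm_types.h (most members and the exact comments elided):

struct vm_area_struct {
	unsigned long vm_start;    /* first address inside the VMA */
	unsigned long vm_end;      /* first address past the end of the VMA */
	struct mm_struct *vm_mm;   /* the address space this VMA belongs to */
	/* ... many other members elided ... */
};

The typical use, visible in most results below, is struct mm_struct *mm = vma->vm_mm; followed by page-table, TLB, or counter operations on that mm. The one outlier is zsmalloc.c, whose vm_mm is an unrelated enum zs_mapmode member ("mapping mode").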

/mm/
pgtable-generic.c
53 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
76 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush()
109 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
136 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_huge_clear_flush()
148 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
195 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry)); in pmdp_invalidate()
212 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_collapse_flush()
mprotect.c
49 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
51 pmdl = pmd_lock(vma->vm_mm, pmd); in lock_pte_protection()
57 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
66 struct mm_struct *mm = vma->vm_mm; in change_pte_range()
75 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
142 struct mm_struct *mm = vma->vm_mm; in change_pmd_range()
218 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
298 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
mremap.c
96 struct mm_struct *mm = vma->vm_mm; in move_ptes()
140 flush_tlb_batched_pending(vma->vm_mm); in move_ptes()
195 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
204 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
207 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
231 if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma, in move_page_tables()
243 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
252 struct mm_struct *mm = vma->vm_mm; in move_vma()
huge_memory.c
872 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd()
1283 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1508 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1564 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1642 *ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1716 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_splitting()
1868 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_map()
2047 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
2313 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
2335 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
[all …]
vmacache.c
24 if (vmacache_valid_mm(newvma->vm_mm)) in vmacache_update()
62 if (WARN_ON_ONCE(vma->vm_mm != mm)) in vmacache_find()
rmap.c
176 struct mm_struct *mm = vma->vm_mm; in anon_vma_prepare()
834 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma()
854 struct mm_struct *mm = vma->vm_mm; in page_referenced_one()
934 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
1001 struct mm_struct *mm = vma->vm_mm; in page_mkclean_one()
1322 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1357 mmu_notifier_invalidate_range_start(vma->vm_mm, in try_to_unmap_one()
1507 mmu_notifier_invalidate_range_end(vma->vm_mm, in try_to_unmap_one()
madvise.c
50 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
156 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
179 .mm = vma->vm_mm, in force_swapin_readahead()
gup.c
52 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
83 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
209 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
324 struct mm_struct *mm = vma->vm_mm; in faultin_page()
914 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range()
memory.c
641 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
1293 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1369 struct mm_struct *mm = vma->vm_mm; in unmap_vmas()
1389 struct mm_struct *mm = vma->vm_mm; in zap_page_range()
1415 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single()
1476 struct mm_struct *mm = vma->vm_mm; in insert_page()
1543 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); in vm_insert_page()
1554 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
1772 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
2870 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
[all …]
ksm.c
375 ret = handle_mm_fault(vma->vm_mm, vma, addr, in break_ksm()
691 if (ksm_test_exit(vma->vm_mm)) in unmerge_ksm_pages()
861 struct mm_struct *mm = vma->vm_mm; in write_protect_page()
935 struct mm_struct *mm = vma->vm_mm; in replace_page()
1790 struct mm_struct *mm = vma->vm_mm; in ksm_madvise()
1981 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) in rmap_walk_ksm()
mmap.c
757 struct mm_struct *mm = vma->vm_mm; in vma_adjust()
1671 vma->vm_mm = mm; in mmap_region()
2179 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2205 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2225 struct mm_struct *mm = vma->vm_mm; in expand_upwards()
2318 struct mm_struct *mm = vma->vm_mm; in expand_downwards()
2898 vma->vm_mm = mm; in do_brk()
3032 struct mm_struct *mm = vma->vm_mm; in copy_vma()
3184 vma->vm_mm = mm; in __install_special_mapping()
mlock.c
372 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
502 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
zsmalloc.c
283 enum zs_mapmode vm_mm; /* mapping mode */ member
1101 if (area->vm_mm == ZS_MM_WO) in __zs_map_object()
1126 if (area->vm_mm == ZS_MM_RO) in __zs_unmap_object()
1293 area->vm_mm = mm; in zs_map_object()
page_idle.c
57 struct mm_struct *mm = vma->vm_mm; in page_idle_clear_pte_refs_one()
swapfile.c
1148 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) { in unuse_pte()
1153 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte()
1160 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in unuse_pte()
1161 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in unuse_pte()
1163 set_pte_at(vma->vm_mm, addr, pte, in unuse_pte()
1283 pgd = pgd_offset(vma->vm_mm, addr); in unuse_vma()
debug.c
161 vma->vm_prev, vma->vm_mm, in dump_vma()
mincore.c
210 mincore_walk.mm = vma->vm_mm; in do_mincore()
nommu.c
697 struct mm_struct *mm = vma->vm_mm; in protect_vma()
722 vma->vm_mm = mm; in add_vma_to_mm()
783 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
memory-failure.c
430 if (vma->vm_mm == t->mm) in collect_procs_anon()
465 if (vma->vm_mm == t->mm) in collect_procs_file()
hugetlb.c
3268 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
3407 mm = vma->vm_mm; in unmap_hugepage_range()
4081 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
4411 spte = huge_pte_offset(svma->vm_mm, saddr); in huge_pmd_share()
filemap.c
1984 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in filemap_fault()
1992 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { in filemap_fault()
mempolicy.c
738 err = split_vma(vma->vm_mm, vma, vmstart, 1); in mbind_range()
743 err = split_vma(vma->vm_mm, vma, vmend, 0); in mbind_range()
migrate.c
110 struct mm_struct *mm = vma->vm_mm; in remove_migration_pte()
shmem.c
1328 up_read(&vma->vm_mm->mmap_sem); in shmem_fault()
1359 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in shmem_fault()
memcontrol.c
4821 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
5043 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()