
Searched refs:vm_mm (Results 1 – 25 of 26) sorted by relevance
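
Nearly every hit below is the same idiom: struct vm_area_struct carries a back-pointer, vm_mm, to the struct mm_struct of the address space the mapping belongs to, so code that is handed only a VMA can still reach its owning mm. A heavily reduced sketch of that relationship (the real definitions live in include/linux/mm_types.h; the vma_mm() helper is purely illustrative):

/* Reduced sketch; see include/linux/mm_types.h for the real layouts. */
struct mm_struct;                       /* one instance per address space */

struct vm_area_struct {
	unsigned long vm_start;         /* first address of the mapping */
	unsigned long vm_end;           /* first address past the mapping */
	struct mm_struct *vm_mm;        /* address space this VMA belongs to */
	/* ... many more fields ... */
};

/* Illustrative helper mirroring the pattern repeated throughout mm/. */
static inline struct mm_struct *vma_mm(struct vm_area_struct *vma)
{
	return vma->vm_mm;
}

The zsmalloc.c entries are the one exception in this list: there vm_mm is an unrelated enum zs_mapmode field recording a per-CPU mapping mode, not the VMA back-pointer.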

/mm/
pgtable-generic.c
53 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
69 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
113 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush()
129 pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_clear_flush()
143 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
198 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry)); in pmdp_invalidate()
mprotect.c
47 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
49 pmdl = pmd_lock(vma->vm_mm, pmd); in lock_pte_protection()
55 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
64 struct mm_struct *mm = vma->vm_mm; in change_pte_range()
138 struct mm_struct *mm = vma->vm_mm; in change_pmd_range()
214 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
258 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
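
The mprotect.c hits above, like the later ones in madvise.c, mincore.c, mempolicy.c and swapfile.c, walk page tables through vma->vm_mm because the page-table lock belongs to the owning mm. A sketch of that idiom, assuming kernel context (walk_one_pte() is a hypothetical name):

#include <linux/mm.h>

/* Hypothetical walker showing the pte_offset_map_lock() idiom. */
static void walk_one_pte(struct vm_area_struct *vma, pmd_t *pmd,
			 unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	/* Map the PTE and take the page-table lock of vma's owning mm. */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (pte_present(*pte)) {
		/* ... inspect or rewrite the entry ... */
	}
	pte_unmap_unlock(pte, ptl);
}
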
mremap.c
97 struct mm_struct *mm = vma->vm_mm; in move_ptes()
180 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
189 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
192 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
217 if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma, in move_page_tables()
232 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
241 struct mm_struct *mm = vma->vm_mm; in move_vma()
rmap.c
171 struct mm_struct *mm = vma->vm_mm; in anon_vma_prepare()
706 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma()
726 struct mm_struct *mm = vma->vm_mm; in page_referenced_one()
801 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
868 struct mm_struct *mm = vma->vm_mm; in page_mkclean_one()
1188 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1315 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { in try_to_unmap_one()
1320 up_read(&vma->vm_mm->mmap_sem); in try_to_unmap_one()
1355 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_cluster()
1387 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { in try_to_unmap_cluster()
[all …]
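
The try_to_unmap_one() and try_to_unmap_cluster() hits above show another recurring pattern: reverse-map code reaches the owning address space only through vma->vm_mm and takes that mm's mmap_sem with a trylock, so it never sleeps on a busy address space. A minimal sketch (illustrative helper, not a kernel function):

#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>

/* Illustrative trylock wrapper around the owning mm's mmap_sem. */
static bool vma_mm_trylock_read(struct vm_area_struct *vma)
{
	if (!down_read_trylock(&vma->vm_mm->mmap_sem))
		return false;	/* mm is busy; caller defers the work */

	/* ... unmap or mlock work done under the read lock ... */

	up_read(&vma->vm_mm->mmap_sem);
	return true;
}
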
huge_memory.c
1223 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1466 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1518 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1564 *ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1638 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_splitting()
1790 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_map()
1969 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
2206 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
2228 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
2244 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
[all …]
vmacache.c
60 if (vmacache_valid_mm(newvma->vm_mm)) in vmacache_update()
98 if (WARN_ON_ONCE(vma->vm_mm != mm)) in vmacache_find()
mmap.c
761 struct mm_struct *mm = vma->vm_mm; in vma_adjust()
1635 vma->vm_mm = mm; in mmap_region()
2142 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2168 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2253 spin_lock(&vma->vm_mm->page_table_lock); in expand_upwards()
2260 vma->vm_mm->highest_vm_end = vm_end_gap(vma); in expand_upwards()
2261 spin_unlock(&vma->vm_mm->page_table_lock); in expand_upwards()
2269 validate_mm(vma->vm_mm); in expand_upwards()
2334 spin_lock(&vma->vm_mm->page_table_lock); in expand_downwards()
2340 spin_unlock(&vma->vm_mm->page_table_lock); in expand_downwards()
[all …]
madvise.c
49 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
155 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
178 .mm = vma->vm_mm, in force_swapin_readahead()
memory.c
647 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
1284 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1360 struct mm_struct *mm = vma->vm_mm; in unmap_vmas()
1380 struct mm_struct *mm = vma->vm_mm; in zap_page_range()
1406 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single()
1467 struct mm_struct *mm = vma->vm_mm; in insert_page()
1534 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); in vm_insert_page()
1545 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
1723 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
2721 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
[all …]
mincore.c
124 ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mincore_pte_range()
209 pgd = pgd_offset(vma->vm_mm, addr); in mincore_page_range()
mlock.c
230 struct mm_struct *mm = vma->vm_mm; in __mlock_vma_pages_range()
428 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
558 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
ksm.c
375 ret = handle_mm_fault(vma->vm_mm, vma, addr, in break_ksm()
689 if (ksm_test_exit(vma->vm_mm)) in unmerge_ksm_pages()
859 struct mm_struct *mm = vma->vm_mm; in write_protect_page()
933 struct mm_struct *mm = vma->vm_mm; in replace_page()
1742 struct mm_struct *mm = vma->vm_mm; in ksm_madvise()
1931 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) in rmap_walk_ksm()
gup.c
48 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
162 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
274 struct mm_struct *mm = vma->vm_mm; in faultin_page()
zsmalloc.c
279 enum zs_mapmode vm_mm; /* mapping mode */ member
1096 if (area->vm_mm == ZS_MM_WO) in __zs_map_object()
1121 if (area->vm_mm == ZS_MM_RO) in __zs_unmap_object()
1288 area->vm_mm = mm; in zs_map_object()
swapfile.c
1148 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) { in unuse_pte()
1153 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte()
1160 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in unuse_pte()
1161 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in unuse_pte()
1163 set_pte_at(vma->vm_mm, addr, pte, in unuse_pte()
1283 pgd = pgd_offset(vma->vm_mm, addr); in unuse_vma()
debug.c
159 vma->vm_prev, vma->vm_mm, in dump_vma()
mempolicy.c
493 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in queue_pages_pte_range()
532 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd); in queue_pages_hugetlb_pmd_range()
609 pgd = pgd_offset(vma->vm_mm, addr); in queue_pages_pgd_range()
783 err = split_vma(vma->vm_mm, vma, vmstart, 1); in mbind_range()
788 err = split_vma(vma->vm_mm, vma, vmend, 0); in mbind_range()
fremap.c
105 struct mm_struct *mm = vma->vm_mm; in generic_file_remap_pages()
nommu.c
690 struct mm_struct *mm = vma->vm_mm; in protect_vma()
717 vma->vm_mm = mm; in add_vma_to_mm()
778 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
memory-failure.c
451 if (vma->vm_mm == t->mm) in collect_procs_anon()
486 if (vma->vm_mm == t->mm) in collect_procs_file()
filemap_xip.c
187 mm = vma->vm_mm; in __xip_unmap()
hugetlb.c
2655 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
2772 mm = vma->vm_mm; in unmap_hugepage_range()
3400 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
3633 spte = huge_pte_offset(svma->vm_mm, saddr); in huge_pmd_share()
filemap.c
1896 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in filemap_fault()
1904 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { in filemap_fault()
migrate.c
109 struct mm_struct *mm = vma->vm_mm; in remove_migration_pte()
shmem.c
1317 up_read(&vma->vm_mm->mmap_sem); in shmem_fault()
1348 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in shmem_fault()
