
Searched refs:vm_mm (Results 1 – 25 of 29) sorted by relevance
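
For orientation: vm_mm is the back-pointer from a VMA to the address space that owns it, and nearly every hit below is a helper recovering the mm_struct from a vma it was handed. The field, abridged from include/linux/mm_types.h (surrounding members elided):

    struct vm_area_struct {
            unsigned long vm_start;         /* first address inside the VMA */
            unsigned long vm_end;           /* first address past the end */
            /* ... */
            struct mm_struct *vm_mm;        /* the address space we belong to */
            /* ... */
    };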


/mm/
memory.c
548 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
851 struct mm_struct *src_mm = src_vma->vm_mm; in copy_present_page()
907 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_page()
920 struct mm_struct *src_mm = src_vma->vm_mm; in copy_present_pte()
959 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_pte()
987 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pte_range()
988 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pte_range()
1094 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pmd_range()
1095 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pmd_range()
1131 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pud_range()
[all …]
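
The memory.c hits cluster in the fork-time copy path, where both parent and child address spaces are pulled out of their respective VMAs before the page tables are walked. A minimal sketch of the pattern (hypothetical helper, not the kernel's actual function):

    /* Hypothetical sketch: the copy helpers derive both address spaces
     * from the two VMAs, then operate on each side's page tables. */
    static int sketch_copy_range(struct vm_area_struct *dst_vma,
                                 struct vm_area_struct *src_vma)
    {
            struct mm_struct *dst_mm = dst_vma->vm_mm;
            struct mm_struct *src_mm = src_vma->vm_mm;

            /* ... read entries under src_mm's locks, install copies
             * via set_pte_at(dst_mm, ...) ... */
            return 0;
    }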
huge_memory.c
591 if (mem_cgroup_charge(page, vma->vm_mm, gfp)) { in __do_huge_pmd_anonymous_page()
599 pgtable = pte_alloc_one(vma->vm_mm); in __do_huge_pmd_anonymous_page()
613 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
619 ret = check_stable_address_space(vma->vm_mm); in __do_huge_pmd_anonymous_page()
629 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
639 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
640 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
641 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
642 mm_inc_nr_ptes(vma->vm_mm); in __do_huge_pmd_anonymous_page()
645 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); in __do_huge_pmd_anonymous_page()
[all …]
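
All of the huge_memory.c hits sit in the anonymous-THP fault path, which threads vma->vm_mm through cgroup charging, PTE-table preallocation, PMD locking and the per-mm counters. Condensed from the hits above (error handling and intervening logic omitted):

    struct mm_struct *mm = vma->vm_mm;

    if (mem_cgroup_charge(page, mm, gfp))           /* charge the huge page */
            return VM_FAULT_FALLBACK;
    pgtable = pte_alloc_one(mm);                    /* table to deposit */
    vmf->ptl = pmd_lock(mm, vmf->pmd);              /* lock the PMD */
    pgtable_trans_huge_deposit(mm, vmf->pmd, pgtable);
    set_pmd_at(mm, haddr, vmf->pmd, entry);         /* install the huge PMD */
    add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); /* account PMD-worth of pages */
    mm_inc_nr_ptes(mm);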
pgtable-generic.c
70 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
93 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush()
112 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
140 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_huge_clear_flush()
153 pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp); in pudp_huge_clear_flush()
215 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_collapse_flush()
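
pgtable-generic.c holds the arch-generic fallbacks, where the recurring shape is: recover the mm from the VMA, update the entry, then flush the TLB. From memory, the generic ptep_clear_flush() reads approximately:

    pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                           pte_t *ptep)
    {
            struct mm_struct *mm = (vma)->vm_mm;
            pte_t pte;

            pte = ptep_get_and_clear(mm, address, ptep);
            if (pte_accessible(mm, pte))            /* only flush live PTEs */
                    flush_tlb_page(vma, address);
            return pte;
    }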
mremap.c
139 struct mm_struct *mm = vma->vm_mm; in move_ptes()
175 flush_tlb_batched_pending(vma->vm_mm); in move_ptes()
251 struct mm_struct *mm = vma->vm_mm; in move_normal_pmd()
292 old_ptl = pmd_lock(vma->vm_mm, old_pmd); in move_normal_pmd()
327 struct mm_struct *mm = vma->vm_mm; in move_normal_pud()
349 old_ptl = pud_lock(vma->vm_mm, old_pud); in move_normal_pud()
476 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, in move_page_tables()
490 old_pud = get_old_pud(vma->vm_mm, old_addr); in move_page_tables()
493 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); in move_page_tables()
502 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
[all …]
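
In mremap.c, the whole-table moves take the source-side page-table lock through the VMA's mm and nest the destination lock under it. A condensation of the locking shape behind the move_normal_pmd() hits (entry move and TLB flush elided):

    struct mm_struct *mm = vma->vm_mm;
    spinlock_t *old_ptl, *new_ptl;

    old_ptl = pmd_lock(mm, old_pmd);                /* lock source PMD */
    new_ptl = pmd_lockptr(mm, new_pmd);
    if (new_ptl != old_ptl)                         /* nest destination lock */
            spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
    /* ... move the PMD entry, flush the TLB, unlock both ... */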
madvise.c
73 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
205 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
266 struct mm_struct *mm = vma->vm_mm; in madvise_willneed()
273 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); in madvise_willneed()
407 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
520 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_cold_page_range()
528 struct mm_struct *mm = vma->vm_mm; in madvise_cold()
555 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_pageout_page_range()
577 struct mm_struct *mm = vma->vm_mm; in madvise_pageout()
741 struct mm_struct *mm = vma->vm_mm; in madvise_free_single_vma()
[all …]
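
Several of the madvise walkers use the canonical locked PTE-walk idiom: map the PTE page and take its lock through the VMA's mm in a single call, then release both together.

    pte_t *pte;
    spinlock_t *ptl;

    pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    /* ... inspect or rewrite the PTEs for this range ... */
    pte_unmap_unlock(pte, ptl);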
mprotect.c
65 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
69 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
72 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
176 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
246 vma, vma->vm_mm, addr, end); in change_pmd_range()
328 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
402 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
vmacache.c
37 if (vmacache_valid_mm(newvma->vm_mm)) in vmacache_update()
76 if (WARN_ON_ONCE(vma->vm_mm != mm)) in vmacache_find()
rmap.c
187 struct mm_struct *mm = vma->vm_mm; in __anon_vma_prepare()
853 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
937 0, vma, vma->vm_mm, address, in page_mkclean_one()
956 set_pte_at(vma->vm_mm, address, pte, entry); in page_mkclean_one()
970 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_mkclean_one()
1451 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1495 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in try_to_unmap_one()
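
rmap.c pairs vm_mm with MMU-notifier bracketing so that secondary MMUs observe the unmap. The idiom as it appears at the try_to_unmap_one() hit (a one-page range shown for illustration):

    struct mmu_notifier_range range;

    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                            address, address + PAGE_SIZE);
    mmu_notifier_invalidate_range_start(&range);
    /* ... clear the PTE(s) mapping the page ... */
    mmu_notifier_invalidate_range_end(&range);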
khugepaged.c
356 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
717 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
755 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
765 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
783 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
1371 mmu_notifier_test_young(vma->vm_mm, address)) in khugepaged_scan_pmd()
1540 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count); in collapse_pte_mapped_thp()
1630 mm = vma->vm_mm; in retract_page_tables()
page_vma_mapped.c
50 pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd); in map_pte()
151 struct mm_struct *mm = pvmw->vma->vm_mm; in page_vma_mapped_walk()
migrate.c
240 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
248 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
2300 struct mm_struct *mm = vma->vm_mm; in migrate_vma_collect_pmd()
2497 migrate->vma->vm_mm, migrate->start, migrate->end, in migrate_vma_collect()
2501 walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, in migrate_vma_collect()
2846 struct mm_struct *mm = vma->vm_mm; in migrate_vma_insert_page()
2893 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
3007 migrate->vma->vm_mm, in migrate_vma_pages()
pagewalk.c
470 .mm = vma->vm_mm, in walk_page_vma()
549 walk.mm = vma->vm_mm; in walk_page_mapping()
gup.c
406 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
429 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
595 struct mm_struct *mm = vma->vm_mm; in follow_pmd_mask()
710 struct mm_struct *mm = vma->vm_mm; in follow_pud_mask()
793 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
1442 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range()
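
gup.c repeats the same derivation at every level of the follow_page_*() descent: each helper re-fetches mm = vma->vm_mm and walks one table level down. A hypothetical sketch of the top of that descent:

    /* Hypothetical sketch; the real descent continues through
     * p4d/pud/pmd and handles huge entries at each level. */
    static struct page *sketch_follow_page(struct vm_area_struct *vma,
                                           unsigned long address)
    {
            struct mm_struct *mm = vma->vm_mm;
            pgd_t *pgd = pgd_offset(mm, address);

            if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                    return NULL;
            /* ... descend to the PTE and return its page ... */
            return NULL;
    }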
ksm.c
852 if (ksm_test_exit(vma->vm_mm)) in unmerge_ksm_pages()
1037 struct mm_struct *mm = vma->vm_mm; in write_protect_page()
1124 struct mm_struct *mm = vma->vm_mm; in replace_page()
2441 struct mm_struct *mm = vma->vm_mm; in ksm_madvise()
2590 if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) { in ksm_might_need_to_copy()
2654 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) in rmap_walk_ksm()
mlock.c
392 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
537 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
mincore.c
199 err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec); in do_mincore()
mmap.c
782 struct mm_struct *mm = vma->vm_mm; in __vma_adjust()
2477 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2502 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2522 struct mm_struct *mm = vma->vm_mm; in expand_upwards()
2613 struct mm_struct *mm = vma->vm_mm; in expand_downwards()
3418 struct mm_struct *mm = vma->vm_mm; in copy_vma()
3567 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) in special_mapping_mremap()
memory-failure.c
302 pgd = pgd_offset(vma->vm_mm, address); in dev_pagemap_mapping_shift()
497 if (vma->vm_mm == t->mm) in collect_procs_anon()
533 if (vma->vm_mm == t->mm) in collect_procs_file()
debug.c
209 vma->vm_prev, vma->vm_mm, in dump_vma()
zsmalloc.c
298 enum zs_mapmode vm_mm; /* mapping mode */ (member)
1143 if (area->vm_mm == ZS_MM_WO) in __zs_map_object()
1168 if (area->vm_mm == ZS_MM_RO) in __zs_unmap_object()
1282 area->vm_mm = mm; in zs_map_object()
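
Note that the zsmalloc.c hits are a pure name collision: this vm_mm is a mapping-mode enum inside zsmalloc's per-CPU mapping_area, not the VMA back-pointer the other results refer to. Abridged:

    struct mapping_area {
            /* ... */
            enum zs_mapmode vm_mm;  /* ZS_MM_RO, ZS_MM_WO or ZS_MM_RW */
    };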
swapfile.c
1951 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte()
1957 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in unuse_pte()
1958 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in unuse_pte()
1960 set_pte_at(vma->vm_mm, addr, pte, in unuse_pte()
2129 pgd = pgd_offset(vma->vm_mm, addr); in unuse_vma()
internal.h
466 mmap_read_unlock(vmf->vma->vm_mm); in maybe_unlock_mmap_for_io()
hugetlb.c
3930 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
4080 mm = vma->vm_mm; in unmap_hugepage_range()
5074 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
5465 spte = huge_pte_offset(svma->vm_mm, saddr, in huge_pmd_share()
5645 struct mm_struct *mm = vma->vm_mm; in follow_huge_pmd_pte()
5778 struct mm_struct *mm = vma->vm_mm; in hugetlb_unshare_pmds()
nommu.c
572 vma->vm_mm = mm; in add_vma_to_mm()
631 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
/mm/damon/
paddr.c
31 damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr); in __damon_pa_mkold()
33 damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr); in __damon_pa_mkold()
109 mmu_notifier_test_young(vma->vm_mm, addr); in __damon_pa_young()
114 mmu_notifier_test_young(vma->vm_mm, addr); in __damon_pa_young()
