/mm/
huge_memory.c
    594  if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {  in __do_huge_pmd_anonymous_page()
    600  pgtable = pte_alloc_one(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
    614  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in __do_huge_pmd_anonymous_page()
    620  ret = check_stable_address_space(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
    631  pte_free(vma->vm_mm, pgtable);  in __do_huge_pmd_anonymous_page()
    642  pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);  in __do_huge_pmd_anonymous_page()
    643  set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);  in __do_huge_pmd_anonymous_page()
    644  add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in __do_huge_pmd_anonymous_page()
    645  mm_inc_nr_ptes(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
    656  pte_free(vma->vm_mm, pgtable);  in __do_huge_pmd_anonymous_page()
    [all …]

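The __do_huge_pmd_anonymous_page() hits above all reach the owning address space through vma->vm_mm: the PMD lock, the deposited page table, the PMD write and the RSS accounting all take the same mm. A condensed, hypothetical sketch of that install step (install_anon_huge_pmd() is not a kernel function, and the real code does considerably more, e.g. rmap and LRU handling), assuming the API of this kernel vintage:

#include <linux/mm.h>
#include <linux/huge_mm.h>

/*
 * Hypothetical helper, condensed from the __do_huge_pmd_anonymous_page()
 * references listed above; every step goes through vma->vm_mm.
 */
static void install_anon_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
				  unsigned long haddr, pmd_t entry,
				  pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);			/* cf. line 614 */
	pgtable_trans_huge_deposit(mm, pmd, pgtable);	/* cf. line 642 */
	set_pmd_at(mm, haddr, pmd, entry);		/* cf. line 643 */
	add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);	/* cf. line 644 */
	mm_inc_nr_ptes(mm);				/* cf. line 645 */
	spin_unlock(ptl);
}
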
memory.c
    503  pgd_t *pgd = pgd_offset(vma->vm_mm, addr);  in print_bad_pte()
    1251  pgd = pgd_offset(vma->vm_mm, addr);  in unmap_page_range()
    1329  mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,  in unmap_vmas()
    1352  mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,  in zap_page_range()
    1354  tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);  in zap_page_range()
    1355  update_hiwater_rss(vma->vm_mm);  in zap_page_range()
    1379  mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,  in zap_page_range_single()
    1381  tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);  in zap_page_range_single()
    1382  update_hiwater_rss(vma->vm_mm);  in zap_page_range_single()
    1444  struct mm_struct *mm = vma->vm_mm;  in insert_page()
    [all …]

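Several of the memory.c references (and the later swapfile.c and memory-failure.c ones) start a software page-table walk from vma->vm_mm with pgd_offset(). A sketch of that walk under this kernel's five-level layout; vma_walk_to_pte() is a hypothetical helper, not something that exists in mm/:

#include <linux/mm.h>

/*
 * Hypothetical helper: walk from a VMA to the PTE mapping @addr, starting
 * from vma->vm_mm the same way print_bad_pte() and unuse_vma() do.
 * Returns the mapped PTE with the PTE lock held in *ptlp; the caller must
 * pte_unmap_unlock() it.
 */
static pte_t *vma_walk_to_pte(struct vm_area_struct *vma,
			      unsigned long addr, spinlock_t **ptlp)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);		/* cf. memory.c line 503 */
	if (pgd_none_or_clear_bad(pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_trans_huge(*pmd))
		return NULL;
	return pte_offset_map_lock(mm, pmd, addr, ptlp);
}
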
pgtable-generic.c
    61  set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
    84  struct mm_struct *mm = (vma)->vm_mm;  in ptep_clear_flush()
    103  set_pmd_at(vma->vm_mm, address, pmdp, entry);  in pmdp_set_access_flags()
    131  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_huge_clear_flush()
    144  pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);  in pudp_huge_clear_flush()
    206  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_collapse_flush()

mremap.c
    120  struct mm_struct *mm = vma->vm_mm;  in move_ptes()
    156  flush_tlb_batched_pending(vma->vm_mm);  in move_ptes()
    200  struct mm_struct *mm = vma->vm_mm;  in move_normal_pmd()
    218  old_ptl = pmd_lock(vma->vm_mm, old_pmd);  in move_normal_pmd()
    252  mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,  in move_page_tables()
    263  old_pmd = get_old_pmd(vma->vm_mm, old_addr);  in move_page_tables()
    266  new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);  in move_page_tables()
    304  if (pte_alloc(new_vma->vm_mm, new_pmd))  in move_page_tables()
    324  struct mm_struct *mm = vma->vm_mm;  in move_vma()

mprotect.c
    61  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in change_pte_range()
    65  atomic_read(&vma->vm_mm->mm_users) == 1)  in change_pte_range()
    68  flush_tlb_batched_pending(vma->vm_mm);  in change_pte_range()
    138  set_pte_at(vma->vm_mm, addr, pte, newpte);  in change_pte_range()
    152  set_pte_at(vma->vm_mm, addr, pte, newpte);  in change_pte_range()
    189  vma, vma->vm_mm, addr, end);  in change_pmd_range()
    271  struct mm_struct *mm = vma->vm_mm;  in change_protection_range()
    342  struct mm_struct *mm = vma->vm_mm;  in mprotect_fixup()

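The change_pte_range() hits show the common pattern for rewriting PTEs: take the PTE lock through vma->vm_mm, flush any batched TLB invalidations pending on that mm, then write the new entry back. A condensed, hypothetical sketch of that pattern (change_one_pte() is not a kernel function), assuming the ptep_modify_prot_start()/commit() API that takes a VMA:

#include <linux/mm.h>
#include "internal.h"	/* flush_tlb_batched_pending() */

/*
 * Hypothetical sketch of the change_pte_range() pattern referenced above:
 * the PTE lock, the batched-TLB flush and the final write all go through
 * vma->vm_mm.
 */
static void change_one_pte(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *pte;
	pte_t oldpte, ptent;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);	/* cf. line 61 */
	flush_tlb_batched_pending(mm);			/* cf. line 68 */
	if (pte_present(*pte)) {
		oldpte = ptep_modify_prot_start(vma, addr, pte);
		ptent = pte_modify(oldpte, newprot);
		ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
	}
	pte_unmap_unlock(pte, ptl);
}
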
madvise.c
    69  struct mm_struct *mm = vma->vm_mm;  in madvise_behavior()
    199  orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);  in swapin_walk_pmd_entry()
    263  walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);  in madvise_willneed()
    386  orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in madvise_cold_or_pageout_pte_range()
    481  walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);  in madvise_cold_page_range()
    489  struct mm_struct *mm = vma->vm_mm;  in madvise_cold()
    514  walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);  in madvise_pageout_page_range()
    538  struct mm_struct *mm = vma->vm_mm;  in madvise_pageout()
    696  struct mm_struct *mm = vma->vm_mm;  in madvise_free_single_vma()
    719  walk_page_range(vma->vm_mm, range.start, range.end,  in madvise_free_single_vma()

khugepaged.c
    324  if (mm_has_pgste(vma->vm_mm))  in hugepage_madvise()
    412  test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))  in hugepage_vma_check()
    625  mmu_notifier_test_young(vma->vm_mm, address))  in __collapse_huge_page_isolate()
    659  add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);  in __collapse_huge_page_copy()
    669  pte_clear(vma->vm_mm, address, _pte);  in __collapse_huge_page_copy()
    687  pte_clear(vma->vm_mm, address, _pte);  in __collapse_huge_page_copy()
    1214  mmu_notifier_test_young(vma->vm_mm, address))  in khugepaged_scan_pmd()
    1379  add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);  in collapse_pte_mapped_thp()
    1383  ptl = pmd_lock(vma->vm_mm, pmd);  in collapse_pte_mapped_thp()
    1448  pmd = mm_find_pmd(vma->vm_mm, addr);  in retract_page_tables()
    [all …]

vmacache.c
    38  if (vmacache_valid_mm(newvma->vm_mm))  in vmacache_update()
    77  if (WARN_ON_ONCE(vma->vm_mm != mm))  in vmacache_find()

rmap.c
    178  struct mm_struct *mm = vma->vm_mm;  in __anon_vma_prepare()
    821  if (!mm_match_cgroup(vma->vm_mm, memcg))  in invalid_page_referenced_vma()
    901  0, vma, vma->vm_mm, address,  in page_mkclean_one()
    920  set_pte_at(vma->vm_mm, address, pte, entry);  in page_mkclean_one()
    934  set_pmd_at(vma->vm_mm, address, pmd, entry);  in page_mkclean_one()
    1344  struct mm_struct *mm = vma->vm_mm;  in try_to_unmap_one()
    1377  mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,  in try_to_unmap_one()

page_vma_mapped.c
    50  pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);  in map_pte()
    140  struct mm_struct *mm = pvmw->vma->vm_mm;  in page_vma_mapped_walk()

migrate.c
    257  set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);  in remove_migration_pte()
    265  set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);  in remove_migration_pte()
    2174  struct mm_struct *mm = vma->vm_mm;  in migrate_vma_collect_pmd()
    2349  migrate->vma->vm_mm, migrate->start, migrate->end);  in migrate_vma_collect()
    2352  walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,  in migrate_vma_collect()
    2689  struct mm_struct *mm = vma->vm_mm;  in migrate_vma_insert_page()
    2737  if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))  in migrate_vma_insert_page()
    2850  migrate->vma->vm_mm,  in migrate_vma_pages()

gup.c
    154  set_pte_at(vma->vm_mm, address, pte, entry);  in follow_pfn_pte()
    177  struct mm_struct *mm = vma->vm_mm;  in follow_page_pte()
    316  struct mm_struct *mm = vma->vm_mm;  in follow_pmd_mask()
    431  struct mm_struct *mm = vma->vm_mm;  in follow_pud_mask()
    514  struct mm_struct *mm = vma->vm_mm;  in follow_page_mask()
    1191  struct mm_struct *mm = vma->vm_mm;  in populate_vma_page_range()

mlock.c
    386  pte = get_locked_pte(vma->vm_mm, start, &ptl);  in __munlock_pagevec_fill()
    522  struct mm_struct *mm = vma->vm_mm;  in mlock_fixup()

debug.c
    126  vma->vm_prev, vma->vm_mm,  in dump_vma()

mmap.c
    723  struct mm_struct *mm = vma->vm_mm;  in __vma_adjust()
    2309  struct mm_struct *mm = vma->vm_mm;  in acct_stack_growth()
    2334  if (is_hugepage_only_range(vma->vm_mm, new_start, size))  in acct_stack_growth()
    2354  struct mm_struct *mm = vma->vm_mm;  in expand_upwards()
    2446  struct mm_struct *mm = vma->vm_mm;  in expand_downwards()
    3227  struct mm_struct *mm = vma->vm_mm;  in copy_vma()
    3355  if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))  in special_mapping_mremap()

memory-failure.c
    274  pgd = pgd_offset(vma->vm_mm, address);  in dev_pagemap_mapping_shift()
    463  if (vma->vm_mm == t->mm)  in collect_procs_anon()
    498  if (vma->vm_mm == t->mm)  in collect_procs_file()

ksm.c
    850  if (ksm_test_exit(vma->vm_mm))  in unmerge_ksm_pages()
    1035  struct mm_struct *mm = vma->vm_mm;  in write_protect_page()
    1122  struct mm_struct *mm = vma->vm_mm;  in replace_page()
    2431  struct mm_struct *mm = vma->vm_mm;  in ksm_madvise()
    2633  if ((rmap_item->mm == vma->vm_mm) == search_new_forks)  in rmap_walk_ksm()

mincore.c
    222  err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);  in do_mincore()

swapfile.c
    1867  if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,  in unuse_pte()
    1873  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in unuse_pte()
    1880  dec_mm_counter(vma->vm_mm, MM_SWAPENTS);  in unuse_pte()
    1881  inc_mm_counter(vma->vm_mm, MM_ANONPAGES);  in unuse_pte()
    1883  set_pte_at(vma->vm_mm, addr, pte,  in unuse_pte()
    2054  pgd = pgd_offset(vma->vm_mm, addr);  in unuse_vma()

zsmalloc.c    (note: this vm_mm is zsmalloc's per-CPU mapping-mode member, not vm_area_struct::vm_mm)
    302  enum zs_mapmode vm_mm; /* mapping mode */  member
    1187  if (area->vm_mm == ZS_MM_WO)  in __zs_map_object()
    1212  if (area->vm_mm == ZS_MM_RO)  in __zs_unmap_object()
    1328  area->vm_mm = mm;  in zs_map_object()

pagewalk.c
    361  .mm = vma->vm_mm,  in walk_page_vma()

internal.h
    381  up_read(&vmf->vma->vm_mm->mmap_sem);  in maybe_unlock_mmap_for_io()

hmm.c
    927  walk_page_range(vma->vm_mm, start, end, &hmm_walk_ops,  in hmm_range_fault()
    931  ret = walk_page_range(vma->vm_mm, start, end,  in hmm_range_fault()

nommu.c
    591  vma->vm_mm = mm;  in add_vma_to_mm()
    650  struct mm_struct *mm = vma->vm_mm;  in delete_vma_from_mm()

shmem.c
    1633  struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;  in shmem_swapin_page()
    1768  charge_mm = vma ? vma->vm_mm : current->mm;  in shmem_getpage_gfp()
    2061  test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))  in shmem_fault()
    3996  test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))  in shmem_huge_enabled()

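The shmem.c references illustrate a small recurring detail: when a page is brought in without a faulting VMA (for example from the swapoff path), there is no vma->vm_mm to charge, so the code falls back to current->mm. A trivial sketch of that choice; charge_mm_for() is a hypothetical name, not a shmem helper:

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical helper mirroring shmem_swapin_page()/shmem_getpage_gfp():
 * charge the faulting mm when a VMA is available, otherwise fall back to
 * the calling task's mm.
 */
static struct mm_struct *charge_mm_for(struct vm_area_struct *vma)
{
	return vma ? vma->vm_mm : current->mm;
}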