Home
last modified time | relevance | path

Searched refs:vm_file (Results 1 – 19 of 19) sorted by relevance

/mm/
D nommu.c 558 if (region->vm_file) in __put_nommu_region()
559 fput(region->vm_file); in __put_nommu_region()
598 if (vma->vm_file) { in add_vma_to_mm()
599 mapping = vma->vm_file->f_mapping; in add_vma_to_mm()
667 if (vma->vm_file) { in delete_vma_from_mm()
668 mapping = vma->vm_file->f_mapping; in delete_vma_from_mm()
696 if (vma->vm_file) in delete_vma()
697 fput(vma->vm_file); in delete_vma()
987 ret = call_mmap(vma->vm_file, vma); in do_mmap_shared_file()
1018 ret = call_mmap(vma->vm_file, vma); in do_mmap_private()
[all …]
D mmap.c 158 struct file *file = vma->vm_file; in unlink_file_vma()
178 if (vma->vm_file) in remove_vma()
179 fput(vma->vm_file); in remove_vma()
616 file = vma->vm_file; in __vma_link_file()
646 if (vma->vm_file) { in vma_link()
647 mapping = vma->vm_file->f_mapping; in vma_link()
726 struct file *file = vma->vm_file; in __vma_adjust()
753 VM_WARN_ON(file != next->vm_file); in __vma_adjust()
1025 if (vma->vm_file != file) in is_mergeable_vma()
1254 a->vm_file == b->vm_file && in anon_vma_compatible()
[all …]
D mincore.c 101 if (vma->vm_file) { in __mincore_unmapped_range()
106 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff); in __mincore_unmapped_range()
184 if (!vma->vm_file) in can_do_mincore()
192 return inode_owner_or_capable(file_inode(vma->vm_file)) || in can_do_mincore()
193 inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0; in can_do_mincore()
D mremap.c 86 if (vma->vm_file) in take_rmap_locks()
87 i_mmap_lock_write(vma->vm_file->f_mapping); in take_rmap_locks()
96 if (vma->vm_file) in drop_rmap_locks()
97 i_mmap_unlock_write(vma->vm_file->f_mapping); in drop_rmap_locks()
559 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + in mremap_to()
715 new_addr = get_unmapped_area(vma->vm_file, 0, new_len, in SYSCALL_DEFINE5()
D userfaultfd.c 79 if (dst_vma->vm_file) { in mcopy_atomic_pte()
81 inode = dst_vma->vm_file->f_inode; in mcopy_atomic_pte()
128 if (dst_vma->vm_file) { in mfill_zeropage_pte()
130 inode = dst_vma->vm_file->f_inode; in mfill_zeropage_pte()
274 mapping = dst_vma->vm_file->f_mapping; in __mcopy_atomic_hugetlb()
D madvise.c 96 if (vma->vm_file || vma->vm_flags & VM_SHARED) { in madvise_behavior()
136 vma->vm_file, pgoff, vma_policy(vma), in madvise_behavior()
257 struct file *file = vma->vm_file; in madvise_willneed()
531 if (!vma->vm_file) in can_do_pageout()
539 return inode_owner_or_capable(file_inode(vma->vm_file)) || in can_do_pageout()
540 inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0; in can_do_pageout()
836 f = vma->vm_file; in madvise_remove()
D khugepaged.c 418 if (shmem_file(vma->vm_file) || in hugepage_vma_check()
420 vma->vm_file && in hugepage_vma_check()
1318 if (!vma || !vma->vm_file || in collapse_pte_mapped_thp()
1331 hpage = find_lock_page(vma->vm_file->f_mapping, in collapse_pte_mapped_thp()
1348 i_mmap_lock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
1417 i_mmap_unlock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
1426 i_mmap_unlock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
2019 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { in khugepaged_scan_mm_slot()
2024 if (shmem_file(vma->vm_file) in khugepaged_scan_mm_slot()
2027 file = get_file(vma->vm_file); in khugepaged_scan_mm_slot()
D memory.c 544 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
555 vma->vm_file, in print_bad_pte()
557 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_pte()
1312 if (vma->vm_file) in unmap_single_vma()
1331 if (vma->vm_file) { in unmap_single_vma()
1332 i_mmap_lock_write(vma->vm_file->f_mapping); in unmap_single_vma()
1334 i_mmap_unlock_write(vma->vm_file->f_mapping); in unmap_single_vma()
2302 struct file *vm_file = vma->vm_file; in __get_fault_gfp_mask() local
2304 if (vm_file) in __get_fault_gfp_mask()
2305 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; in __get_fault_gfp_mask()
[all …]
D msync.c 83 file = vma->vm_file; in SYSCALL_DEFINE3()
D hugetlb.c 235 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma()
782 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map()
3761 mapping = vma->vm_file->f_mapping; in unmap_ref_private()
3933 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_page()
3950 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_present()
4224 mapping = vma->vm_file->f_mapping; in hugetlb_fault()
4391 mapping = dst_vma->vm_file->f_mapping; in hugetlb_mcopy_atomic_pte()
4663 i_mmap_lock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
4724 i_mmap_unlock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
4965 struct address_space *mapping = vma->vm_file->f_mapping; in huge_pmd_share()
D filemap.c 2543 struct file *file = vmf->vma->vm_file; in do_sync_mmap_readahead()
2592 struct file *file = vmf->vma->vm_file; in do_async_mmap_readahead()
2637 struct file *file = vmf->vma->vm_file; in filemap_fault()
2763 struct file *file = vmf->vma->vm_file; in filemap_map_pages()
2832 struct inode *inode = file_inode(vmf->vma->vm_file); in filemap_page_mkwrite()
2836 file_update_time(vmf->vma->vm_file); in filemap_page_mkwrite()
D shmem.c 741 struct inode *inode = file_inode(vma->vm_file); in shmem_swap_usage()
1998 struct inode *inode = file_inode(vma->vm_file); in shmem_fault()
2170 struct inode *inode = file_inode(vma->vm_file); in shmem_set_policy()
2177 struct inode *inode = file_inode(vma->vm_file); in shmem_get_policy()
2307 struct inode *inode = file_inode(dst_vma->vm_file); in shmem_mfill_atomic_pte()
4014 struct inode *inode = file_inode(vma->vm_file); in shmem_huge_enabled()
4210 if (vma->vm_file) in shmem_zero_setup()
4211 fput(vma->vm_file); in shmem_zero_setup()
4212 vma->vm_file = file; in shmem_zero_setup()
D debug.c 131 vma->vm_file, vma->vm_private_data, in dump_vma()
D internal.h 405 fpin = get_file(vmf->vma->vm_file); in maybe_unlock_mmap_for_io()
D rmap.c 711 } else if (!vma->vm_file) { in page_address_in_vma()
713 } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) { in page_address_in_vma()
D mprotect.c 430 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in mprotect_fixup()
D mlock.c 537 vma->vm_file, pgoff, vma_policy(vma), in mlock_fixup()
D mempolicy.c 708 vma->vm_ops, vma->vm_file, in vma_replace_policy()
760 vma->anon_vma, vma->vm_file, pgoff, in mbind_range()
D memcontrol.c 5474 if (!vma->vm_file) /* anonymous vma */ in mc_handle_file_pte()
5479 mapping = vma->vm_file->f_mapping; in mc_handle_file_pte()