/fs/proc/ |
D | task_mmu.c |
    133  struct vm_area_struct *vma = vma_next(&priv->iter);  in proc_get_vma() local
    135  if (vma) {  in proc_get_vma()
    136  *ppos = vma->vm_start;  in proc_get_vma()
    139  vma = get_gate_vma(priv->mm);  in proc_get_vma()
    142  return vma;  in proc_get_vma()
    264  show_map_vma(struct seq_file *m, struct vm_area_struct *vma)  in show_map_vma() argument
    267  struct mm_struct *mm = vma->vm_mm;  in show_map_vma()
    268  struct file *file = vma->vm_file;  in show_map_vma()
    269  vm_flags_t flags = vma->vm_flags;  in show_map_vma()
    277  struct inode *inode = file_inode(vma->vm_file);  in show_map_vma()
    [all …]
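The proc_get_vma() hits above step through a task's mappings one VMA at a time with the maple-tree VMA iterator. A minimal sketch of the same for_each_vma()/VMA_ITERATOR pattern, assuming the caller already holds a reference on the mm_struct (count_mapped_bytes is a hypothetical helper, not from the listing):

#include <linux/mm.h>

/* Walk every VMA of @mm under the mmap read lock and total the
 * mapped bytes -- the iteration pattern the excerpt above uses
 * one step at a time via vma_next(). */
static unsigned long count_mapped_bytes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long bytes = 0;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		bytes += vma->vm_end - vma->vm_start;
	mmap_read_unlock(mm);

	return bytes;
}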
|
D | task_nommu.c |
    24  struct vm_area_struct *vma;  in task_mem() local
    29  for_each_vma(vmi, vma) {  in task_mem()
    30  bytes += kobjsize(vma);  in task_mem()
    32  region = vma->vm_region;  in task_mem()
    37  size = vma->vm_end - vma->vm_start;  in task_mem()
    41  is_nommu_shared_mapping(vma->vm_flags)) {  in task_mem()
    46  slack = region->vm_end - vma->vm_end;  in task_mem()
    84  struct vm_area_struct *vma;  in task_vsize() local
    88  for_each_vma(vmi, vma)  in task_vsize()
    89  vsize += vma->vm_end - vma->vm_start;  in task_vsize()
    [all …]
|
D | vmcore.c |
    224  int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,  in remap_oldmem_pfn_range() argument
    229  return remap_pfn_range(vma, from, pfn, size, prot);  in remap_oldmem_pfn_range()
    276  static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,  in vmcoredd_mmap_dumps() argument
    290  if (remap_vmalloc_range_partial(vma, dst, buf, 0,  in vmcoredd_mmap_dumps()
    427  struct address_space *mapping = vmf->vma->vm_file->f_mapping;  in mmap_vmcore_fault()
    503  static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,  in remap_oldmem_pfn_checked() argument
    525  if (remap_oldmem_pfn_range(vma, from + len,  in remap_oldmem_pfn_checked()
    532  if (remap_oldmem_pfn_range(vma, from + len,  in remap_oldmem_pfn_checked()
    543  if (remap_oldmem_pfn_range(vma, from + len, pos_start,  in remap_oldmem_pfn_checked()
    549  do_munmap(vma->vm_mm, from, len, NULL);  in remap_oldmem_pfn_checked()
    [all …]
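remap_oldmem_pfn_range() at line 224 is declared __weak: the generic fallback simply forwards to remap_pfn_range(), and an architecture can replace it with a strong definition of the same symbol at link time. A sketch of that override pattern with hypothetical names:

#include <linux/mm.h>

/* Generic code supplies a weak default... */
int __weak my_remap_oldmem(struct vm_area_struct *vma, unsigned long from,
			   unsigned long pfn, unsigned long size,
			   pgprot_t prot)
{
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/* ...an architecture that needs special handling (say, mapping old
 * memory uncached) defines my_remap_oldmem() again without __weak,
 * and the linker picks the strong version over this one. */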
|
/fs/ |
D | userfaultfd.c |
    92  bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)  in userfaultfd_wp_unpopulated() argument
    94  struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;  in userfaultfd_wp_unpopulated()
    102  static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,  in userfaultfd_set_vm_flags() argument
    105  const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;  in userfaultfd_set_vm_flags()
    107  vm_flags_reset(vma, flags);  in userfaultfd_set_vm_flags()
    113  if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)  in userfaultfd_set_vm_flags()
    114  vma_set_page_prot(vma);  in userfaultfd_set_vm_flags()
    244  struct vm_area_struct *vma = vmf->vma;  in userfaultfd_huge_must_wait() local
    250  ptep = hugetlb_walk(vma, vmf->address, vma_mmu_pagesize(vma));  in userfaultfd_huge_must_wait()
    375  struct vm_area_struct *vma = vmf->vma;  in handle_userfault() local
    [all …]
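The hits above are the kernel-side bookkeeping: per-VMA uffd context and the VM_UFFD_WP bit in vma->vm_flags. The same machinery is driven from userspace through the userfaultfd(2) file descriptor; a minimal registration sketch (uffd_register is a hypothetical helper, error handling trimmed):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

/* Register [addr, addr+len) for missing-page events; the kernel then
 * marks the covered VMAs, much as the excerpts above manage the
 * VM_UFFD_WP flag for write-protect mode. */
static int uffd_register(void *addr, size_t len)
{
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)addr, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) ||
	    ioctl(uffd, UFFDIO_REGISTER, &reg))
		return -1;
	return uffd;	/* poll/read this fd to receive fault events */
}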
|
D | coredump.c |
    1003  static bool always_dump_vma(struct vm_area_struct *vma)  in always_dump_vma() argument
    1006  if (vma == get_gate_vma(vma->vm_mm))  in always_dump_vma()
    1013  if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))  in always_dump_vma()
    1020  if (arch_vma_name(vma))  in always_dump_vma()
    1031  static unsigned long vma_dump_size(struct vm_area_struct *vma,  in vma_dump_size() argument
    1037  if (always_dump_vma(vma))  in vma_dump_size()
    1040  if (vma->vm_flags & VM_DONTDUMP)  in vma_dump_size()
    1044  if (vma_is_dax(vma)) {  in vma_dump_size()
    1045  if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))  in vma_dump_size()
    1047  if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))  in vma_dump_size()
    [all …]
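The FILTER() checks in vma_dump_size() read the per-process bitmask exposed as /proc/<pid>/coredump_filter; VMAs matched by always_dump_vma() (gate VMA, named special mappings such as the vDSO) are written regardless. A small userspace sketch that clears the mask so only those always-dump VMAs land in a core (minimize_coredump is hypothetical):

#include <fcntl.h>
#include <unistd.h>

/* Clear the coredump filter for the current process: anonymous and
 * file-backed mappings are then skipped in vma_dump_size(), and only
 * the always_dump_vma() mappings are dumped. */
static int minimize_coredump(void)
{
	int fd = open("/proc/self/coredump_filter", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "0", 1) != 1) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}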
|
D | exec.c |
    203  struct vm_area_struct *vma = bprm->vma;  in get_arg_page() local
    212  if (write && pos < vma->vm_start) {  in get_arg_page()
    214  ret = expand_downwards(vma, pos);  in get_arg_page()
    235  acct_arg_size(bprm, vma_pages(vma));  in get_arg_page()
    252  flush_cache_page(bprm->vma, pos, page_to_pfn(page));  in flush_arg_page()
    258  struct vm_area_struct *vma = NULL;  in __bprm_mm_init() local
    261  bprm->vma = vma = vm_area_alloc(mm);  in __bprm_mm_init()
    262  if (!vma)  in __bprm_mm_init()
    264  vma_set_anonymous(vma);  in __bprm_mm_init()
    278  vma->vm_end = STACK_TOP_MAX;  in __bprm_mm_init()
    [all …]
|
D | dax.c |
    357  struct vm_area_struct *vma, unsigned long address, bool shared)  in dax_associate_entry() argument
    365  index = linear_page_index(vma, address & ~(size - 1));  in dax_associate_entry()
    856  struct vm_area_struct *vma)  in dax_fault_is_synchronous() argument
    858  return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&  in dax_fault_is_synchronous()
    873  struct address_space *mapping = vmf->vma->vm_file->f_mapping;  in dax_insert_entry()
    876  bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);  in dax_insert_entry()
    898  dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,  in dax_insert_entry()
    931  struct vm_area_struct *vma;  in dax_writeback_one() local
    993  vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {  in dax_writeback_one()
    994  pfn_mkclean_range(pfn, count, index, vma);  in dax_writeback_one()
    [all …]
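dax_fault_is_synchronous() at line 858 keys off VM_SYNC, the flag userspace requests with MAP_SYNC: such faults must not complete until the filesystem metadata backing the page is durable. A sketch of the mapping call, assuming fd refers to a file on a DAX-capable filesystem (map_sync is a hypothetical wrapper):

#include <sys/mman.h>
#include <linux/mman.h>	/* MAP_SYNC / MAP_SHARED_VALIDATE on older libcs */

/* MAP_SHARED_VALIDATE makes the kernel reject MAP_SYNC with an error
 * instead of silently ignoring it when the file cannot support
 * synchronous page faults. */
static void *map_sync(int fd, size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
}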
|
/fs/hugetlbfs/ |
D | inode.c |
    87  static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,  in hugetlb_set_vma_policy() argument
    90  vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,  in hugetlb_set_vma_policy()
    94  static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)  in hugetlb_drop_vma_policy() argument
    96  mpol_cond_put(vma->vm_policy);  in hugetlb_drop_vma_policy()
    99  static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,  in hugetlb_set_vma_policy() argument
    104  static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)  in hugetlb_drop_vma_policy() argument
    119  static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)  in hugetlbfs_file_mmap() argument
    136  vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);  in hugetlbfs_file_mmap()
    137  vma->vm_ops = &hugetlb_vm_ops;  in hugetlbfs_file_mmap()
    139  ret = seal_check_future_write(info->seals, vma);  in hugetlbfs_file_mmap()
    [all …]
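hugetlbfs_file_mmap() is what runs behind both an explicit mmap() of a hugetlbfs file and MAP_HUGETLB (the kernel backs the latter with an internal hugetlbfs file); it tags the VMA VM_HUGETLB | VM_DONTEXPAND and installs hugetlb_vm_ops. Userspace sketch of the anonymous variant, assuming 2 MiB huge pages are configured and available (map_hugepage is hypothetical):

#include <sys/mman.h>

/* Anonymous huge-page mapping; @len should be a multiple of the huge
 * page size, and the request ends up in hugetlbfs_file_mmap(). */
static void *map_hugepage(size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
}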
|
/fs/coda/ |
D | file.c |
    125  coda_vm_open(struct vm_area_struct *vma)  in coda_vm_open() argument
    128  container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);  in coda_vm_open()
    133  cvm_ops->host_vm_ops->open(vma);  in coda_vm_open()
    137  coda_vm_close(struct vm_area_struct *vma)  in coda_vm_close() argument
    140  container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);  in coda_vm_close()
    143  cvm_ops->host_vm_ops->close(vma);  in coda_vm_close()
    146  vma->vm_ops = cvm_ops->host_vm_ops;  in coda_vm_close()
    153  coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)  in coda_file_mmap() argument
    168  if (WARN_ON(coda_file != vma->vm_file))  in coda_file_mmap()
    171  count = vma->vm_end - vma->vm_start;  in coda_file_mmap()
    [all …]
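The coda hits wrap the host filesystem's vm_operations_struct inside a private structure and recover that wrapper from vma->vm_ops with container_of(). The same delegation pattern reduced to a sketch (struct and function names hypothetical):

#include <linux/mm.h>

struct wrapped_vm_ops {
	struct vm_operations_struct vm_ops;	/* installed on the VMA */
	const struct vm_operations_struct *host_vm_ops;
};

static void wrapped_vm_open(struct vm_area_struct *vma)
{
	struct wrapped_vm_ops *w =
		container_of(vma->vm_ops, struct wrapped_vm_ops, vm_ops);

	/* forward to the wrapped filesystem's handler, if it has one */
	if (w->host_vm_ops && w->host_vm_ops->open)
		w->host_vm_ops->open(vma);
}

This works because installing &w->vm_ops on the VMA lets any later callback turn vma->vm_ops back into the enclosing wrapper, so per-mapping private state rides along without touching the VMA itself.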
|
/fs/ocfs2/ |
D | mmap.c |
    33  struct vm_area_struct *vma = vmf->vma;  in ocfs2_fault() local
    41  trace_ocfs2_fault(OCFS2_I(vma->vm_file->f_mapping->host)->ip_blkno,  in ocfs2_fault()
    42  vma, vmf->page, vmf->pgoff);  in ocfs2_fault()
    116  struct inode *inode = file_inode(vmf->vma->vm_file);  in ocfs2_page_mkwrite()
    144  ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);  in ocfs2_page_mkwrite()
    162  int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)  in ocfs2_mmap() argument
    174  vma->vm_ops = &ocfs2_file_vm_ops;  in ocfs2_mmap()
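ocfs2_mmap() follows the canonical shape of a file_operations ->mmap handler: validate, then install the filesystem's vm_operations_struct so faults are routed to it. The pattern in its smallest form, with hypothetical names and the generic page-cache handlers filemap_fault()/filemap_map_pages():

#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,	/* generic page-cache read fault */
	.map_pages	= filemap_map_pages,	/* generic fault-around */
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);			/* atime handling */
	vma->vm_ops = &myfs_file_vm_ops;	/* faults now come to us */
	return 0;
}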
|
/fs/kernfs/ |
D | file.c |
    352  static void kernfs_vma_open(struct vm_area_struct *vma)  in kernfs_vma_open() argument
    354  struct file *file = vma->vm_file;  in kernfs_vma_open()
    364  of->vm_ops->open(vma);  in kernfs_vma_open()
    371  struct file *file = vmf->vma->vm_file;  in kernfs_vma_fault()
    391  struct file *file = vmf->vma->vm_file;  in kernfs_vma_page_mkwrite()
    411  static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,  in kernfs_vma_access() argument
    414  struct file *file = vma->vm_file;  in kernfs_vma_access()
    426  ret = of->vm_ops->access(vma, addr, buf, len, write);  in kernfs_vma_access()
    433  static int kernfs_vma_set_policy(struct vm_area_struct *vma,  in kernfs_vma_set_policy() argument
    436  struct file *file = vma->vm_file;  in kernfs_vma_set_policy()
    [all …]
|
/fs/nilfs2/ |
D | file.c |
    47  struct vm_area_struct *vma = vmf->vma;  in nilfs_page_mkwrite() local
    49  struct inode *inode = file_inode(vma->vm_file);  in nilfs_page_mkwrite()
    98  file_update_time(vma->vm_file);  in nilfs_page_mkwrite()
    99  ret = block_page_mkwrite(vma, vmf, nilfs_get_block);  in nilfs_page_mkwrite()
    126  static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)  in nilfs_file_mmap() argument
    129  vma->vm_ops = &nilfs_file_vm_ops;  in nilfs_file_mmap()
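nilfs_page_mkwrite() shows the standard write-fault protocol: bump the file timestamps, then let block_page_mkwrite() lock the page and allocate blocks under it. A hedged sketch of that shape, assuming a hypothetical get_block callback and simplified error mapping:

#include <linux/buffer_head.h>
#include <linux/mm.h>

static int myfs_get_block(struct inode *inode, sector_t block,
			  struct buffer_head *bh, int create);	/* hypothetical */

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	int err;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);	/* mtime/ctime before dirtying */
	err = block_page_mkwrite(vma, vmf, myfs_get_block);
	sb_end_pagefault(inode->i_sb);

	if (err)
		return (err == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
	return VM_FAULT_LOCKED;	/* page comes back locked and prepared */
}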
|
/fs/9p/ |
D | vfs_file.c |
    499  v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)  in v9fs_file_mmap() argument
    509  return generic_file_readonly_mmap(filp, vma);  in v9fs_file_mmap()
    512  retval = generic_file_mmap(filp, vma);  in v9fs_file_mmap()
    514  vma->vm_ops = &v9fs_mmap_file_vm_ops;  in v9fs_file_mmap()
    523  struct file *filp = vmf->vma->vm_file;  in v9fs_vm_page_mkwrite()
    554  static void v9fs_mmap_vm_close(struct vm_area_struct *vma)  in v9fs_mmap_vm_close() argument
    561  .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,  in v9fs_mmap_vm_close()
    563  .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +  in v9fs_mmap_vm_close()
    564  (vma->vm_end - vma->vm_start - 1),  in v9fs_mmap_vm_close()
    567  if (!(vma->vm_flags & VM_SHARED))  in v9fs_mmap_vm_close()
    [all …]
|
/fs/cramfs/ |
D | inode.c |
    346  static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)  in cramfs_physmem_mmap() argument
    351  unsigned long address, pgoff = vma->vm_pgoff;  in cramfs_physmem_mmap()
    355  ret = generic_file_readonly_mmap(file, vma);  in cramfs_physmem_mmap()
    366  if (vma->vm_flags & VM_WRITE)  in cramfs_physmem_mmap()
    373  pages = min(vma_pages(vma), max_pages - pgoff);  in cramfs_physmem_mmap()
    395  if (pages == vma_pages(vma)) {  in cramfs_physmem_mmap()
    402  ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,  in cramfs_physmem_mmap()
    403  pages * PAGE_SIZE, vma->vm_page_prot);  in cramfs_physmem_mmap()
    411  vm_flags_set(vma, VM_MIXEDMAP);  in cramfs_physmem_mmap()
    416  vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);  in cramfs_physmem_mmap()
    [all …]
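When the requested range is physically contiguous, cramfs_physmem_mmap() maps device memory straight into userspace with remap_pfn_range(); otherwise it falls back to inserting pages one at a time (vmf_insert_mixed() under VM_MIXEDMAP). A minimal sketch of the contiguous case, assuming a hypothetical driver that owns memory at mydev_phys_base:

#include <linux/mm.h>
#include <linux/pfn.h>

static phys_addr_t mydev_phys_base;	/* hypothetical, set at probe time */

/* Map read-only device/ROM pages directly into the caller's VMA. */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = PHYS_PFN(mydev_phys_base) + vma->vm_pgoff;

	if (vma->vm_flags & VM_WRITE)	/* backing store is read-only */
		return -EACCES;

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}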
|
/fs/udf/ |
D | file.c |
    39  struct vm_area_struct *vma = vmf->vma;  in udf_page_mkwrite() local
    40  struct inode *inode = file_inode(vma->vm_file);  in udf_page_mkwrite()
    49  file_update_time(vma->vm_file);  in udf_page_mkwrite()
    192  static int udf_file_mmap(struct file *file, struct vm_area_struct *vma)  in udf_file_mmap() argument
    195  vma->vm_ops = &udf_file_vm_ops;  in udf_file_mmap()
|
/fs/ext2/ |
D | file.c |
    95  struct inode *inode = file_inode(vmf->vma->vm_file);  in ext2_dax_fault()
    98  (vmf->vma->vm_flags & VM_SHARED);  in ext2_dax_fault()
    102  file_update_time(vmf->vma->vm_file);  in ext2_dax_fault()
    125  static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)  in ext2_file_mmap() argument
    128  return generic_file_mmap(file, vma);  in ext2_file_mmap()
    131  vma->vm_ops = &ext2_dax_vm_ops;  in ext2_file_mmap()
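ext2_file_mmap() branches on whether the inode is DAX: ordinary files take generic_file_mmap() and the page cache, DAX files get a dedicated vm_ops table so faults go straight to the DAX code. The branch in isolation (names and the empty ops table are hypothetical placeholders):

#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct myfs_dax_vm_ops;	/* hypothetical */

static int myfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_mmap(file, vma);	/* page-cache path */

	file_accessed(file);
	vma->vm_ops = &myfs_dax_vm_ops;	/* faults handled by the DAX code */
	return 0;
}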
|
/fs/afs/ |
D | file.c |
    21  static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
    551  static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)  in afs_file_mmap() argument
    558  ret = generic_file_mmap(file, vma);  in afs_file_mmap()
    560  vma->vm_ops = &afs_vm_ops;  in afs_file_mmap()
    566  static void afs_vm_open(struct vm_area_struct *vma)  in afs_vm_open() argument
    568  afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));  in afs_vm_open()
    571  static void afs_vm_close(struct vm_area_struct *vma)  in afs_vm_close() argument
    573  afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));  in afs_vm_close()
    578  struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));  in afs_vm_map_pages()
|
/fs/ramfs/ |
D | file-nommu.c |
    31  static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
    265  static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)  in ramfs_nommu_mmap() argument
    267  if (!is_nommu_shared_mapping(vma->vm_flags))  in ramfs_nommu_mmap()
    271  vma->vm_ops = &generic_file_vm_ops;  in ramfs_nommu_mmap()
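On !MMU kernels a filesystem's ->mmap can only honour mappings it can back directly: ramfs_nommu_mmap() accepts shared mappings and returns -ENOSYS for the rest, which tells the nommu mmap core to fall back to making a private copy of the data. The shape of such a handler (mynommu_mmap is hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

static int mynommu_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Only shared mappings can be mapped in place on !MMU;
	 * -ENOSYS makes the nommu core copy the data instead. */
	if (!is_nommu_shared_mapping(vma->vm_flags))
		return -ENOSYS;

	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

romfs (below) uses the same convention but never maps in place, so its handler is just the is_nommu_shared_mapping() test.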
|
/fs/vboxsf/ |
D | file.c |
    157  static void vboxsf_vma_close(struct vm_area_struct *vma)  in vboxsf_vma_close() argument
    159  filemap_write_and_wait(vma->vm_file->f_mapping);  in vboxsf_vma_close()
    168  static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)  in vboxsf_file_mmap() argument
    172  err = generic_file_mmap(file, vma);  in vboxsf_file_mmap()
    174  vma->vm_ops = &vboxsf_file_vm_ops;  in vboxsf_file_mmap()
|
/fs/ext4/ |
D | file.c |
    708  struct inode *inode = file_inode(vmf->vma->vm_file);  in ext4_dax_huge_fault()
    723  (vmf->vma->vm_flags & VM_SHARED);  in ext4_dax_huge_fault()
    724  struct address_space *mapping = vmf->vma->vm_file->f_mapping;  in ext4_dax_huge_fault()
    729  file_update_time(vmf->vma->vm_file);  in ext4_dax_huge_fault()
    782  static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)  in ext4_file_mmap() argument
    794  if (!daxdev_mapping_supported(vma, dax_dev))  in ext4_file_mmap()
    799  vma->vm_ops = &ext4_dax_vm_ops;  in ext4_file_mmap()
    800  vm_flags_set(vma, VM_HUGEPAGE);  in ext4_file_mmap()
    802  vma->vm_ops = &ext4_file_vm_ops;  in ext4_file_mmap()
|
/fs/fuse/ |
D | passthrough.c |
    217  ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma)  in fuse_passthrough_mmap() argument
    227  if (WARN_ON(file != vma->vm_file))  in fuse_passthrough_mmap()
    230  vma->vm_file = get_file(passthrough_filp);  in fuse_passthrough_mmap()
    233  ret = call_mmap(vma->vm_file, vma);  in fuse_passthrough_mmap()
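fuse_passthrough_mmap() retargets the VMA at the backing file and delegates ->mmap to it with call_mmap(). A hedged sketch of that file-swap pattern under assumed reference-counting rules (the VMA holds one reference on whatever vma->vm_file points at; stacked_mmap and its parameters are hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

static int stacked_mmap(struct file *file, struct vm_area_struct *vma,
			struct file *backing)
{
	int ret;

	if (WARN_ON(file != vma->vm_file))
		return -EIO;

	vma->vm_file = get_file(backing);	/* VMA now pins the backing file */
	ret = call_mmap(vma->vm_file, vma);
	if (ret)
		fput(backing);	/* undo the swap on failure */
	else
		fput(file);	/* drop the ref the VMA held on the stacked file */
	return ret;
}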
|
/fs/erofs/ |
D | data.c |
    432  static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)  in erofs_file_mmap() argument
    435  return generic_file_readonly_mmap(file, vma);  in erofs_file_mmap()
    437  if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))  in erofs_file_mmap()
    440  vma->vm_ops = &erofs_dax_vm_ops;  in erofs_file_mmap()
    441  vm_flags_set(vma, VM_HUGEPAGE);  in erofs_file_mmap()
|
/fs/romfs/ |
D | mmap-nommu.c |
    64  static int romfs_mmap(struct file *file, struct vm_area_struct *vma)  in romfs_mmap() argument
    66  return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -ENOSYS;  in romfs_mmap()
|
/fs/orangefs/ |
D | file.c |
    378  struct file *file = vmf->vma->vm_file;  in orangefs_fault()
    401  static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)  in orangefs_file_mmap() argument
    413  vm_flags_mod(vma, VM_SEQ_READ, VM_RAND_READ);  in orangefs_file_mmap()
    416  vma->vm_ops = &orangefs_file_vm_ops;  in orangefs_file_mmap()
|
/fs/zonefs/ |
D | file.c |
    283  struct inode *inode = file_inode(vmf->vma->vm_file);  in zonefs_filemap_page_mkwrite()
    297  file_update_time(vmf->vma->vm_file);  in zonefs_filemap_page_mkwrite()
    314  static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)  in zonefs_file_mmap() argument
    323  (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))  in zonefs_file_mmap()
    327  vma->vm_ops = &zonefs_file_vm_ops;  in zonefs_file_mmap()
|