
Searched refs:vm_start (Results 1 – 16 of 16) sorted by relevance

/fs/proc/
task_nommu.c
37 size += region->vm_end - region->vm_start; in task_mem()
39 size = vma->vm_end - vma->vm_start; in task_mem()
92 vsize += vma->vm_end - vma->vm_start; in task_vsize()
114 size += region->vm_end - region->vm_start; in task_statm()
138 return vma->vm_start <= mm->start_stack && in is_stack()
167 vma->vm_start, in nommu_vma_show()
task_mmu.c
229 if (vma && vma->vm_start <= last_addr) in m_start()
238 m->version = vma->vm_start; in m_start()
325 return vma->vm_start <= vma->vm_mm->start_stack && in is_stack()
368 start = vma->vm_start; in show_map_vma()
395 if (vma->vm_start <= mm->brk && in show_map_vma()
896 SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start); in show_smap()
947 show_vma_header_prefix(m, priv->mm->mmap ? priv->mm->mmap->vm_start : 0, in show_smaps_rollup()
1358 hole_end = min(end, vma->vm_start); in pagemap_pte_hole()
1898 pol = __get_vma_policy(vma, vma->vm_start); in show_numa_map()
1906 seq_printf(m, "%08lx %s", vma->vm_start, buffer); in show_numa_map()
[all …]
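
Note: the task_nommu.c and task_mmu.c hits above all lean on the same invariant: a VMA covers the half-open, page-aligned range [vm_start, vm_end), so its size in bytes is vm_end - vm_start. Below is a minimal sketch of that idiom, assuming only the vm_start/vm_end fields of struct vm_area_struct; the helper name is illustrative and not taken from the listing.

    #include <linux/mm.h>

    /*
     * Illustrative helper (not part of the listing): size of a VMA in bytes.
     * vm_start is inclusive, vm_end is exclusive; both are page-aligned,
     * which is why task_mem(), show_smap() and friends subtract them directly.
     */
    static inline unsigned long vma_size_bytes(const struct vm_area_struct *vma)
    {
            return vma->vm_end - vma->vm_start;
    }
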
vmcore.c
557 size_t size = vma->vm_end - vma->vm_start; in mmap_vmcore()
581 if (remap_pfn_range(vma, vma->vm_start, pfn, tsz, in mmap_vmcore()
614 if (vmcoredd_mmap_dumps(vma, vma->vm_start + len, in mmap_vmcore()
631 if (remap_vmalloc_range_partial(vma, vma->vm_start + len, in mmap_vmcore()
650 if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len, in mmap_vmcore()
665 do_munmap(vma->vm_mm, vma->vm_start, len, NULL); in mmap_vmcore()
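
Note: mmap_vmcore() above is one of several ->mmap handlers in this listing (io_uring_mmap() and cramfs_physmem_mmap() are the others) that hand vma->vm_start straight to remap_pfn_range() and size the mapping as vm_end - vm_start. A minimal sketch of that shape follows, assuming a driver-supplied starting PFN; example_mmap and base_pfn are placeholders, not names from the listing.

    #include <linux/fs.h>
    #include <linux/mm.h>

    /*
     * Illustrative ->mmap handler (not from the listing): map a physically
     * contiguous region into the whole VMA, starting at vm_start.
     */
    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long size = vma->vm_end - vma->vm_start;
            unsigned long base_pfn = 0;     /* assumption: provided by the driver */

            return remap_pfn_range(vma, vma->vm_start, base_pfn, size,
                                   vma->vm_page_prot);
    }
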
nommu.c
52 region->vm_start, in nommu_region_show()
base.c
1971 unsigned long vm_start, vm_end; in map_files_d_revalidate() local
1990 if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) { in map_files_d_revalidate()
1993 exact_vma_exists = !!find_exact_vma(mm, vm_start, in map_files_d_revalidate()
2022 unsigned long vm_start, vm_end; in map_files_get_link() local
2038 rc = dname_to_vma_addr(dentry, &vm_start, &vm_end); in map_files_get_link()
2047 vma = find_exact_vma(mm, vm_start, vm_end); in map_files_get_link()
2119 unsigned long vm_start, vm_end; in proc_map_files_lookup() local
2135 if (dname_to_vma_addr(dentry, &vm_start, &vm_end)) in proc_map_files_lookup()
2147 vma = find_exact_vma(mm, vm_start, vm_end); in proc_map_files_lookup()
2233 p->start = vma->vm_start; in proc_map_files_readdir()
/fs/
userfaultfd.c
833 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) { in userfaultfd_unmap_prep()
911 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, in userfaultfd_release()
1356 if (vma->vm_start >= end) in userfaultfd_register()
1375 for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { in userfaultfd_register()
1403 end > cur->vm_start) { in userfaultfd_register()
1433 if (vma->vm_start < start) in userfaultfd_register()
1453 if (vma->vm_start > start) in userfaultfd_register()
1454 start = vma->vm_start; in userfaultfd_register()
1467 if (vma->vm_start < start) { in userfaultfd_register()
1490 } while (vma && vma->vm_start < end); in userfaultfd_register()
[all …]
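
Note: the userfaultfd.c matches above show the other recurring pattern in this listing: walking every VMA that overlaps a [start, end) range by starting at find_vma() and stopping once vm_start reaches end. A minimal sketch of that loop follows, assuming the pre-maple-tree vm_next chain seen in these results and that the caller holds mmap_lock; the function name is illustrative.

    #include <linux/mm.h>

    /*
     * Illustrative walk (not from the listing): visit each VMA overlapping
     * [start, end), mirroring the loops in userfaultfd_unmap_prep() and
     * userfaultfd_register() above. Caller must hold mmap_lock.
     */
    static void walk_vmas_in_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
    {
            struct vm_area_struct *vma;

            /* find_vma() returns the first VMA with vm_end > start */
            for (vma = find_vma(mm, start); vma && vma->vm_start < end;
                 vma = vma->vm_next) {
                    /* operate on the part of *vma inside [start, end) */
            }
    }
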
binfmt_elf_fdpic.c
1199 kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags); in maydump()
1207 kdcore("%08lx: %08lx: no (!read)", vma->vm_start, vma->vm_flags); in maydump()
1215 kdcore("%08lx: %08lx: %s (DAX shared)", vma->vm_start, in maydump()
1219 kdcore("%08lx: %08lx: %s (DAX private)", vma->vm_start, in maydump()
1229 kdcore("%08lx: %08lx: %s (share)", vma->vm_start, in maydump()
1235 kdcore("%08lx: %08lx: %s (share)", vma->vm_start, in maydump()
1244 kdcore("%08lx: %08lx: %s (!anon)", vma->vm_start, in maydump()
1251 kdcore("%08lx: %08lx: %s", vma->vm_start, vma->vm_flags, in maydump()
1506 for (addr = vma->vm_start; addr < vma->vm_end; in elf_fdpic_dump_segments()
1522 if (!dump_emit(cprm, (void *) vma->vm_start, in elf_fdpic_dump_segments()
[all …]
exec.c
267 vma->vm_start = vma->vm_end - PAGE_SIZE; in __bprm_mm_init()
638 unsigned long old_start = vma->vm_start; in shift_arg_pages()
675 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); in shift_arg_pages()
684 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); in shift_arg_pages()
725 if (vma->vm_end - vma->vm_start > stack_base) in setup_arg_pages()
730 stack_shift = vma->vm_start - stack_base; in setup_arg_pages()
738 unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) in setup_arg_pages()
768 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end, in setup_arg_pages()
785 stack_size = vma->vm_end - vma->vm_start; in setup_arg_pages()
793 stack_base = vma->vm_start + rlim_stack; in setup_arg_pages()
[all …]
binfmt_elf.c
1359 u32 __user *header = (u32 __user *) vma->vm_start; in vma_dump_size()
1391 return vma->vm_end - vma->vm_start; in vma_dump_size()
1632 *start_end_ofs++ = vma->vm_start; in fill_files_note()
2308 phdr.p_vaddr = vma->vm_start; in elf_core_dump()
2311 phdr.p_memsz = vma->vm_end - vma->vm_start; in elf_core_dump()
2343 end = vma->vm_start + vma_filesz[i++]; in elf_core_dump()
2345 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { in elf_core_dump()
dax.c
771 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in pgoff_address()
772 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in pgoff_address()
1511 if (pmd_addr < vma->vm_start) in dax_iomap_pmd_fault()
io_uring.c
3661 unsigned long sz = vma->vm_end - vma->vm_start; in io_uring_mmap()
3684 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); in io_uring_mmap()
aio.c
348 ctx->user_id = ctx->mmap_base = vma->vm_start; in aio_ring_mremap()
/fs/cramfs/
inode.c
412 ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, in cramfs_physmem_mmap()
426 vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn); in cramfs_physmem_mmap()
436 address, pages, vma_pages(vma), vma->vm_start, in cramfs_physmem_mmap()
/fs/coda/
file.c
142 count = vma->vm_end - vma->vm_start; in coda_file_mmap()
/fs/hugetlbfs/
inode.c
174 vma_len = (loff_t)(vma->vm_end - vma->vm_start); in hugetlbfs_file_mmap()
440 + vma->vm_start; in hugetlb_vmdelete_list()
445 unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end, in hugetlb_vmdelete_list()
/fs/9p/
vfs_file.c
615 (vma->vm_end - vma->vm_start - 1), in v9fs_mmap_vm_close()