Lines matching references to len (Linux kernel, mm/mmap.c)

53 #define arch_mmap_check(addr, len, flags)	(0)  argument
57 #define arch_rebalance_pgtables(addr, len) (addr) argument
298 static unsigned long do_brk(unsigned long addr, unsigned long len);
1276 unsigned long len) in mlock_future_check() argument
1282 locked = len >> PAGE_SHIFT; in mlock_future_check()
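
Line 1282 shows the core of mlock_future_check(): the requested length is converted to pages and weighed against the caller's RLIMIT_MEMLOCK. A stand-alone sketch of that arithmetic, assuming 4 KiB pages and passing the current locked page count and the byte limit in explicitly (the kernel reads them from mm->locked_vm and rlimit(); the CAP_IPC_LOCK bypass is omitted here):

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */

/* Sketch of the RLIMIT_MEMLOCK check done by mlock_future_check().
 * already_locked   = pages already locked by this mm
 * lock_limit_bytes = the RLIMIT_MEMLOCK soft limit, in bytes */
static int would_exceed_memlock(unsigned long len, unsigned long already_locked,
				unsigned long lock_limit_bytes)
{
	unsigned long locked = len >> PAGE_SHIFT;   /* new pages requested */

	locked += already_locked;
	return locked > (lock_limit_bytes >> PAGE_SHIFT);   /* over the limit? */
}
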
1297 unsigned long len, unsigned long prot, in do_mmap_pgoff() argument
1316 if (!len) in do_mmap_pgoff()
1323 len = PAGE_ALIGN(len); in do_mmap_pgoff()
1324 if (!len) in do_mmap_pgoff()
1328 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) in do_mmap_pgoff()
1338 addr = get_unmapped_area(file, addr, len, pgoff, flags); in do_mmap_pgoff()
1353 if (mlock_future_check(mm, vm_flags, len)) in do_mmap_pgoff()
1436 addr = mmap_region(file, addr, len, vm_flags, pgoff); in do_mmap_pgoff()
1440 *populate = len; in do_mmap_pgoff()
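
Lines 1316-1328 boil down to three arithmetic checks on len before do_mmap_pgoff() goes any further: reject a zero length, round it up to a page boundary (and reject a wrap-around), and make sure the page count does not overflow when added to pgoff. A minimal user-space sketch of the same checks; PAGE_SHIFT is assumed to be 12 (4 KiB pages) rather than taken from kernel headers:

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Sketch of the early len validation in do_mmap_pgoff(); returns 0 if usable. */
static int mmap_len_ok(unsigned long *len, unsigned long pgoff)
{
	if (!*len)
		return -1;                      /* zero-length mapping */

	*len = PAGE_ALIGN(*len);
	if (!*len)
		return -1;                      /* len wrapped around on alignment */

	if ((pgoff + (*len >> PAGE_SHIFT)) < pgoff)
		return -1;                      /* offset + length overflows in pages */

	return 0;
}
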
1444 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, in SYSCALL_DEFINE6() argument
1457 len = ALIGN(len, huge_page_size(hstate_file(file))); in SYSCALL_DEFINE6()
1469 len = ALIGN(len, huge_page_size(hs)); in SYSCALL_DEFINE6()
1476 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, in SYSCALL_DEFINE6()
1486 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); in SYSCALL_DEFINE6()
1497 unsigned long len; member
1513 return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, in SYSCALL_DEFINE1()
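
Both entry points above, the mmap_pgoff syscall and the legacy single-struct variant, funnel the user-supplied length through vm_mmap_pgoff() into do_mmap_pgoff(). From user space the length does not have to be page aligned; a small illustrative program (on a 4 KiB-page system the 5000-byte request ends up mapping two pages):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 5000;              /* deliberately not page aligned */

	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(p, 0, len);              /* touch the requested bytes; the rest of
					   the last page is also mapped */

	if (munmap(p, len) != 0) {      /* munmap page-aligns len as well */
		perror("munmap");
		return 1;
	}
	return 0;
}
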
1572 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff) in mmap_region() argument
1581 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) { in mmap_region()
1591 nr_pages = count_vma_pages_range(mm, addr, addr + len); in mmap_region()
1593 if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages)) in mmap_region()
1600 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { in mmap_region()
1601 if (do_munmap(mm, addr, len)) in mmap_region()
1610 charged = len >> PAGE_SHIFT; in mmap_region()
1619 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, in mmap_region()
1637 vma->vm_end = addr + len; in mmap_region()
1694 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); in mmap_region()
1698 mm->locked_vm += (len >> PAGE_SHIFT); in mmap_region()
1955 unsigned long len, unsigned long pgoff, unsigned long flags) in arch_get_unmapped_area() argument
1961 if (len > TASK_SIZE - mmap_min_addr) in arch_get_unmapped_area()
1970 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && in arch_get_unmapped_area()
1971 (!vma || addr + len <= vm_start_gap(vma)) && in arch_get_unmapped_area()
1977 info.length = len; in arch_get_unmapped_area()
1992 const unsigned long len, const unsigned long pgoff, in arch_get_unmapped_area_topdown() argument
2001 if (len > TASK_SIZE - mmap_min_addr) in arch_get_unmapped_area_topdown()
2011 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && in arch_get_unmapped_area_topdown()
2012 (!vma || addr + len <= vm_start_gap(vma)) && in arch_get_unmapped_area_topdown()
2018 info.length = len; in arch_get_unmapped_area_topdown()
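
Both arch_get_unmapped_area() and its topdown variant apply the same test to the caller's address hint before falling back to a gap search (lines 1961-1971 and 2001-2012): len must fit in the usable address range, and [addr, addr + len) must end at or before the start of the next VMA, guard gap included. A simplified predicate, with TASK_SIZE, mmap_min_addr and vm_start_gap(find_vma(...)) passed in as plain parameters rather than looked up:

/* Sketch of the hint test shared by both arch_get_unmapped_area() variants.
 * task_size, min_addr and next_vma_start stand in for TASK_SIZE,
 * mmap_min_addr and vm_start_gap() of the VMA after addr (ULONG_MAX if none). */
static int hint_usable(unsigned long addr, unsigned long len,
		       unsigned long task_size, unsigned long min_addr,
		       unsigned long next_vma_start)
{
	if (len > task_size - min_addr)
		return 0;                       /* request can never fit */
	if (task_size - len < addr || addr < min_addr)
		return 0;                       /* hint outside the usable range */
	return addr + len <= next_vma_start;    /* must not run into the next VMA */
}
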
2043 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, in get_unmapped_area() argument
2049 unsigned long error = arch_mmap_check(addr, len, flags); in get_unmapped_area()
2054 if (len > TASK_SIZE) in get_unmapped_area()
2060 addr = get_area(file, addr, len, pgoff, flags); in get_unmapped_area()
2064 if (addr > TASK_SIZE - len) in get_unmapped_area()
2069 addr = arch_rebalance_pgtables(addr, len); in get_unmapped_area()
2580 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) in do_munmap() argument
2585 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) in do_munmap()
2588 len = PAGE_ALIGN(len); in do_munmap()
2589 if (len == 0) in do_munmap()
2600 end = start + len; in do_munmap()
2664 int vm_munmap(unsigned long start, size_t len) in vm_munmap() argument
2670 ret = do_munmap(mm, start, len); in vm_munmap()
2676 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) in SYSCALL_DEFINE2() argument
2679 return vm_munmap(addr, len); in SYSCALL_DEFINE2()
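
do_munmap() (lines 2585-2600) is the one path here where the start address, not just the length, must already be page aligned; len itself is rounded up and only then checked for zero, and the end of the range is simply start + len. A sketch of that validation, with the page constants assumed (4 KiB pages) and task_size standing in for TASK_SIZE:

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Sketch of do_munmap()'s argument checks. */
static int munmap_args_ok(unsigned long start, unsigned long *len,
			  unsigned long task_size)
{
	if ((start & ~PAGE_MASK) || start > task_size || *len > task_size - start)
		return -1;                      /* misaligned start or range off the end */

	*len = PAGE_ALIGN(*len);
	if (*len == 0)
		return -1;                      /* nothing to unmap */

	return 0;                               /* unmap [start, start + *len) */
}
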
2697 static unsigned long do_brk(unsigned long addr, unsigned long len) in do_brk() argument
2706 len = PAGE_ALIGN(len); in do_brk()
2707 if (!len) in do_brk()
2712 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); in do_brk()
2716 error = mlock_future_check(mm, mm->def_flags, len); in do_brk()
2730 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { in do_brk()
2731 if (do_munmap(mm, addr, len)) in do_brk()
2737 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) in do_brk()
2743 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) in do_brk()
2747 vma = vma_merge(mm, prev, addr, addr + len, flags, in do_brk()
2757 vm_unacct_memory(len >> PAGE_SHIFT); in do_brk()
2764 vma->vm_end = addr + len; in do_brk()
2771 mm->total_vm += len >> PAGE_SHIFT; in do_brk()
2773 mm->locked_vm += (len >> PAGE_SHIFT); in do_brk()
2778 unsigned long vm_brk(unsigned long addr, unsigned long len) in vm_brk() argument
2785 ret = do_brk(addr, len); in vm_brk()
2789 mm_populate(addr, len); in vm_brk()
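
As in mmap_region() (lines 1694-1698), do_brk() accounts the new mapping in pages at lines 2771-2773: len >> PAGE_SHIFT pages go into total_vm and, for VM_LOCKED mappings, into locked_vm, after which vm_brk() populates the range. A tiny sketch of that bookkeeping, using a minimal stand-in struct for the two counters involved:

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */

/* Minimal stand-in for the two mm_struct counters touched here. */
struct mm_counters {
	unsigned long total_vm;         /* total mapped pages */
	unsigned long locked_vm;        /* pages that must stay resident */
};

/* Sketch of the accounting at the end of do_brk(); len is already page aligned. */
static void account_brk(struct mm_counters *mm, unsigned long len, int vm_locked)
{
	unsigned long pages = len >> PAGE_SHIFT;

	mm->total_vm += pages;
	if (vm_locked)
		mm->locked_vm += pages;
}
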
2885 unsigned long addr, unsigned long len, pgoff_t pgoff, in copy_vma() argument
2904 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) in copy_vma()
2906 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
2936 new_vma->vm_end = addr + len; in copy_vma()
3037 unsigned long addr, unsigned long len, in __install_special_mapping() argument
3051 vma->vm_end = addr + len; in __install_special_mapping()
3063 mm->total_vm += len >> PAGE_SHIFT; in __install_special_mapping()
3085 unsigned long addr, unsigned long len, in _install_special_mapping() argument
3088 return __install_special_mapping(mm, addr, len, vm_flags, in _install_special_mapping()
3093 unsigned long addr, unsigned long len, in install_special_mapping() argument
3097 mm, addr, len, vm_flags, &legacy_special_mapping_vmops, in install_special_mapping()