Lines Matching refs:addr
59 #define arch_mmap_check(addr, len, flags) (0) argument
185 static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
528 static int find_vma_links(struct mm_struct *mm, unsigned long addr, in find_vma_links() argument
543 if (vma_tmp->vm_end > addr) { in find_vma_links()
563 unsigned long addr, unsigned long end) in count_vma_pages_range() argument
569 vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
574 max(addr, vma->vm_start)) >> PAGE_SHIFT; in count_vma_pages_range()
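The min/max expression in count_vma_pages_range() above is an ordinary interval-intersection count. A standalone sketch of the same arithmetic; PAGE_SHIFT_STUB and overlap_pages() are hypothetical stand-ins for the kernel's PAGE_SHIFT and the in-loop computation:

        #define PAGE_SHIFT_STUB 12

        /* Pages a mapping [vm_start, vm_end) contributes to a query range
         * [addr, end): the length of the intersection, in whole pages.
         * e.g. overlap_pages(0x2000, 0x6000, 0x1000, 0x4000) == 2 */
        unsigned long overlap_pages(unsigned long vm_start, unsigned long vm_end,
                                    unsigned long addr, unsigned long end)
        {
                unsigned long lo = vm_start > addr ? vm_start : addr;   /* max() */
                unsigned long hi = vm_end < end ? vm_end : end;         /* min() */

                return hi > lo ? (hi - lo) >> PAGE_SHIFT_STUB : 0;
        }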
1143 struct vm_area_struct *prev, unsigned long addr, in vma_merge() argument
1150 pgoff_t pglen = (end - addr) >> PAGE_SHIFT; in vma_merge()
1170 VM_WARN_ON(prev && addr <= prev->vm_start); in vma_merge()
1172 VM_WARN_ON(addr >= end); in vma_merge()
1177 if (prev && prev->vm_end == addr && in vma_merge()
1217 if (prev && addr < prev->vm_end) /* case 4 */ in vma_merge()
1219 addr, prev->vm_pgoff, NULL, next); in vma_merge()
1221 err = __vma_adjust(area, addr, next->vm_end, in vma_merge()
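vma_merge()'s first question is whether the new range starting at addr can be glued onto the VMA that ends exactly there (the prev->vm_end == addr test above). A much-simplified sketch of that adjacency test; struct vma_stub, can_merge_after() and the page_shift parameter are stand-ins for the kernel's struct vm_area_struct, can_vma_merge_after() and PAGE_SHIFT, and the real check additionally considers anon_vma compatibility and ignores a few flag bits:

        #include <stdbool.h>

        struct vma_stub {
                unsigned long vm_start, vm_end;
                unsigned long vm_flags;
                unsigned long vm_pgoff;        /* offset into the file, in pages */
                const void *vm_file;           /* stand-in for struct file * */
        };

        /* The new range can merge with its predecessor only if it starts
         * exactly where prev ends, carries the same flags, maps the same
         * file, and continues the file offset without a hole. */
        bool can_merge_after(const struct vma_stub *prev, unsigned long addr,
                             unsigned long vm_flags, const void *file,
                             unsigned long pgoff, unsigned long page_shift)
        {
                unsigned long prev_pages =
                        (prev->vm_end - prev->vm_start) >> page_shift;

                return prev->vm_end == addr &&
                       prev->vm_flags == vm_flags &&
                       prev->vm_file == file &&
                       prev->vm_pgoff + prev_pages == pgoff;
        }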
1400 unsigned long do_mmap(struct file *file, unsigned long addr, in do_mmap() argument
1429 addr = round_hint_to_min(addr); in do_mmap()
1447 addr = get_unmapped_area(file, addr, len, pgoff, flags); in do_mmap()
1448 if (offset_in_page(addr)) in do_mmap()
1449 return addr; in do_mmap()
1452 struct vm_area_struct *vma = find_vma(mm, addr); in do_mmap()
1454 if (vma && vma->vm_start < addr + len) in do_mmap()
1559 pgoff = addr >> PAGE_SHIFT; in do_mmap()
1580 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); in do_mmap()
1581 if (!IS_ERR_VALUE(addr) && in do_mmap()
1585 return addr; in do_mmap()
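The find_vma() overlap check in do_mmap() above is what makes MAP_FIXED_NOREPLACE refuse to clobber an existing mapping. A minimal userspace sketch (not part of mm/mmap.c), assuming Linux 4.17 or later and a libc that exposes MAP_FIXED_NOREPLACE (hence _GNU_SOURCE):

        #define _GNU_SOURCE
        #include <sys/mman.h>
        #include <stdio.h>
        #include <errno.h>

        int main(void)
        {
                size_t len = 4096;
                /* First mapping: let the kernel pick the address (hint = NULL). */
                void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (a == MAP_FAILED)
                        return 1;
                /* A second mapping over the same range must fail with EEXIST,
                 * because the requested range overlaps the VMA created above. */
                void *b = mmap(a, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                               -1, 0);
                printf("second mmap: %p (errno=%d)\n", b,
                       b == MAP_FAILED ? errno : 0);
                return 0;
        }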
1588 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, in ksys_mmap_pgoff() argument
1595 addr = untagged_addr(addr); in ksys_mmap_pgoff()
1632 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); in ksys_mmap_pgoff()
1639 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, in SYSCALL_DEFINE6() argument
1643 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); in SYSCALL_DEFINE6()
1648 unsigned long addr; member
1665 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, in SYSCALL_DEFINE1()
1724 unsigned long mmap_region(struct file *file, unsigned long addr, in mmap_region() argument
1742 nr_pages = count_vma_pages_range(mm, addr, addr + len); in mmap_region()
1750 while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, in mmap_region()
1752 if (do_munmap(mm, addr, len, uf)) in mmap_region()
1769 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, in mmap_region()
1785 vma->vm_start = addr; in mmap_region()
1786 vma->vm_end = addr + len; in mmap_region()
1820 WARN_ON_ONCE(addr != vma->vm_start); in mmap_region()
1822 addr = vma->vm_start; in mmap_region()
1868 return addr; in mmap_region()
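mmap_region() shows the other side of that check: with plain MAP_FIXED it first unmaps whatever overlapped the requested range (the find_vma_links()/do_munmap() loop above) and then maps over it. A small userspace sketch of the resulting replace-in-place behaviour:

        #define _GNU_SOURCE
        #include <sys/mman.h>
        #include <string.h>
        #include <stdio.h>

        int main(void)
        {
                size_t len = 4096;
                char *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (a == MAP_FAILED)
                        return 1;
                memset(a, 'x', len);
                /* Re-map the same range: the old anonymous pages are discarded
                 * and replaced by fresh zero-filled ones. */
                char *b = mmap(a, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
                printf("b == a: %d, first byte now: %d\n", b == a, b[0]); /* 1, 0 */
                return 0;
        }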
2094 #define arch_get_mmap_end(addr) (TASK_SIZE) argument
2098 #define arch_get_mmap_base(addr, base) (base) argument
2114 arch_get_unmapped_area(struct file *filp, unsigned long addr, in arch_get_unmapped_area() argument
2120 const unsigned long mmap_end = arch_get_mmap_end(addr); in arch_get_unmapped_area()
2126 return addr; in arch_get_unmapped_area()
2128 if (addr) { in arch_get_unmapped_area()
2129 addr = PAGE_ALIGN(addr); in arch_get_unmapped_area()
2130 vma = find_vma_prev(mm, addr, &prev); in arch_get_unmapped_area()
2131 if (mmap_end - len >= addr && addr >= mmap_min_addr && in arch_get_unmapped_area()
2132 (!vma || addr + len <= vm_start_gap(vma)) && in arch_get_unmapped_area()
2133 (!prev || addr >= vm_end_gap(prev))) in arch_get_unmapped_area()
2134 return addr; in arch_get_unmapped_area()
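The hint handling above page-aligns the caller's address and keeps it only if the whole range fits below mmap_end, starts at or above mmap_min_addr, and collides with neither the next nor the previous VMA. A standalone sketch of that predicate; struct vma_stub and hint_is_usable() are stand-ins, len is assumed to have been bounded already (as in the kernel), and the stack guard gap handled by vm_start_gap()/vm_end_gap() is ignored here:

        #include <stdbool.h>

        struct vma_stub {              /* hypothetical: vm_start/vm_end only */
                unsigned long vm_start;
                unsigned long vm_end;
        };

        /* Accept the caller's hint only if [addr, addr+len) fits below
         * mmap_end, starts at or above min_addr, and intrudes on neither
         * the following VMA nor the preceding one. */
        bool hint_is_usable(unsigned long addr, unsigned long len,
                            unsigned long mmap_end, unsigned long min_addr,
                            const struct vma_stub *next,
                            const struct vma_stub *prev)
        {
                return mmap_end - len >= addr && addr >= min_addr &&
                       (!next || addr + len <= next->vm_start) &&
                       (!prev || addr >= prev->vm_end);
        }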
2152 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, in arch_get_unmapped_area_topdown() argument
2159 const unsigned long mmap_end = arch_get_mmap_end(addr); in arch_get_unmapped_area_topdown()
2166 return addr; in arch_get_unmapped_area_topdown()
2169 if (addr) { in arch_get_unmapped_area_topdown()
2170 addr = PAGE_ALIGN(addr); in arch_get_unmapped_area_topdown()
2171 vma = find_vma_prev(mm, addr, &prev); in arch_get_unmapped_area_topdown()
2172 if (mmap_end - len >= addr && addr >= mmap_min_addr && in arch_get_unmapped_area_topdown()
2173 (!vma || addr + len <= vm_start_gap(vma)) && in arch_get_unmapped_area_topdown()
2174 (!prev || addr >= vm_end_gap(prev))) in arch_get_unmapped_area_topdown()
2175 return addr; in arch_get_unmapped_area_topdown()
2181 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); in arch_get_unmapped_area_topdown()
2183 addr = vm_unmapped_area(&info); in arch_get_unmapped_area_topdown()
2191 if (offset_in_page(addr)) { in arch_get_unmapped_area_topdown()
2192 VM_BUG_ON(addr != -ENOMEM); in arch_get_unmapped_area_topdown()
2196 addr = vm_unmapped_area(&info); in arch_get_unmapped_area_topdown()
2199 return addr; in arch_get_unmapped_area_topdown()
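The offset_in_page()/VM_BUG_ON pair above relies on a convention: vm_unmapped_area() returns either a page-aligned address or a negative errno in the same unsigned long, and because -ENOMEM is not page-aligned, a non-zero page offset marks the failure that triggers the bottom-up retry. A tiny illustration, with PAGE_SIZE_STUB and looks_like_error() as stand-ins:

        #include <errno.h>
        #include <stdio.h>

        #define PAGE_SIZE_STUB 4096UL

        /* Roughly what offset_in_page() checks: real addresses are aligned. */
        int looks_like_error(unsigned long addr)
        {
                return (addr & (PAGE_SIZE_STUB - 1)) != 0;
        }

        int main(void)
        {
                unsigned long ok  = 0x7f0000000000UL;          /* page-aligned */
                unsigned long bad = (unsigned long)-ENOMEM;    /* not aligned */

                printf("%d %d\n", looks_like_error(ok), looks_like_error(bad)); /* 0 1 */
                return 0;
        }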
2204 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, in get_unmapped_area() argument
2210 unsigned long error = arch_mmap_check(addr, len, flags); in get_unmapped_area()
2232 addr = get_area(file, addr, len, pgoff, flags); in get_unmapped_area()
2233 if (IS_ERR_VALUE(addr)) in get_unmapped_area()
2234 return addr; in get_unmapped_area()
2236 if (addr > TASK_SIZE - len) in get_unmapped_area()
2238 if (offset_in_page(addr)) in get_unmapped_area()
2241 error = security_mmap_addr(addr); in get_unmapped_area()
2242 return error ? error : addr; in get_unmapped_area()
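The final range check in get_unmapped_area() is written as addr > TASK_SIZE - len rather than addr + len > TASK_SIZE so the comparison cannot wrap; the function separately bounds len against TASK_SIZE first. A standalone version of the same idea, with TASK_SIZE_STUB and range_fits() as hypothetical stand-ins:

        #include <stdbool.h>

        #define TASK_SIZE_STUB 0x7ffffffff000UL

        /* True when [addr, addr+len) fits entirely below the task size limit.
         * Comparing addr against TASK_SIZE_STUB - len avoids computing
         * addr + len, which could overflow for hostile inputs. */
        bool range_fits(unsigned long addr, unsigned long len)
        {
                return len <= TASK_SIZE_STUB && addr <= TASK_SIZE_STUB - len;
        }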
2248 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) in find_vma() argument
2254 vma = vmacache_find(mm, addr); in find_vma()
2265 if (tmp->vm_end > addr) { in find_vma()
2267 if (tmp->vm_start <= addr) in find_vma()
2275 vmacache_update(addr, vma); in find_vma()
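find_vma()'s contract, visible in the vm_end/vm_start tests above, is "return the first VMA whose vm_end lies above addr"; the result may start above addr, i.e. addr can sit in the gap in front of it. A standalone sketch over a plain sorted array, with struct vma_stub and find_vma_stub() standing in for the kernel's rbtree walk and vmacache:

        #include <stddef.h>

        struct vma_stub {              /* hypothetical: vm_start/vm_end only */
                unsigned long vm_start;
                unsigned long vm_end;
        };

        /* First region (by ascending address) whose vm_end is greater than
         * addr, or NULL if addr lies above every mapping. Callers that need
         * addr inside the region must still check vm_start <= addr. */
        const struct vma_stub *find_vma_stub(const struct vma_stub *tbl, size_t n,
                                             unsigned long addr)
        {
                for (size_t i = 0; i < n; i++)
                        if (tbl[i].vm_end > addr)
                                return &tbl[i];
                return NULL;
        }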
2285 find_vma_prev(struct mm_struct *mm, unsigned long addr, in find_vma_prev() argument
2290 vma = find_vma(mm, addr); in find_vma_prev()
2540 find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
2544 addr &= PAGE_MASK; in find_extend_vma()
2545 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma()
2546 if (vma && (vma->vm_start <= addr)) in find_extend_vma()
2549 if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr)) in find_extend_vma()
2552 populate_vma_page_range(prev, addr, prev->vm_end, NULL); in find_extend_vma()
2562 find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
2567 addr &= PAGE_MASK; in find_extend_vma()
2568 vma = find_vma(mm, addr); in find_extend_vma()
2571 if (vma->vm_start <= addr) in find_extend_vma()
2579 if (expand_stack(vma, addr)) in find_extend_vma()
2582 populate_vma_page_range(vma, addr, start, NULL); in find_extend_vma()
2670 unsigned long addr, int new_below) in __split_vma() argument
2676 err = vma->vm_ops->split(vma, addr); in __split_vma()
2686 new->vm_end = addr; in __split_vma()
2688 new->vm_start = addr; in __split_vma()
2689 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); in __split_vma()
2707 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + in __split_vma()
2708 ((addr - new->vm_start) >> PAGE_SHIFT), new); in __split_vma()
2710 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); in __split_vma()
2734 unsigned long addr, int new_below) in split_vma() argument
2739 return __split_vma(mm, vma, addr, new_below); in split_vma()
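The vm_pgoff adjustment above is the bookkeeping a split must get right for file-backed mappings: the half that now starts at addr has to advance its file offset by the number of pages skipped. A standalone sketch, with PAGE_SHIFT_STUB and split_pgoff() as stand-ins:

        #define PAGE_SHIFT_STUB 12

        /* File offset (in pages) of the half that begins at addr after a
         * split: advance the original vm_pgoff by the pages between the old
         * vm_start and addr. e.g. split_pgoff(0x1000, 10, 0x3000) == 12. */
        unsigned long split_pgoff(unsigned long vm_start, unsigned long vm_pgoff,
                                  unsigned long addr)
        {
                return vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT_STUB);
        }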
2893 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) in SYSCALL_DEFINE2() argument
2895 addr = untagged_addr(addr); in SYSCALL_DEFINE2()
2896 profile_munmap(addr); in SYSCALL_DEFINE2()
2897 return __vm_munmap(addr, len, true); in SYSCALL_DEFINE2()
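__split_vma() above is exactly what a partial munmap exercises. A minimal userspace sketch: unmapping the middle page of a three-page anonymous mapping leaves two separate VMAs behind, visible as two ranges in /proc/self/maps:

        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                long pg = sysconf(_SC_PAGESIZE);
                char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (p == MAP_FAILED)
                        return 1;
                /* Punch a hole in the middle: [p, p+pg) and [p+2*pg, p+3*pg)
                 * remain mapped, each in its own VMA. */
                return munmap(p + pg, pg);
        }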
3005 static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_hea… in do_brk_flags() argument
3010 pgoff_t pgoff = addr >> PAGE_SHIFT; in do_brk_flags()
3018 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); in do_brk_flags()
3029 while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, in do_brk_flags()
3031 if (do_munmap(mm, addr, len, uf)) in do_brk_flags()
3046 vma = vma_merge(mm, prev, addr, addr + len, flags, in do_brk_flags()
3061 vma->vm_start = addr; in do_brk_flags()
3062 vma->vm_end = addr + len; in do_brk_flags()
3077 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) in vm_brk_flags() argument
3094 ret = do_brk_flags(addr, len, flags, &uf); in vm_brk_flags()
3099 mm_populate(addr, len); in vm_brk_flags()
3104 int vm_brk(unsigned long addr, unsigned long len) in vm_brk() argument
3106 return vm_brk_flags(addr, len, 0); in vm_brk()
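do_brk_flags() above backs both the brk(2) syscall and the kernel-internal vm_brk()/vm_brk_flags() helpers used by binary loaders, so its effect is easiest to watch from userspace through the program break. A minimal sketch using sbrk():

        #define _DEFAULT_SOURCE
        #include <unistd.h>
        #include <stdio.h>

        int main(void)
        {
                void *before = sbrk(0);                 /* current program break */
                if (sbrk(4096) == (void *)-1)           /* grow the heap by 4 KiB */
                        return 1;
                void *after = sbrk(0);
                printf("break moved from %p to %p\n", before, after);
                return 0;
        }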
3222 unsigned long addr, unsigned long len, pgoff_t pgoff, in copy_vma() argument
3237 pgoff = addr >> PAGE_SHIFT; in copy_vma()
3241 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) in copy_vma()
3243 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3272 new_vma->vm_start = addr; in copy_vma()
3273 new_vma->vm_end = addr + len; in copy_vma()
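copy_vma() above is the helper mremap() relies on when a mapping has to move to a new address. A minimal userspace sketch with MREMAP_MAYMOVE (Linux-specific, hence _GNU_SOURCE); the kernel may or may not actually relocate the mapping:

        #define _GNU_SOURCE
        #include <sys/mman.h>
        #include <stdio.h>

        int main(void)
        {
                size_t len = 4096;
                void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (p == MAP_FAILED)
                        return 1;
                /* Grow the mapping; if it has to move, the contents of the
                 * old VMA are carried over to the new address. */
                void *q = mremap(p, len, 4 * len, MREMAP_MAYMOVE);
                printf("old %p, new %p\n", p, q);
                return q == MAP_FAILED ? 1 : 0;
        }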
3408 unsigned long addr, unsigned long len, in __install_special_mapping() argument
3419 vma->vm_start = addr; in __install_special_mapping()
3420 vma->vm_end = addr + len; in __install_special_mapping()
3462 unsigned long addr, unsigned long len, in _install_special_mapping() argument
3465 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, in _install_special_mapping()
3470 unsigned long addr, unsigned long len, in install_special_mapping() argument
3474 mm, addr, len, vm_flags, (void *)pages, in install_special_mapping()
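install_special_mapping() and friends above are how architecture code inserts regions such as the vDSO, which then appear as named entries in /proc/self/maps. A quick userspace sketch that prints the [vdso] line, assuming the architecture provides one:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char line[512];
                FILE *f = fopen("/proc/self/maps", "r");

                if (!f)
                        return 1;
                while (fgets(line, sizeof(line), f))
                        if (strstr(line, "[vdso]"))
                                fputs(line, stdout);
                fclose(f);
                return 0;
        }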