Lines Matching refs:mm

The function names below all belong, by the look of the set, to the Linux kernel's mm/util.c. This is cross-reference output: only source lines that reference the identifier mm are listed, so multi-line prototypes and calls appear truncated.

278 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,  in __vma_link_list()  argument
288 next = mm->mmap; in __vma_link_list()
289 mm->mmap = vma; in __vma_link_list()
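
The two matches above are from __vma_link_list(), which splices a new VMA into the mm's linked list of vm_area_structs; when no predecessor is passed, the new VMA becomes the list head at mm->mmap. A sketch of the whole function, with the lines that don't mention mm reconstructed from context (only matching lines are listed above), could look like:

    void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct vm_area_struct *prev)
    {
            struct vm_area_struct *next;

            vma->vm_prev = prev;
            if (prev) {
                    /* Insert after an existing VMA. */
                    next = prev->vm_next;
                    prev->vm_next = vma;
            } else {
                    /* No predecessor: the new VMA becomes the list head. */
                    next = mm->mmap;
                    mm->mmap = vma;
            }
            vma->vm_next = next;
            if (next)
                    next->vm_prev = vma;
    }
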
296 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma) in __vma_unlink_list() argument
305 mm->mmap = next; in __vma_unlink_list()
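
__vma_unlink_list() is the inverse operation; the single match is the head-update branch taken when the VMA being removed is the first in the list. A reconstruction under the same assumption:

    void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
    {
            struct vm_area_struct *prev, *next;

            next = vma->vm_next;
            prev = vma->vm_prev;
            if (prev)
                    prev->vm_next = next;
            else
                    /* Removing the first VMA: advance the list head. */
                    mm->mmap = next;
            if (next)
                    next->vm_prev = prev;
    }
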
371 unsigned long arch_randomize_brk(struct mm_struct *mm) in arch_randomize_brk() argument
375 return randomize_page(mm->brk, SZ_32M); in arch_randomize_brk()
377 return randomize_page(mm->brk, SZ_1G); in arch_randomize_brk()
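
arch_randomize_brk() returns a randomized base for the heap above mm->brk: the two matched returns show a 32 MiB window and a 1 GiB window. A sketch, assuming the usual compat-task check selects the narrower window for 32-bit tasks:

    unsigned long arch_randomize_brk(struct mm_struct *mm)
    {
            /* 32-bit tasks get a smaller randomization window. */
            if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
                    return randomize_page(mm->brk, SZ_32M);

            return randomize_page(mm->brk, SZ_1G);
    }
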
434 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) in arch_pick_mmap_layout() argument
442 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; in arch_pick_mmap_layout()
443 mm->get_unmapped_area = arch_get_unmapped_area; in arch_pick_mmap_layout()
445 mm->mmap_base = mmap_base(random_factor, rlim_stack); in arch_pick_mmap_layout()
446 mm->get_unmapped_area = arch_get_unmapped_area_topdown; in arch_pick_mmap_layout()
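
This first arch_pick_mmap_layout() definition picks between the legacy bottom-up layout (searching upward from TASK_UNMAPPED_BASE) and the top-down layout (mm->mmap_base computed below the stack from the stack rlimit). A sketch with the selection logic filled in, assuming the mmap_is_legacy() test and a PF_RANDOMIZE-gated random factor as in the common kernel helper:

    void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
    {
            unsigned long random_factor = 0UL;

            if (current->flags & PF_RANDOMIZE)
                    random_factor = arch_mmap_rnd();

            if (mmap_is_legacy(rlim_stack)) {
                    /* Bottom-up: search upward from a fixed base. */
                    mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                    mm->get_unmapped_area = arch_get_unmapped_area;
            } else {
                    /* Top-down: search downward from just below the stack. */
                    mm->mmap_base = mmap_base(random_factor, rlim_stack);
                    mm->get_unmapped_area = arch_get_unmapped_area_topdown;
            }
    }
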
450 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) in arch_pick_mmap_layout() argument
452 mm->mmap_base = TASK_UNMAPPED_BASE; in arch_pick_mmap_layout()
453 mm->get_unmapped_area = arch_get_unmapped_area; in arch_pick_mmap_layout()
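
The second definition is plausibly the fallback for configurations without the shared layout code: a fixed base and always the bottom-up allocator, no randomization. Consolidated:

    void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
    {
            mm->mmap_base = TASK_UNMAPPED_BASE;
            mm->get_unmapped_area = arch_get_unmapped_area;
    }
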
472 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, in __account_locked_vm() argument
478 mmap_assert_write_locked(mm); in __account_locked_vm()
480 locked_vm = mm->locked_vm; in __account_locked_vm()
488 mm->locked_vm = locked_vm + pages; in __account_locked_vm()
491 mm->locked_vm = locked_vm - pages; in __account_locked_vm()
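
__account_locked_vm() adjusts mm->locked_vm while asserting that the mmap write lock is held, refusing an increase that would exceed RLIMIT_MEMLOCK unless the caller may bypass the limit. A sketch of the accounting path; the task and bypass_rlim parameters completing the truncated prototype are an assumption based on the upstream signature:

    int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
                            struct task_struct *task, bool bypass_rlim)
    {
            unsigned long locked_vm, limit;
            int ret = 0;

            mmap_assert_write_locked(mm);

            locked_vm = mm->locked_vm;
            if (inc) {
                    if (!bypass_rlim) {
                            /* Refuse increases past RLIMIT_MEMLOCK. */
                            limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                            if (locked_vm + pages > limit)
                                    ret = -ENOMEM;
                    }
                    if (!ret)
                            mm->locked_vm = locked_vm + pages;
            } else {
                    /* Decreases should never underflow the counter. */
                    WARN_ON_ONCE(pages > locked_vm);
                    mm->locked_vm = locked_vm - pages;
            }

            return ret;
    }
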
515 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc) in account_locked_vm() argument
519 if (pages == 0 || !mm) in account_locked_vm()
522 mmap_write_lock(mm); in account_locked_vm()
523 ret = __account_locked_vm(mm, pages, inc, current, in account_locked_vm()
525 mmap_write_unlock(mm); in account_locked_vm()
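
account_locked_vm() is the self-locking wrapper around it. The call on the matched line continues onto an unmatched line; the capable(CAP_IPC_LOCK) final argument below is an assumption based on the upstream code, letting privileged tasks bypass the rlimit:

    int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
    {
            int ret;

            if (pages == 0 || !mm)
                    return 0;

            mmap_write_lock(mm);
            ret = __account_locked_vm(mm, pages, inc, current,
                                      capable(CAP_IPC_LOCK));
            mmap_write_unlock(mm);

            return ret;
    }
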
536 struct mm_struct *mm = current->mm; in vm_mmap_pgoff() local
542 if (mmap_write_lock_killable(mm)) in vm_mmap_pgoff()
546 mmap_write_unlock(mm); in vm_mmap_pgoff()
547 userfaultfd_unmap_complete(mm, &uf); in vm_mmap_pgoff()
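
In vm_mmap_pgoff() the matches show the write-lock bracket around the real mapping work on current->mm, plus the userfaultfd completion for any ranges a fixed mapping displaced. A sketch of the core, assuming the do_mmap() call and mm_populate() step of the upstream helper:

    unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
                                unsigned long len, unsigned long prot,
                                unsigned long flag, unsigned long pgoff)
    {
            unsigned long ret;
            struct mm_struct *mm = current->mm;
            unsigned long populate;
            LIST_HEAD(uf);

            ret = security_mmap_file(file, prot, flag);
            if (!ret) {
                    if (mmap_write_lock_killable(mm))
                            return -EINTR;
                    ret = do_mmap(file, addr, len, prot, flag, pgoff,
                                  &populate, &uf);
                    mmap_write_unlock(mm);
                    /* Notify userfaultfd about ranges unmapped by MAP_FIXED. */
                    userfaultfd_unmap_complete(mm, &uf);
                    if (populate)
                            mm_populate(ret, populate);
            }
            return ret;
    }
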
975 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) in __vm_enough_memory() argument
1003 if (mm) { in __vm_enough_memory()
1006 allowed -= min_t(long, mm->total_vm / 32, reserve); in __vm_enough_memory()
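
The __vm_enough_memory() match is the per-process guard on the strict-overcommit path: the allowance is reduced by min(mm->total_vm / 32, user reserve) so a single process cannot consume the last memory a user would need to recover the system. A sketch of that tail, assuming the vm_commit_limit() and sysctl reserves from upstream; the heuristic overcommit modes handled earlier in the function are elided:

    int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
    {
            long allowed;

            /* ... OVERCOMMIT_GUESS / OVERCOMMIT_ALWAYS handling elided ... */

            allowed = vm_commit_limit();
            /* Keep a reserve for root. */
            if (!cap_sys_admin)
                    allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

            /* Don't let a single process grow so big a user can't recover. */
            if (mm) {
                    long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

                    allowed -= min_t(long, mm->total_vm / 32, reserve);
            }

            /* ... commit the charge or return -ENOMEM ... */
    }
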
1031 struct mm_struct *mm = get_task_mm(task); in get_cmdline() local
1033 if (!mm) in get_cmdline()
1035 if (!mm->arg_end) in get_cmdline()
1038 spin_lock(&mm->arg_lock); in get_cmdline()
1039 arg_start = mm->arg_start; in get_cmdline()
1040 arg_end = mm->arg_end; in get_cmdline()
1041 env_start = mm->env_start; in get_cmdline()
1042 env_end = mm->env_end; in get_cmdline()
1043 spin_unlock(&mm->arg_lock); in get_cmdline()
1071 mmput(mm); in get_cmdline()
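
get_cmdline() takes a reference on the task's mm via get_task_mm(), snapshots the argv/envp boundaries under mm->arg_lock (so a concurrent prctl(PR_SET_MM) update cannot be seen half-written), and drops the reference with mmput(). A sketch of that sequence; the actual copying of [arg_start, arg_end) into the buffer via access_process_vm() is elided:

    int get_cmdline(struct task_struct *task, char *buffer, int buflen)
    {
            int res = 0;
            unsigned long arg_start, arg_end, env_start, env_end;
            struct mm_struct *mm = get_task_mm(task);

            if (!mm)
                    goto out;
            if (!mm->arg_end)       /* exec has not finished setting up yet */
                    goto out_mm;

            /* Snapshot the boundaries atomically w.r.t. prctl(PR_SET_MM). */
            spin_lock(&mm->arg_lock);
            arg_start = mm->arg_start;
            arg_end = mm->arg_end;
            env_start = mm->env_start;
            env_end = mm->env_end;
            spin_unlock(&mm->arg_lock);

            /* ... copy [arg_start, arg_end) into buffer ... */

    out_mm:
            mmput(mm);
    out:
            return res;
    }
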