Lines Matching refs:mm (identifier cross-reference hits in the Linux no-MMU memory manager, mm/nommu.c)
137 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
149 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages() argument
167 vma = find_vma(mm, start); in __get_user_pages()
199 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages() argument
211 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, in get_user_pages()
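A minimal sketch of how a caller of this vintage pins one user page, assuming the write/force integer arguments this generation of get_user_pages() took (the wrapper at 199-211 converts them into FOLL_* flags for __get_user_pages()); pin_one_page() and uaddr are hypothetical, and callers were expected to hold mmap_sem themselves:

        /* pin the page backing `uaddr` for writing; returns it or NULL */
        static struct page *pin_one_page(unsigned long uaddr)
        {
                struct page *page;
                long got;

                down_read(&current->mm->mmap_sem);
                got = get_user_pages(current, current->mm,
                                     uaddr & PAGE_MASK, 1,      /* one page */
                                     1, 0,                      /* write, !force */
                                     &page, NULL);
                up_read(&current->mm->mmap_sem);

                return got == 1 ? page : NULL;  /* caller must put_page() it */
        }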
264 down_write(&current->mm->mmap_sem); in vmalloc_user()
265 vma = find_vma(current->mm, (unsigned long)ret); in vmalloc_user()
268 up_write(&current->mm->mmap_sem); in vmalloc_user()
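The vmalloc_user() hits take mmap_sem for writing because the function edits its own freshly created mapping: it looks the allocation's VMA up with find_vma() and tags it VM_USERMAP so the buffer may later be handed to userspace. A sketch of the usual consumer, a driver mmap handler (mydev_buf is a hypothetical buffer assumed to come from vmalloc_user()):

        static void *mydev_buf;         /* allocated elsewhere via vmalloc_user() */

        static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
        {
                /* remap_vmalloc_range() refuses non-VM_USERMAP allocations */
                return remap_vmalloc_range(vma, mydev_buf, 0);
        }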
512 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1() local
514 if (brk < mm->start_brk || brk > mm->context.end_brk) in SYSCALL_DEFINE1()
515 return mm->brk; in SYSCALL_DEFINE1()
517 if (mm->brk == brk) in SYSCALL_DEFINE1()
518 return mm->brk; in SYSCALL_DEFINE1()
523 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
524 mm->brk = brk; in SYSCALL_DEFINE1()
531 flush_icache_range(mm->brk, brk); in SYSCALL_DEFINE1()
532 return mm->brk = brk; in SYSCALL_DEFINE1()
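Read in sequence, the SYSCALL_DEFINE1(brk) hits above trace the entire no-MMU brk policy; a condensed paraphrase (the comments are assumptions from context):

        SYSCALL_DEFINE1(brk, unsigned long, brk)
        {
                struct mm_struct *mm = current->mm;

                /* the break may only move inside the region reserved at exec time */
                if (brk < mm->start_brk || brk > mm->context.end_brk)
                        return mm->brk;
                if (mm->brk == brk)
                        return mm->brk;

                /* shrinking is always allowed: just record the new break */
                if (brk <= mm->brk) {
                        mm->brk = brk;
                        return brk;
                }

                /* growing: make the newly exposed range coherent, then commit */
                flush_icache_range(mm->brk, brk);
                return mm->brk = brk;
        }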
690 struct mm_struct *mm = vma->vm_mm; in protect_vma() local
693 protect_page(mm, start, flags); in protect_vma()
696 update_protections(mm); in protect_vma()
706 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) in add_vma_to_mm() argument
716 mm->map_count++; in add_vma_to_mm()
717 vma->vm_mm = mm; in add_vma_to_mm()
734 p = &mm->mm_rb.rb_node; in add_vma_to_mm()
761 rb_insert_color(&vma->vm_rb, &mm->mm_rb); in add_vma_to_mm()
768 __vma_link_list(mm, vma, prev, parent); in add_vma_to_mm()
778 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm() local
785 mm->map_count--; in delete_vma_from_mm()
789 vmacache_invalidate(mm); in delete_vma_from_mm()
806 rb_erase(&vma->vm_rb, &mm->mm_rb); in delete_vma_from_mm()
811 mm->mmap = vma->vm_next; in delete_vma_from_mm()
820 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) in delete_vma() argument
835 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) in find_vma() argument
840 vma = vmacache_find(mm, addr); in find_vma()
846 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma()
863 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
865 return find_vma(mm, addr); in find_extend_vma()
881 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, in find_vma_exact() argument
889 vma = vmacache_find_exact(mm, addr, end); in find_vma_exact()
895 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma_exact()
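The three lookup helpers share one shape: consult the per-thread VMA cache first, then fall back to a linear walk of the no-MMU VMA list; find_extend_vma() (865) degenerates to plain find_vma() because no-MMU mappings cannot be grown. A condensed paraphrase of find_vma() assembled from the hits at 835-846 (the cache update on a slow-path hit is an assumption from context):

        struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
        {
                struct vm_area_struct *vma;

                /* fast path: per-thread cache of recently used VMAs */
                vma = vmacache_find(mm, addr);
                if (vma)
                        return vma;

                /* slow path: the list is sorted by vm_start, so stop early */
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        if (vma->vm_start > addr)
                                return NULL;
                        if (vma->vm_end > addr) {
                                vmacache_update(addr, vma);
                                return vma;
                        }
                }
                return NULL;
        }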
1443 current->mm->total_vm += len >> PAGE_SHIFT; in do_mmap_pgoff()
1446 add_vma_to_mm(current->mm, vma); in do_mmap_pgoff()
1546 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
1560 if (mm->map_count >= sysctl_max_map_count) in split_vma()
1603 add_vma_to_mm(mm, vma); in split_vma()
1604 add_vma_to_mm(mm, new); in split_vma()
1612 static int shrink_vma(struct mm_struct *mm, in shrink_vma() argument
1627 add_vma_to_mm(mm, vma); in shrink_vma()
1653 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) in do_munmap() argument
1668 vma = find_vma(mm, start); in do_munmap()
1712 ret = split_vma(mm, vma, start, 1); in do_munmap()
1718 return shrink_vma(mm, vma, start, end); in do_munmap()
1723 delete_vma(mm, vma); in do_munmap()
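The do_munmap() hits form a decision tree rather than one path: look the VMA up, split when the range starts inside it, shrink when a piece of it survives, and delete outright on an exact fit. Roughly, paraphrasing the matched lines only (the real function also rejects splitting file-backed mappings and handles multi-VMA spans and alignment errors):

        vma = find_vma(mm, start);
        if (!vma)
                return -EINVAL;                 /* nothing mapped there */

        if (start != vma->vm_start || end != vma->vm_end) {
                /* interior range: split off the tail first... */
                if (start > vma->vm_start && end < vma->vm_end) {
                        ret = split_vma(mm, vma, start, 1);
                        if (ret < 0)
                                return ret;
                }
                /* ...then trim the edge piece away */
                return shrink_vma(mm, vma, start, end);
        }

        /* exact fit: drop the whole VMA */
        delete_vma_from_mm(vma);
        delete_vma(mm, vma);
        return 0;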
1731 struct mm_struct *mm = current->mm; in vm_munmap() local
1734 down_write(&mm->mmap_sem); in vm_munmap()
1735 ret = do_munmap(mm, addr, len); in vm_munmap()
1736 up_write(&mm->mmap_sem); in vm_munmap()
1749 void exit_mmap(struct mm_struct *mm) in exit_mmap() argument
1753 if (!mm) in exit_mmap()
1758 mm->total_vm = 0; in exit_mmap()
1760 while ((vma = mm->mmap)) { in exit_mmap()
1761 mm->mmap = vma->vm_next; in exit_mmap()
1763 delete_vma(mm, vma); in exit_mmap()
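Taken in order, the exit_mmap() hits show final teardown: zero the accounting, then pop VMAs off the head of the list until it is empty (the unlink call between the matched lines is an assumption based on the helpers above):

        void exit_mmap(struct mm_struct *mm)
        {
                struct vm_area_struct *vma;

                if (!mm)
                        return;

                mm->total_vm = 0;

                while ((vma = mm->mmap)) {
                        mm->mmap = vma->vm_next;        /* pop the list head */
                        delete_vma_from_mm(vma);        /* assumed unlink step */
                        delete_vma(mm, vma);            /* put the region, free the VMA */
                }
        }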
1803 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1827 down_write(&current->mm->mmap_sem); in SYSCALL_DEFINE5()
1829 up_write(&current->mm->mmap_sem); in SYSCALL_DEFINE5()
1906 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) in __vm_enough_memory() argument
1970 if (mm) { in __vm_enough_memory()
1972 allowed -= min_t(long, mm->total_vm / 32, reserve); in __vm_enough_memory()
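The hit at 1972 applies the per-process slice of the user reserve: when a process has an mm, its overcommit allowance shrinks by min(total_vm / 32, reserve), i.e. by roughly 3% of the process's own mapped size, capped at the configured reserve. Worked example (assuming 4 KiB pages): a process with total_vm = 25600 pages (100 MiB) against a 32768-page (128 MiB) reserve loses min(800, 32768) = 800 pages of allowance.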
2005 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, in __access_remote_vm() argument
2010 down_read(&mm->mmap_sem); in __access_remote_vm()
2013 vma = find_vma(mm, addr); in __access_remote_vm()
2032 up_read(&mm->mmap_sem); in __access_remote_vm()
2047 int access_remote_vm(struct mm_struct *mm, unsigned long addr, in access_remote_vm() argument
2050 return __access_remote_vm(NULL, mm, addr, buf, len, write); in access_remote_vm()
2059 struct mm_struct *mm; in access_process_vm() local
2064 mm = get_task_mm(tsk); in access_process_vm()
2065 if (!mm) in access_process_vm()
2068 len = __access_remote_vm(tsk, mm, addr, buf, len, write); in access_process_vm()
2070 mmput(mm); in access_process_vm()
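The access_process_vm() hits show the standard remote-access bracket: pin the target's address space with get_task_mm(), do the copy via __access_remote_vm(), release it with mmput(). A minimal caller sketch in this era's signature (tsk and addr stand in for a real target task and remote address):

        unsigned char buf[16];
        int copied;

        /* read sizeof(buf) bytes at addr in tsk's address space */
        copied = access_process_vm(tsk, addr, buf, sizeof(buf), 0 /* !write */);
        if (copied != (int)sizeof(buf)) {
                /* only `copied` bytes were reachable at that address */
        }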