Lines matching refs:mm in kernel/fork.c (identifier cross-reference output; the leading number on each line is that line's position in kernel/fork.c, and the trailing "argument"/"local" tags appear to be the indexer's classification of how mm is used there). The tree predates v5.8, since mm->mmap_sem has not yet been renamed to mmap_lock.
346 struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) in vm_area_alloc() argument
352 vma_init(vma, mm); in vm_area_alloc()
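Only two lines of vm_area_alloc() match; for context, a sketch of its likely shape in this era of fork.c (vm_area_cachep is the VMA slab cache):

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma)
		vma_init(vma, mm);	/* zero the VMA and set vma->vm_mm = mm */
	return vma;
}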
483 static __latent_entropy int dup_mmap(struct mm_struct *mm, in dup_mmap() argument
498 uprobe_dup_mmap(oldmm, mm); in dup_mmap()
502 down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); in dup_mmap()
505 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); in dup_mmap()
507 mm->total_vm = oldmm->total_vm; in dup_mmap()
508 mm->data_vm = oldmm->data_vm; in dup_mmap()
509 mm->exec_vm = oldmm->exec_vm; in dup_mmap()
510 mm->stack_vm = oldmm->stack_vm; in dup_mmap()
512 rb_link = &mm->mm_rb.rb_node; in dup_mmap()
514 pprev = &mm->mmap; in dup_mmap()
515 retval = ksm_fork(mm, oldmm); in dup_mmap()
518 retval = khugepaged_fork(mm, oldmm); in dup_mmap()
527 vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); in dup_mmap()
552 tmp->vm_mm = mm; in dup_mmap()
600 __vma_link_rb(mm, tmp, rb_link, rb_parent); in dup_mmap()
604 mm->map_count++; in dup_mmap()
606 retval = copy_page_range(mm, oldmm, mpnt); in dup_mmap()
615 retval = arch_dup_mmap(oldmm, mm); in dup_mmap()
617 up_write(&mm->mmap_sem); in dup_mmap()
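The dup_mmap() fragments above are the heart of fork(): walk the parent's VMA list under both parent and child mmap_sem, clone each VMA, link it into the child's list and rbtree, and copy the page tables. A heavily simplified sketch of that loop (error unwinding, VM_DONTCOPY/VM_WIPEONFORK handling, and file/anon_vma bookkeeping all elided; vm_area_dup() is fork.c's VMA-cloning helper):

static int dup_mmap_sketch(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp;
	int retval = 0;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		tmp = vm_area_dup(mpnt);	/* clone the VMA */
		if (!tmp)
			return -ENOMEM;
		tmp->vm_mm = mm;		/* rehome it in the child mm */
		/* ... link tmp into mm->mmap and mm->mm_rb ... */
		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);	/* copy PTEs */
		if (retval)
			break;
	}
	return retval;
}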
634 static inline int mm_alloc_pgd(struct mm_struct *mm) in mm_alloc_pgd() argument
636 mm->pgd = pgd_alloc(mm); in mm_alloc_pgd()
637 if (unlikely(!mm->pgd)) in mm_alloc_pgd()
642 static inline void mm_free_pgd(struct mm_struct *mm) in mm_free_pgd() argument
644 pgd_free(mm, mm->pgd); in mm_free_pgd()
647 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) in dup_mmap() argument
650 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); in dup_mmap()
654 #define mm_alloc_pgd(mm) (0) argument
655 #define mm_free_pgd(mm) argument
658 static void check_mm(struct mm_struct *mm) in check_mm() argument
666 long x = atomic_long_read(&mm->rss_stat.count[i]); in check_mm()
670 mm, resident_page_types[i], x); in check_mm()
673 if (mm_pgtables_bytes(mm)) in check_mm()
675 mm_pgtables_bytes(mm)); in check_mm()
678 VM_BUG_ON_MM(mm->pmd_huge_pte, mm); in check_mm()
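Assembled from the fragments above, check_mm() is a teardown sanity check; a plausible reconstruction for this era (format strings from memory, so treat as approximate):

static void check_mm(struct mm_struct *mm)
{
	int i;

	/* Any RSS counter still non-zero at teardown is a leak or underflow. */
	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	/* All page-table pages should have been freed by exit_mmap(). */
	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
			 mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}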
683 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) argument
690 void __mmdrop(struct mm_struct *mm) in __mmdrop() argument
692 BUG_ON(mm == &init_mm); in __mmdrop()
693 WARN_ON_ONCE(mm == current->mm); in __mmdrop()
694 WARN_ON_ONCE(mm == current->active_mm); in __mmdrop()
695 mm_free_pgd(mm); in __mmdrop()
696 destroy_context(mm); in __mmdrop()
697 mmu_notifier_mm_destroy(mm); in __mmdrop()
698 check_mm(mm); in __mmdrop()
699 put_user_ns(mm->user_ns); in __mmdrop()
700 free_mm(mm); in __mmdrop()
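__mmdrop() is the final destructor, reached when mm_count hits zero. The counterpart helpers live in include/linux/sched/mm.h; in this era they are essentially:

static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);	/* pin the mm_struct itself */
}

static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

mm_count pins only the mm_struct (and its pgd); it does not keep the VMAs or page tables alive. That is mm_users' job, below.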
706 struct mm_struct *mm; in mmdrop_async_fn() local
708 mm = container_of(work, struct mm_struct, async_put_work); in mmdrop_async_fn()
709 __mmdrop(mm); in mmdrop_async_fn()
712 static void mmdrop_async(struct mm_struct *mm) in mmdrop_async() argument
714 if (unlikely(atomic_dec_and_test(&mm->mm_count))) { in mmdrop_async()
715 INIT_WORK(&mm->async_put_work, mmdrop_async_fn); in mmdrop_async()
716 schedule_work(&mm->async_put_work); in mmdrop_async()
983 static void mm_init_aio(struct mm_struct *mm) in mm_init_aio() argument
986 spin_lock_init(&mm->ioctx_lock); in mm_init_aio()
987 mm->ioctx_table = NULL; in mm_init_aio()
991 static __always_inline void mm_clear_owner(struct mm_struct *mm, in mm_clear_owner() argument
995 if (mm->owner == p) in mm_clear_owner()
996 WRITE_ONCE(mm->owner, NULL); in mm_clear_owner()
1000 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) in mm_init_owner() argument
1003 mm->owner = p; in mm_init_owner()
1007 static void mm_init_uprobes_state(struct mm_struct *mm) in mm_init_uprobes_state() argument
1010 mm->uprobes_state.xol_area = NULL; in mm_init_uprobes_state()
1014 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, in mm_init() argument
1017 mm->mmap = NULL; in mm_init()
1018 mm->mm_rb = RB_ROOT; in mm_init()
1019 mm->vmacache_seqnum = 0; in mm_init()
1020 atomic_set(&mm->mm_users, 1); in mm_init()
1021 atomic_set(&mm->mm_count, 1); in mm_init()
1022 init_rwsem(&mm->mmap_sem); in mm_init()
1023 INIT_LIST_HEAD(&mm->mmlist); in mm_init()
1024 mm->core_state = NULL; in mm_init()
1025 mm_pgtables_bytes_init(mm); in mm_init()
1026 mm->map_count = 0; in mm_init()
1027 mm->locked_vm = 0; in mm_init()
1028 atomic64_set(&mm->pinned_vm, 0); in mm_init()
1029 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); in mm_init()
1030 spin_lock_init(&mm->page_table_lock); in mm_init()
1031 spin_lock_init(&mm->arg_lock); in mm_init()
1032 mm_init_cpumask(mm); in mm_init()
1033 mm_init_aio(mm); in mm_init()
1034 mm_init_owner(mm, p); in mm_init()
1035 RCU_INIT_POINTER(mm->exe_file, NULL); in mm_init()
1036 mmu_notifier_mm_init(mm); in mm_init()
1037 init_tlb_flush_pending(mm); in mm_init()
1039 mm->pmd_huge_pte = NULL; in mm_init()
1041 mm_init_uprobes_state(mm); in mm_init()
1043 if (current->mm) { in mm_init()
1044 mm->flags = current->mm->flags & MMF_INIT_MASK; in mm_init()
1045 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; in mm_init()
1047 mm->flags = default_dump_filter; in mm_init()
1048 mm->def_flags = 0; in mm_init()
1051 if (mm_alloc_pgd(mm)) in mm_init()
1054 if (init_new_context(p, mm)) in mm_init()
1057 mm->user_ns = get_user_ns(user_ns); in mm_init()
1058 return mm; in mm_init()
1061 mm_free_pgd(mm); in mm_init()
1063 free_mm(mm); in mm_init()
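Lines 1051-1063 are mm_init()'s failure unwinding; read together they form the usual goto-cleanup ladder (label names as in this era's fork.c):

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	mm->user_ns = get_user_ns(user_ns);
	return mm;

fail_nocontext:
	mm_free_pgd(mm);	/* undo the pgd allocation */
fail_nopgd:
	free_mm(mm);		/* return the mm_struct to its slab */
	return NULL;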
1072 struct mm_struct *mm; in mm_alloc() local
1074 mm = allocate_mm(); in mm_alloc()
1075 if (!mm) in mm_alloc()
1078 memset(mm, 0, sizeof(*mm)); in mm_alloc()
1079 return mm_init(mm, current, current_user_ns()); in mm_alloc()
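mm_alloc() hands out a zeroed, fully initialised mm holding one mm_users and one mm_count reference. Its classic consumer is execve() building the new image; a minimal usage sketch modeled on fs/exec.c's bprm_mm_init() (surrounding code illustrative):

	struct mm_struct *mm;

	mm = mm_alloc();
	if (!mm)
		return -ENOMEM;
	/* ... set up the initial stack VMA, etc.; on any error path: */
	mmput(mm);	/* drops the initial mm_users reference */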
1082 static inline void __mmput(struct mm_struct *mm) in __mmput() argument
1084 VM_BUG_ON(atomic_read(&mm->mm_users)); in __mmput()
1086 uprobe_clear_state(mm); in __mmput()
1087 exit_aio(mm); in __mmput()
1088 ksm_exit(mm); in __mmput()
1089 khugepaged_exit(mm); /* must run before exit_mmap */ in __mmput()
1090 exit_mmap(mm); in __mmput()
1091 mm_put_huge_zero_page(mm); in __mmput()
1092 set_mm_exe_file(mm, NULL); in __mmput()
1093 if (!list_empty(&mm->mmlist)) { in __mmput()
1095 list_del(&mm->mmlist); in __mmput()
1098 if (mm->binfmt) in __mmput()
1099 module_put(mm->binfmt->module); in __mmput()
1100 mmdrop(mm); in __mmput()
1106 void mmput(struct mm_struct *mm) in mmput() argument
1110 if (atomic_dec_and_test(&mm->mm_users)) in mmput()
1111 __mmput(mm); in mmput()
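mmput() drops an mm_users reference; the matching acquire helper is also in include/linux/sched/mm.h:

static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

mm_users guards the contents of the address space (VMAs, page tables, aio contexts). When the last user calls mmput(), __mmput() tears all of that down and finishes with mmdrop(), converting the final mm_users reference into the release of the mm_count reference.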
1118 struct mm_struct *mm = container_of(work, struct mm_struct, in mmput_async_fn() local
1121 __mmput(mm); in mmput_async_fn()
1124 void mmput_async(struct mm_struct *mm) in mmput_async() argument
1126 if (atomic_dec_and_test(&mm->mm_users)) { in mmput_async()
1127 INIT_WORK(&mm->async_put_work, mmput_async_fn); in mmput_async()
1128 schedule_work(&mm->async_put_work); in mmput_async()
1144 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) in set_mm_exe_file() argument
1153 old_exe_file = rcu_dereference_raw(mm->exe_file); in set_mm_exe_file()
1157 rcu_assign_pointer(mm->exe_file, new_exe_file); in set_mm_exe_file()
1168 struct file *get_mm_exe_file(struct mm_struct *mm) in get_mm_exe_file() argument
1173 exe_file = rcu_dereference(mm->exe_file); in get_mm_exe_file()
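get_mm_exe_file() is a lockless RCU reader paired with set_mm_exe_file()'s rcu_assign_pointer(); a plausible reconstruction for this era:

struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	/*
	 * get_file_rcu() bumps f_count only if it is still non-zero,
	 * guarding against a racing final fput().
	 */
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}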
1191 struct mm_struct *mm; in get_task_exe_file() local
1194 mm = task->mm; in get_task_exe_file()
1195 if (mm) { in get_task_exe_file()
1197 exe_file = get_mm_exe_file(mm); in get_task_exe_file()
1215 struct mm_struct *mm; in get_task_mm() local
1218 mm = task->mm; in get_task_mm()
1219 if (mm) { in get_task_mm()
1221 mm = NULL; in get_task_mm()
1223 mmget(mm); in get_task_mm()
1226 return mm; in get_task_mm()
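Reading the get_task_mm() fragments together: task_lock() stabilises task->mm, kernel threads (PF_KTHREAD) report no mm even while borrowing one, and a real user mm gets an mm_users reference before being returned. A reconstruction:

struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;	/* kthreads only borrow an mm */
		else
			mmget(mm);	/* pin it for the caller */
	}
	task_unlock(task);
	return mm;
}

Every non-NULL return must be balanced by mmput().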
1232 struct mm_struct *mm; in mm_access() local
1239 mm = get_task_mm(task); in mm_access()
1240 if (mm && mm != current->mm && in mm_access()
1242 mmput(mm); in mm_access()
1243 mm = ERR_PTR(-EACCES); in mm_access()
1247 return mm; in mm_access()
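mm_access() wraps get_task_mm() with a ptrace-style permission check and so can return a valid mm, NULL, or ERR_PTR(-EACCES). A hypothetical caller in the /proc style (the mode constant is real, the surrounding code illustrative):

	struct mm_struct *mm;

	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR(mm))
		return PTR_ERR(mm);	/* -EACCES: permission denied */
	if (!mm)
		return -ESRCH;		/* task had no mm (exited or kthread) */
	/* ... inspect the address space ... */
	mmput(mm);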
1297 static void mm_release(struct task_struct *tsk, struct mm_struct *mm) in mm_release() argument
1302 deactivate_mm(tsk, mm); in mm_release()
1311 atomic_read(&mm->mm_users) > 1) { in mm_release()
1331 void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) in exit_mm_release() argument
1334 mm_release(tsk, mm); in exit_mm_release()
1337 void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) in exec_mm_release() argument
1340 mm_release(tsk, mm); in exec_mm_release()
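mm_release(), called from both exit_mm_release() and exec_mm_release(), is also where CLONE_CHILD_CLEARTID is honoured; the mm_users > 1 test at line 1311 belongs to this block. Reconstructed from this era's fork.c:

	if (tsk->clear_child_tid) {
		if (!(tsk->flags & PF_SIGNALED) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			do_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0, 0);
		}
		tsk->clear_child_tid = NULL;
	}

This is what lets glibc's pthread_join() sleep on the child's TID word and wake when the thread has truly exited.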
1356 struct mm_struct *mm; in dup_mm() local
1359 mm = allocate_mm(); in dup_mm()
1360 if (!mm) in dup_mm()
1363 memcpy(mm, oldmm, sizeof(*mm)); in dup_mm()
1365 if (!mm_init(mm, tsk, mm->user_ns)) in dup_mm()
1368 err = dup_mmap(mm, oldmm); in dup_mm()
1372 mm->hiwater_rss = get_mm_rss(mm); in dup_mm()
1373 mm->hiwater_vm = mm->total_vm; in dup_mm()
1375 if (mm->binfmt && !try_module_get(mm->binfmt->module)) in dup_mm()
1378 return mm; in dup_mm()
1382 mm->binfmt = NULL; in dup_mm()
1383 mm_init_owner(mm, NULL); in dup_mm()
1384 mmput(mm); in dup_mm()
1392 struct mm_struct *mm, *oldmm; in copy_mm() local
1402 tsk->mm = NULL; in copy_mm()
1410 oldmm = current->mm; in copy_mm()
1419 mm = oldmm; in copy_mm()
1424 mm = dup_mm(tsk, current->mm); in copy_mm()
1425 if (!mm) in copy_mm()
1429 tsk->mm = mm; in copy_mm()
1430 tsk->active_mm = mm; in copy_mm()
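The copy_mm() fragments distinguish the two clone flavours: CLONE_VM (threads) shares the parent's mm with a bare mmget(), while plain fork() deep-copies it via dup_mm(). The core of the function in this era, slightly trimmed:

	oldmm = current->mm;
	if (!oldmm)
		return 0;		/* kernel thread: nothing to copy */

	if (clone_flags & CLONE_VM) {
		mmget(oldmm);		/* share the address space */
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk, current->mm);	/* full copy for fork() */
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;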
2261 if (p->mm) { in copy_process()
2262 mm_clear_owner(p->mm, p); in copy_process()
2263 mmput(p->mm); in copy_process()