Lines Matching refs:mm

Each entry below gives the source file's own line number, the matching line, and the enclosing function; the trailing "argument"/"local" flag is the cross-referencer's note on how mm is bound at that line.

350 struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)  in vm_area_alloc()  argument
356 vma_init(vma, mm); in vm_area_alloc()
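These two fragments are vm_area_alloc(), which allocates a VMA from its slab cache and ties it to the given mm. A plausible reconstruction of the full function for kernels of this vintage (the slab name and GFP flags are the usual ones, not confirmed by the fragments):

    struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
            if (vma)
                    vma_init(vma, mm);      /* the fragment at 356 */
            return vma;
    }
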
481 static __latent_entropy int dup_mmap(struct mm_struct *mm, in dup_mmap() argument
496 uprobe_dup_mmap(oldmm, mm); in dup_mmap()
500 mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING); in dup_mmap()
503 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); in dup_mmap()
505 mm->total_vm = oldmm->total_vm; in dup_mmap()
506 mm->data_vm = oldmm->data_vm; in dup_mmap()
507 mm->exec_vm = oldmm->exec_vm; in dup_mmap()
508 mm->stack_vm = oldmm->stack_vm; in dup_mmap()
510 rb_link = &mm->mm_rb.rb_node; in dup_mmap()
512 pprev = &mm->mmap; in dup_mmap()
513 retval = ksm_fork(mm, oldmm); in dup_mmap()
516 retval = khugepaged_fork(mm, oldmm); in dup_mmap()
525 vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); in dup_mmap()
550 tmp->vm_mm = mm; in dup_mmap()
599 __vma_link_rb(mm, tmp, rb_link, rb_parent); in dup_mmap()
603 mm->map_count++; in dup_mmap()
624 retval = arch_dup_mmap(oldmm, mm); in dup_mmap()
626 mmap_write_unlock(mm); in dup_mmap()
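dup_mmap() is the heart of fork's address-space copy: under mmap_write_lock it clones the parent's counters (total_vm, data_vm, ...), then walks the parent's VMA list, copying each entry, pointing it at the new mm (tmp->vm_mm = mm at 550), and linking it into the child's list and rbtree while bumping map_count. A minimal, compile-ready userspace sketch of that copy loop; all types and names here are illustrative, not kernel API:

    #include <stdlib.h>

    struct vma {                            /* stand-in for struct vm_area_struct */
            unsigned long start, end;
            struct vma *next;
    };

    struct mm {                             /* stand-in for struct mm_struct */
            struct vma *mmap;               /* head of the VMA list */
            int map_count;
    };

    static int dup_mmap_sketch(struct mm *child, const struct mm *parent)
    {
            struct vma **pprev = &child->mmap;      /* where the next copy links in */
            const struct vma *src;

            for (src = parent->mmap; src; src = src->next) {
                    struct vma *tmp = malloc(sizeof(*tmp));

                    if (!tmp)
                            return -1;      /* the real code unwinds via its fail path */
                    *tmp = *src;            /* like the wholesale copy of the old VMA */
                    tmp->next = NULL;
                    *pprev = tmp;           /* like pprev = &mm->mmap at 512 */
                    pprev = &tmp->next;
                    child->map_count++;     /* mm->map_count++ at 603 */
            }
            return 0;
    }
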
659 static inline int mm_alloc_pgd(struct mm_struct *mm) in mm_alloc_pgd() argument
661 mm->pgd = pgd_alloc(mm); in mm_alloc_pgd()
662 if (unlikely(!mm->pgd)) in mm_alloc_pgd()
667 static inline void mm_free_pgd(struct mm_struct *mm) in mm_free_pgd() argument
669 pgd_free(mm, mm->pgd); in mm_free_pgd()
672 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) in dup_mmap() argument
675 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); in dup_mmap()
679 #define mm_alloc_pgd(mm) (0) argument
680 #define mm_free_pgd(mm) argument
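mm_alloc_pgd() and mm_free_pgd() pair the mm with its page global directory. On !CONFIG_MMU builds the fragments at 679-680 show the same names becoming no-op macros, so callers like mm_init() need no #ifdefs. A plausible reconstruction of the MMU variant (the -ENOMEM return value is assumed):

    static inline int mm_alloc_pgd(struct mm_struct *mm)
    {
            mm->pgd = pgd_alloc(mm);
            if (unlikely(!mm->pgd))
                    return -ENOMEM;
            return 0;
    }
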
683 static void check_mm(struct mm_struct *mm) in check_mm() argument
691 long x = atomic_long_read(&mm->rss_stat.count[i]); in check_mm()
695 mm, resident_page_types[i], x); in check_mm()
698 if (mm_pgtables_bytes(mm)) in check_mm()
700 mm_pgtables_bytes(mm)); in check_mm()
703 VM_BUG_ON_MM(mm->pmd_huge_pte, mm); in check_mm()
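check_mm() is the teardown audit run on the way to freeing an mm: every per-type RSS counter must be back at zero, no page-table bytes may remain accounted, and on THP configs without split PMD locks no deposited PMD page may survive. A plausible reconstruction of the loop from the fragments (the alert text is approximated from memory):

    static void check_mm(struct mm_struct *mm)
    {
            int i;

            for (i = 0; i < NR_MM_COUNTERS; i++) {
                    long x = atomic_long_read(&mm->rss_stat.count[i]);

                    if (unlikely(x))
                            pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
                                     mm, resident_page_types[i], x);
            }

            if (mm_pgtables_bytes(mm))
                    pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
                             mm_pgtables_bytes(mm));

    #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
            VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
    #endif
    }
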
708 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) argument
715 void __mmdrop(struct mm_struct *mm) in __mmdrop() argument
717 BUG_ON(mm == &init_mm); in __mmdrop()
718 WARN_ON_ONCE(mm == current->mm); in __mmdrop()
719 WARN_ON_ONCE(mm == current->active_mm); in __mmdrop()
720 mm_free_pgd(mm); in __mmdrop()
721 destroy_context(mm); in __mmdrop()
722 mmu_notifier_subscriptions_destroy(mm); in __mmdrop()
723 check_mm(mm); in __mmdrop()
724 put_user_ns(mm->user_ns); in __mmdrop()
725 free_mm(mm); in __mmdrop()
731 struct mm_struct *mm; in mmdrop_async_fn() local
733 mm = container_of(work, struct mm_struct, async_put_work); in mmdrop_async_fn()
734 __mmdrop(mm); in mmdrop_async_fn()
737 static void mmdrop_async(struct mm_struct *mm) in mmdrop_async() argument
739 if (unlikely(atomic_dec_and_test(&mm->mm_count))) { in mmdrop_async()
740 INIT_WORK(&mm->async_put_work, mmdrop_async_fn); in mmdrop_async()
741 schedule_work(&mm->async_put_work); in mmdrop_async()
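__mmdrop() is the destructor for the struct itself: it runs only when mm_count reaches zero, and by then the address space must already be gone (hence the BUG_ON/WARN_ON_ONCE guards at 717-719). mmdrop_async() exists because that final decrement can happen in a context where freeing is unsafe, so it punts __mmdrop() to a workqueue via async_put_work. A compile-ready userspace sketch of the underlying rule, with illustrative names only:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
            atomic_int count;               /* plays the role of mm->mm_count */
    };

    static void obj_drop(struct obj *o)     /* the mmdrop() side */
    {
            /* atomic_dec_and_test(): the thread that removes the last
             * reference is the one that runs the destructor. */
            if (atomic_fetch_sub(&o->count, 1) == 1)
                    free(o);                /* __mmdrop() */
    }

    /* mmdrop_async() follows the same rule but, instead of freeing
     * inline, queues the destructor to run later in worker context. */
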
1023 static void mm_init_aio(struct mm_struct *mm) in mm_init_aio() argument
1026 spin_lock_init(&mm->ioctx_lock); in mm_init_aio()
1027 mm->ioctx_table = NULL; in mm_init_aio()
1031 static __always_inline void mm_clear_owner(struct mm_struct *mm, in mm_clear_owner() argument
1035 if (mm->owner == p) in mm_clear_owner()
1036 WRITE_ONCE(mm->owner, NULL); in mm_clear_owner()
1040 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) in mm_init_owner() argument
1043 mm->owner = p; in mm_init_owner()
1047 static void mm_init_pasid(struct mm_struct *mm) in mm_init_pasid() argument
1050 mm->pasid = INIT_PASID; in mm_init_pasid()
1054 static void mm_init_uprobes_state(struct mm_struct *mm) in mm_init_uprobes_state() argument
1057 mm->uprobes_state.xol_area = NULL; in mm_init_uprobes_state()
1061 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, in mm_init() argument
1064 mm->mmap = NULL; in mm_init()
1065 mm->mm_rb = RB_ROOT; in mm_init()
1066 mm->vmacache_seqnum = 0; in mm_init()
1068 rwlock_init(&mm->mm_rb_lock); in mm_init()
1070 atomic_set(&mm->mm_users, 1); in mm_init()
1071 atomic_set(&mm->mm_count, 1); in mm_init()
1072 seqcount_init(&mm->write_protect_seq); in mm_init()
1073 mmap_init_lock(mm); in mm_init()
1074 INIT_LIST_HEAD(&mm->mmlist); in mm_init()
1075 mm->core_state = NULL; in mm_init()
1076 mm_pgtables_bytes_init(mm); in mm_init()
1077 mm->map_count = 0; in mm_init()
1078 mm->locked_vm = 0; in mm_init()
1079 atomic_set(&mm->has_pinned, 0); in mm_init()
1080 atomic64_set(&mm->pinned_vm, 0); in mm_init()
1081 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); in mm_init()
1082 spin_lock_init(&mm->page_table_lock); in mm_init()
1083 spin_lock_init(&mm->arg_lock); in mm_init()
1084 mm_init_cpumask(mm); in mm_init()
1085 mm_init_aio(mm); in mm_init()
1086 mm_init_owner(mm, p); in mm_init()
1087 mm_init_pasid(mm); in mm_init()
1088 RCU_INIT_POINTER(mm->exe_file, NULL); in mm_init()
1089 if (!mmu_notifier_subscriptions_init(mm)) in mm_init()
1091 init_tlb_flush_pending(mm); in mm_init()
1093 mm->pmd_huge_pte = NULL; in mm_init()
1095 mm_init_uprobes_state(mm); in mm_init()
1096 hugetlb_count_init(mm); in mm_init()
1098 if (current->mm) { in mm_init()
1099 mm->flags = current->mm->flags & MMF_INIT_MASK; in mm_init()
1100 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; in mm_init()
1102 mm->flags = default_dump_filter; in mm_init()
1103 mm->def_flags = 0; in mm_init()
1106 if (mm_alloc_pgd(mm)) in mm_init()
1109 if (init_new_context(p, mm)) in mm_init()
1112 mm->user_ns = get_user_ns(user_ns); in mm_init()
1113 return mm; in mm_init()
1116 mm_free_pgd(mm); in mm_init()
1118 free_mm(mm); in mm_init()
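mm_init() resets every field a fresh address space must not inherit (locks, counters, list heads), then performs the two steps that can fail: mm_alloc_pgd() and init_new_context(). The fragments at 1116-1118 are the tail of the classic goto-unwind ladder: a context failure frees the pgd, a pgd failure frees the mm itself. A compile-ready userspace sketch of that ladder (label names borrowed from the kernel's style; the helpers are illustrative):

    #include <stdlib.h>

    struct res { void *pgd, *ctx; };

    /* Like mm_init(): the caller allocated r; on failure we release
     * everything acquired so far, including r, in reverse order. */
    static struct res *res_init(struct res *r)
    {
            r->pgd = malloc(64);            /* stands in for mm_alloc_pgd() */
            if (!r->pgd)
                    goto fail_nopgd;
            r->ctx = malloc(64);            /* stands in for init_new_context() */
            if (!r->ctx)
                    goto fail_nocontext;
            return r;                       /* fully initialised */

    fail_nocontext:
            free(r->pgd);                   /* undo the pgd allocation */
    fail_nopgd:
            free(r);                        /* free_mm(): the caller's allocation dies here */
            return NULL;
    }
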
1127 struct mm_struct *mm; in mm_alloc() local
1129 mm = allocate_mm(); in mm_alloc()
1130 if (!mm) in mm_alloc()
1133 memset(mm, 0, sizeof(*mm)); in mm_alloc()
1134 return mm_init(mm, current, current_user_ns()); in mm_alloc()
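mm_alloc() is the thin public constructor: take a zeroed mm from the allocator and hand it to mm_init() on behalf of the current task and its user namespace. A plausible reconstruction from the fragments:

    struct mm_struct *mm_alloc(void)
    {
            struct mm_struct *mm;

            mm = allocate_mm();
            if (!mm)
                    return NULL;

            memset(mm, 0, sizeof(*mm));
            return mm_init(mm, current, current_user_ns());
    }
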
1137 static inline void __mmput(struct mm_struct *mm) in __mmput() argument
1139 VM_BUG_ON(atomic_read(&mm->mm_users)); in __mmput()
1141 uprobe_clear_state(mm); in __mmput()
1142 exit_aio(mm); in __mmput()
1143 ksm_exit(mm); in __mmput()
1144 khugepaged_exit(mm); /* must run before exit_mmap */ in __mmput()
1145 exit_mmap(mm); in __mmput()
1146 mm_put_huge_zero_page(mm); in __mmput()
1147 set_mm_exe_file(mm, NULL); in __mmput()
1148 if (!list_empty(&mm->mmlist)) { in __mmput()
1150 list_del(&mm->mmlist); in __mmput()
1153 if (mm->binfmt) in __mmput()
1154 module_put(mm->binfmt->module); in __mmput()
1155 mmdrop(mm); in __mmput()
1161 void mmput(struct mm_struct *mm) in mmput() argument
1165 if (atomic_dec_and_test(&mm->mm_users)) { in mmput()
1167 __mmput(mm); in mmput()
1175 struct mm_struct *mm = container_of(work, struct mm_struct, in mmput_async_fn() local
1178 __mmput(mm); in mmput_async_fn()
1181 void mmput_async(struct mm_struct *mm) in mmput_async() argument
1183 if (atomic_dec_and_test(&mm->mm_users)) { in mmput_async()
1184 INIT_WORK(&mm->async_put_work, mmput_async_fn); in mmput_async()
1185 schedule_work(&mm->async_put_work); in mmput_async()
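mmput() and mmdrop() drive a two-level reference count: mm_users counts tasks actively using the address space, while mm_count keeps struct mm_struct itself alive, with all users collectively holding a single mm_count reference. The last mmput() runs __mmput(), which tears the mappings down (exit_aio, ksm_exit, khugepaged_exit, exit_mmap, ...) and finally calls mmdrop() to release that structural reference; mmput_async() is the same handoff deferred to a workqueue. A compile-ready userspace sketch of the scheme (illustrative names):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct mm_sketch {
            atomic_int users;       /* mm->mm_users: tasks using the mappings */
            atomic_int count;       /* mm->mm_count: refs on the struct itself */
            void *mappings;
    };

    static void sketch_mmdrop(struct mm_sketch *mm)
    {
            if (atomic_fetch_sub(&mm->count, 1) == 1)
                    free(mm);                       /* __mmdrop(): struct goes away */
    }

    static void sketch_mmput(struct mm_sketch *mm)
    {
            if (atomic_fetch_sub(&mm->users, 1) == 1) {
                    free(mm->mappings);             /* __mmput(): exit_mmap() and friends */
                    mm->mappings = NULL;
                    sketch_mmdrop(mm);              /* drop the ref the users held */
            }
    }
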
1202 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) in set_mm_exe_file() argument
1211 old_exe_file = rcu_dereference_raw(mm->exe_file); in set_mm_exe_file()
1215 rcu_assign_pointer(mm->exe_file, new_exe_file); in set_mm_exe_file()
1226 struct file *get_mm_exe_file(struct mm_struct *mm) in get_mm_exe_file() argument
1231 exe_file = rcu_dereference(mm->exe_file); in get_mm_exe_file()
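mm->exe_file is published with rcu_assign_pointer() in set_mm_exe_file() and read under RCU in get_mm_exe_file(); a reader must convert its RCU-protected peek into a real file reference before leaving the read-side critical section. A plausible reconstruction, assuming the get_file_rcu() idiom kernels of this era used for that conversion:

    struct file *get_mm_exe_file(struct mm_struct *mm)
    {
            struct file *exe_file;

            rcu_read_lock();
            exe_file = rcu_dereference(mm->exe_file);
            /* A file already on its way out cannot be resurrected;
             * get_file_rcu() fails rather than revive a zero refcount. */
            if (exe_file && !get_file_rcu(exe_file))
                    exe_file = NULL;
            rcu_read_unlock();
            return exe_file;
    }
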
1249 struct mm_struct *mm; in get_task_exe_file() local
1252 mm = task->mm; in get_task_exe_file()
1253 if (mm) { in get_task_exe_file()
1255 exe_file = get_mm_exe_file(mm); in get_task_exe_file()
1273 struct mm_struct *mm; in get_task_mm() local
1276 mm = task->mm; in get_task_mm()
1277 if (mm) { in get_task_mm()
1279 mm = NULL; in get_task_mm()
1281 mmget(mm); in get_task_mm()
1284 return mm; in get_task_mm()
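The bare "mm = NULL" at 1279 only makes sense with its condition restored: get_task_mm() refuses to return the mm of a kernel thread. A plausible reconstruction (the PF_KTHREAD test and the task_lock() pairing are the standard form, assumed here):

    struct mm_struct *get_task_mm(struct task_struct *task)
    {
            struct mm_struct *mm;

            task_lock(task);
            mm = task->mm;
            if (mm) {
                    if (task->flags & PF_KTHREAD)
                            mm = NULL;      /* kernel threads have no mm of their own */
                    else
                            mmget(mm);      /* take an mm_users reference */
            }
            task_unlock(task);
            return mm;
    }
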
1290 struct mm_struct *mm; in mm_access() local
1297 mm = get_task_mm(task); in mm_access()
1298 if (mm && mm != current->mm && in mm_access()
1300 mmput(mm); in mm_access()
1301 mm = ERR_PTR(-EACCES); in mm_access()
1305 return mm; in mm_access()
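mm_access() is the take-then-check pattern: acquire the reference via get_task_mm() first, and if the caller turns out not to be permitted to touch that address space (a ptrace-style check in the real code, elided from the fragments), give the reference back and return an error pointer instead. A compile-ready userspace sketch of the pattern; may_access() and the types are made up for illustration:

    #include <errno.h>
    #include <stdbool.h>

    #define ERR_PTR(err) ((void *)(long)(err))      /* kernel-style error pointer */

    struct thing {
            int refs;
    };

    static bool may_access(void)                    /* stand-in policy check */
    {
            return false;
    }

    static struct thing *thing_access(struct thing *t)
    {
            t->refs++;                              /* like get_task_mm(): ref first */
            if (!may_access()) {
                    t->refs--;                      /* like mmput(): give it back */
                    return ERR_PTR(-EACCES);
            }
            return t;
    }
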
1355 static void mm_release(struct task_struct *tsk, struct mm_struct *mm) in mm_release() argument
1360 deactivate_mm(tsk, mm); in mm_release()
1369 atomic_read(&mm->mm_users) > 1) { in mm_release()
1389 void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) in exit_mm_release() argument
1392 mm_release(tsk, mm); in exit_mm_release()
1395 void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) in exec_mm_release() argument
1398 mm_release(tsk, mm); in exec_mm_release()
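mm_release() runs on both paths, exit and exec. The mm_users > 1 guard at 1369 belongs to the clear_child_tid handshake: only when the address space survives this task is it worth zeroing the child-tid word and waking any futex waiter on it. A sketch of that branch as it is conventionally written (condition and futex call approximated, not confirmed by the fragments):

    if (tsk->clear_child_tid) {
            if (atomic_read(&mm->mm_users) > 1) {
                    /* Address space lives on: report this thread's death
                     * to userspace (pthread_join() waits on this word). */
                    put_user(0, tsk->clear_child_tid);
                    do_futex(tsk->clear_child_tid, FUTEX_WAKE, 1,
                             NULL, NULL, 0, 0);
            }
            tsk->clear_child_tid = NULL;
    }
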
1414 struct mm_struct *mm; in dup_mm() local
1417 mm = allocate_mm(); in dup_mm()
1418 if (!mm) in dup_mm()
1421 memcpy(mm, oldmm, sizeof(*mm)); in dup_mm()
1423 if (!mm_init(mm, tsk, mm->user_ns)) in dup_mm()
1426 err = dup_mmap(mm, oldmm); in dup_mm()
1430 mm->hiwater_rss = get_mm_rss(mm); in dup_mm()
1431 mm->hiwater_vm = mm->total_vm; in dup_mm()
1433 if (mm->binfmt && !try_module_get(mm->binfmt->module)) in dup_mm()
1436 return mm; in dup_mm()
1440 mm->binfmt = NULL; in dup_mm()
1441 mm_init_owner(mm, NULL); in dup_mm()
1442 mmput(mm); in dup_mm()
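dup_mm() stitches the pieces together, and the order matters: memcpy() the parent wholesale, re-run mm_init() to reset everything that must not be shared (locks, counters, the pgd), then dup_mmap() to copy the mappings, and finally reset the high-water marks to the fresh copy's usage. The shape, as the fragments suggest (label name assumed):

    mm = allocate_mm();
    if (!mm)
            goto fail_nomem;

    memcpy(mm, oldmm, sizeof(*mm));         /* start from a byte-for-byte copy */

    if (!mm_init(mm, tsk, mm->user_ns))     /* re-init what must not be shared */
            goto fail_nomem;

    err = dup_mmap(mm, oldmm);              /* then copy the mappings */
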
1450 struct mm_struct *mm, *oldmm; in copy_mm() local
1460 tsk->mm = NULL; in copy_mm()
1468 oldmm = current->mm; in copy_mm()
1477 mm = oldmm; in copy_mm()
1482 mm = dup_mm(tsk, current->mm); in copy_mm()
1483 if (!mm) in copy_mm()
1487 tsk->mm = mm; in copy_mm()
1488 tsk->active_mm = mm; in copy_mm()
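copy_mm() is where CLONE_VM bites: a thread shares the parent's mm outright (one more mm_users reference, same struct), while a full fork gets an independent copy via dup_mm(). A plausible reconstruction of the decision (error handling simplified):

    if (clone_flags & CLONE_VM) {
            mmget(oldmm);                   /* share: one more user, same mm */
            mm = oldmm;
    } else {
            mm = dup_mm(tsk, current->mm);  /* fork: independent copy */
            if (!mm)
                    return -ENOMEM;
    }

    tsk->mm = mm;
    tsk->active_mm = mm;
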
1887 if (!tsk->mm) in copy_oom_score_adj()
1896 set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); in copy_oom_score_adj()
2426 if (p->mm) { in copy_process()
2427 mm_clear_owner(p->mm, p); in copy_process()
2428 mmput(p->mm); in copy_process()