/kernel/sched/ |
D | membarrier.c |
      35  struct mm_struct *mm = (struct mm_struct *) info;  in ipi_sync_rq_state() local
      37  if (current->mm != mm)  in ipi_sync_rq_state()
      40  atomic_read(&mm->membarrier_state));  in ipi_sync_rq_state()
      50  void membarrier_exec_mmap(struct mm_struct *mm)  in membarrier_exec_mmap() argument
      58  atomic_set(&mm->membarrier_state, 0);  in membarrier_exec_mmap()
     136  struct mm_struct *mm = current->mm;  in membarrier_private_expedited() local
     141  if (!(atomic_read(&mm->membarrier_state) &  in membarrier_private_expedited()
     145  if (!(atomic_read(&mm->membarrier_state) &  in membarrier_private_expedited()
     150  if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)  in membarrier_private_expedited()
     178  if (p && p->mm == mm)  in membarrier_private_expedited()
     [all …]
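The recurring test above is against mm->membarrier_state, a per-mm atomic bitmask of registered membarrier modes. A minimal sketch of that check, assuming the MEMBARRIER_STATE_* flags from include/linux/sched/mm.h; check_membarrier_registered() is a hypothetical helper, not a kernel symbol:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/sched/mm.h>

    /* Sketch: expedited membarrier commands bail out with -EPERM unless the
     * corresponding mode was registered earlier, which is what the flag
     * tests at lines 141/145 implement. */
    static int check_membarrier_registered(struct mm_struct *mm, int state)
    {
            if (!(atomic_read(&mm->membarrier_state) & state))
                    return -EPERM;  /* mode never registered for this mm */
            return 0;
    }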
|
D | fair.c |
    1117  rss = get_mm_rss(p->mm);  in task_nr_scan_windows()
    1953  p->mm->numa_next_scan = jiffies +  in update_task_scan_period()
    2138  seq = READ_ONCE(p->mm->numa_scan_seq);  in task_numa_placement()
    2307  if (tsk->mm == current->mm)  in task_numa_group()
    2407  if (!p->mm)  in task_numa_fault()
    2476  WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);  in reset_ptenuma_scan()
    2477  p->mm->numa_scan_offset = 0;  in reset_ptenuma_scan()
    2488  struct mm_struct *mm = p->mm;  in task_numa_work() local
    2509  if (!mm->numa_next_scan) {  in task_numa_work()
    2510  mm->numa_next_scan = now +  in task_numa_work()
    [all …]
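NUMA-balancing state lives on the mm (numa_scan_seq, numa_next_scan, numa_scan_offset, all CONFIG_NUMA_BALANCING fields) and the sequence counter is read locklessly, hence the READ_ONCE/WRITE_ONCE pairing between lines 2138 and 2476. A sketch of the reset step; bump_numa_scan_seq() is a hypothetical name:

    #include <linux/compiler.h>
    #include <linux/mm_types.h>

    /* Sketch: bump the counter with WRITE_ONCE to pair with the lockless
     * READ_ONCE in task_numa_placement(); resetting the offset restarts the
     * periodic PTE scan from the bottom of the address space. */
    static void bump_numa_scan_seq(struct mm_struct *mm)
    {
            WRITE_ONCE(mm->numa_scan_seq, READ_ONCE(mm->numa_scan_seq) + 1);
            mm->numa_scan_offset = 0;
    }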
|
D | core.c |
    2091  if (p->mm && printk_ratelimit()) {  in select_fallback_rq()
    3188  struct mm_struct *mm = rq->prev_mm;  in finish_task_switch() local
    3241  if (mm) {  in finish_task_switch()
    3242  membarrier_mm_sync_core_before_usermode(mm);  in finish_task_switch()
    3243  mmdrop(mm);  in finish_task_switch()
    3353  if (!next->mm) {  // to kernel  in context_switch()
    3357  if (prev->mm)  // from user  in context_switch()
    3362  membarrier_switch_mm(rq, prev->active_mm, next->mm);  in context_switch()
    3371  switch_mm_irqs_off(prev->active_mm, next->mm, next);  in context_switch()
    3373  if (!prev->mm) {  // from kernel  in context_switch()
    [all …]
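These hits implement the lazy-TLB convention: a kernel thread has no mm of its own (p->mm == NULL) and borrows the outgoing task's address space through active_mm, pinning only the mm_struct with mmgrab() until finish_task_switch() releases it with mmdrop(). A condensed, illustrative sketch of the borrow step, not the full context_switch():

    #include <linux/sched.h>
    #include <linux/sched/mm.h>

    /* Sketch: pin the struct (mm_count), not the address space (mm_users);
     * the pages may be torn down while a lazy-TLB kernel thread still
     * points at the mm_struct. */
    static void borrow_mm_for_kthread(struct task_struct *prev,
                                      struct task_struct *next)
    {
            if (!next->mm) {                        /* to kernel */
                    next->active_mm = prev->active_mm;
                    mmgrab(prev->active_mm);        /* mmdrop() comes later */
            }
    }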
|
D | debug.c |
     836  if (p->mm)  in sched_show_numa()
     837  P(mm->numa_scan_seq);  in sched_show_numa()
|
/kernel/ |
D | fork.c |
     346  struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)  in vm_area_alloc() argument
     352  vma_init(vma, mm);  in vm_area_alloc()
     483  static __latent_entropy int dup_mmap(struct mm_struct *mm,  in dup_mmap() argument
     498  uprobe_dup_mmap(oldmm, mm);  in dup_mmap()
     502  down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);  in dup_mmap()
     505  RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));  in dup_mmap()
     507  mm->total_vm = oldmm->total_vm;  in dup_mmap()
     508  mm->data_vm = oldmm->data_vm;  in dup_mmap()
     509  mm->exec_vm = oldmm->exec_vm;  in dup_mmap()
     510  mm->stack_vm = oldmm->stack_vm;  in dup_mmap()
     [all …]
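dup_mmap() populates the child mm under a write-locked mmap_sem, taken with down_write_nested() because fork already holds the parent's semaphore and lockdep would otherwise flag the second acquisition. A hypothetical fragment isolating the counter copy at lines 507-510:

    #include <linux/lockdep.h>
    #include <linux/mm_types.h>
    #include <linux/rwsem.h>

    /* Sketch: the aggregate VM counters are copied wholesale before the
     * per-VMA duplication loop runs; the nested annotation keeps lockdep
     * quiet about the second mmap_sem. */
    static void copy_vm_counters(struct mm_struct *mm, struct mm_struct *oldmm)
    {
            down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
            mm->total_vm = oldmm->total_vm;
            mm->data_vm  = oldmm->data_vm;
            mm->exec_vm  = oldmm->exec_vm;
            mm->stack_vm = oldmm->stack_vm;
            up_write(&mm->mmap_sem);
    }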
|
D | sys.c |
    1772  struct mm_struct *mm = get_task_mm(p);  in getrusage() local
    1774  if (mm) {  in getrusage()
    1775  setmax_mm_hiwater_rss(&maxrss, mm);  in getrusage()
    1776  mmput(mm);  in getrusage()
    1814  static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)  in prctl_set_mm_exe_file() argument
    1843  exe_file = get_mm_exe_file(mm);  in prctl_set_mm_exe_file()
    1848  down_read(&mm->mmap_sem);  in prctl_set_mm_exe_file()
    1849  for (vma = mm->mmap; vma; vma = vma->vm_next) {  in prctl_set_mm_exe_file()
    1857  up_read(&mm->mmap_sem);  in prctl_set_mm_exe_file()
    1864  old_exe = xchg(&mm->exe_file, exe.file);  in prctl_set_mm_exe_file()
    [all …]
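Lines 1772-1776 are the canonical take-and-release pattern for another task's mm: get_task_mm() returns NULL for kernel threads and otherwise bumps mm_users, so every success path must end in mmput(). A minimal sketch with a hypothetical callback:

    #include <linux/sched/mm.h>

    /* Sketch: run fn against p's address space, if it has one. */
    static void with_task_mm(struct task_struct *p,
                             void (*fn)(struct mm_struct *mm))
    {
            struct mm_struct *mm = get_task_mm(p);

            if (!mm)
                    return;         /* kernel thread, or mm already torn down */
            fn(mm);
            mmput(mm);              /* pairs with get_task_mm() */
    }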
|
D | exit.c |
     347  void mm_update_next_owner(struct mm_struct *mm)  in mm_update_next_owner() argument
     356  if (mm->owner != p)  in mm_update_next_owner()
     363  if (atomic_read(&mm->mm_users) <= 1) {  in mm_update_next_owner()
     364  WRITE_ONCE(mm->owner, NULL);  in mm_update_next_owner()
     373  if (c->mm == mm)  in mm_update_next_owner()
     381  if (c->mm == mm)  in mm_update_next_owner()
     392  if (c->mm == mm)  in mm_update_next_owner()
     394  if (c->mm)  in mm_update_next_owner()
     404  WRITE_ONCE(mm->owner, NULL);  in mm_update_next_owner()
     420  if (c->mm != mm) {  in mm_update_next_owner()
     [all …]
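mm_update_next_owner() matters under CONFIG_MEMCG, where mm->owner is the task charged for the mm; when that task exits while other users of the mm survive, a replacement is searched for among children, then siblings, then all tasks. A condensed sketch of the first two passes; locking (read_lock(&tasklist_lock)) and the global fallback scan are omitted, and find_new_owner() is a hypothetical name:

    #include <linux/list.h>
    #include <linux/sched.h>

    /* Sketch: prefer a child, then a sibling, that shares the mm. */
    static struct task_struct *find_new_owner(struct mm_struct *mm,
                                              struct task_struct *p)
    {
            struct task_struct *c;

            list_for_each_entry(c, &p->children, sibling)
                    if (c->mm == mm)
                            return c;
            list_for_each_entry(c, &p->real_parent->children, sibling)
                    if (c->mm == mm)
                            return c;
            return NULL;    /* caller falls back to for_each_process() */
    }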
|
D | tsacct.c |
      85  struct mm_struct *mm;  in xacct_add_tsk() local
      92  mm = get_task_mm(p);  in xacct_add_tsk()
      93  if (mm) {  in xacct_add_tsk()
      95  stats->hiwater_rss = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB;  in xacct_add_tsk()
      96  stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB;  in xacct_add_tsk()
      97  mmput(mm);  in xacct_add_tsk()
     121  if (!likely(tsk->mm))  in __acct_update_integrals()
     136  tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm) >> 10;  in __acct_update_integrals()
     137  tsk->acct_vm_mem1 += delta * tsk->mm->total_vm >> 10;  in __acct_update_integrals()
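The scaling at lines 95-96 is a unit conversion: get_mm_hiwater_rss() and get_mm_hiwater_vm() return page counts, while taskstats reports kilobytes (KB is 1024 in kernel/tsacct.c). A one-line sketch making the conversion explicit; mm_hiwater_rss_kb() is a hypothetical name:

    #include <linux/mm.h>

    /* Sketch: pages -> bytes -> kilobytes. */
    static u64 mm_hiwater_rss_kb(struct mm_struct *mm)
    {
            return (u64)get_mm_hiwater_rss(mm) * PAGE_SIZE / 1024;
    }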
|
D | ptrace.c |
      45  struct mm_struct *mm;  in ptrace_access_vm() local
      48  mm = get_task_mm(tsk);  in ptrace_access_vm()
      49  if (!mm)  in ptrace_access_vm()
      54  ((get_dumpable(mm) != SUID_DUMP_USER) &&  in ptrace_access_vm()
      55  !ptracer_capable(tsk, mm->user_ns))) {  in ptrace_access_vm()
      56  mmput(mm);  in ptrace_access_vm()
      60  ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);  in ptrace_access_vm()
      61  mmput(mm);  in ptrace_access_vm()
     284  struct mm_struct *mm;  in __ptrace_may_access() local
     345  mm = task->mm;  in __ptrace_may_access()
     [all …]
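Note the two mmput() calls at lines 56 and 61: the reference taken by get_task_mm() must be dropped on the access-denied path as well as after a successful __access_remote_vm(). A sketch of that shape, with the permission test reduced to the dumpable check alone (the real code also consults ptracer_capable()); remote_access() is a hypothetical name:

    #include <linux/mm.h>
    #include <linux/sched/coredump.h>
    #include <linux/sched/mm.h>

    /* Sketch: every exit after a successful get_task_mm() releases the mm. */
    static int remote_access(struct task_struct *tsk, unsigned long addr,
                             void *buf, int len, unsigned int gup_flags)
    {
            struct mm_struct *mm = get_task_mm(tsk);
            int ret;

            if (!mm)
                    return 0;
            if (get_dumpable(mm) != SUID_DUMP_USER) {
                    mmput(mm);      /* denied: still drop the reference */
                    return 0;
            }
            ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
            mmput(mm);
            return ret;
    }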
|
D | acct.c |
     539  if (group_dead && current->mm) {  in acct_collect()
     542  down_read(&current->mm->mmap_sem);  in acct_collect()
     543  vma = current->mm->mmap;  in acct_collect()
     548  up_read(&current->mm->mmap_sem);  in acct_collect()
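acct_collect() totals VMA sizes under a read-locked mmap_sem; the walk is the classic singly-linked traversal of mm->mmap. A self-contained sketch of the same loop; total_vsize() is a hypothetical name:

    #include <linux/mm.h>

    /* Sketch: sum the spans of every mapping in the process. */
    static unsigned long total_vsize(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;
            unsigned long vsize = 0;

            down_read(&mm->mmap_sem);
            for (vma = mm->mmap; vma; vma = vma->vm_next)
                    vsize += vma->vm_end - vma->vm_start;
            up_read(&mm->mmap_sem);
            return vsize;
    }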
|
D | futex.c |
     336  mmgrab(key->private.mm);  in futex_get_mm()
     469  mmdrop(key->private.mm);  in drop_futex_key_refs()
     530  struct mm_struct *mm = current->mm;  in get_futex_key() local
     557  key->private.mm = mm;  in get_futex_key()
     659  key->private.mm = mm;  in get_futex_key()
     753  struct mm_struct *mm = current->mm;  in fault_in_user_writeable() local
     756  down_read(&mm->mmap_sem);  in fault_in_user_writeable()
     757  ret = fixup_user_fault(current, mm, (unsigned long)uaddr,  in fault_in_user_writeable()
     759  up_read(&mm->mmap_sem);  in fault_in_user_writeable()
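The mmgrab()/mmdrop() pair at lines 336/469 pins only the mm_struct (mm_count), not the address space (mm_users): a private futex key uses the mm pointer purely as an identity cookie alongside the user address, so keeping page tables alive would be wasted work. A sketch of key setup; futex_key_init() is a hypothetical name:

    #include <linux/futex.h>
    #include <linux/sched/mm.h>

    /* Sketch: the key's mm pointer is only ever compared, never
     * dereferenced for mappings, so the cheap mm_count reference suffices. */
    static void futex_key_init(union futex_key *key, unsigned long address)
    {
            key->private.mm = current->mm;
            key->private.address = address;
            mmgrab(key->private.mm);        /* paired with mmdrop() on release */
    }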
|
D | kcmp.c | 200 ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM); in SYSCALL_DEFINE5()
|
D | context_tracking.c | 65 WARN_ON_ONCE(!current->mm); in __context_tracking_enter()
|
D | latencytop.c | 93 if (!tsk->mm) in account_global_scheduler_latency()
|
D | audit.c |
    2086  struct mm_struct *mm)  in audit_log_d_path_exe() argument
    2090  if (!mm)  in audit_log_d_path_exe()
    2093  exe_file = get_mm_exe_file(mm);  in audit_log_d_path_exe()
    2152  audit_log_d_path_exe(ab, current->mm);  in audit_log_task_info()
|
D | cred.c |
     459  if (task->mm)  in commit_creds()
     460  set_dumpable(task->mm, suid_dumpable);  in commit_creds()
|
D | audit.h | 244 struct mm_struct *mm);
|
/kernel/events/ |
D | uprobes.c |
      83  struct mm_struct *mm;  member
     157  struct mm_struct *mm = vma->vm_mm;  in __replace_page() local
     167  mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,  in __replace_page()
     196  dec_mm_counter(mm, MM_ANONPAGES);  in __replace_page()
     199  dec_mm_counter(mm, mm_counter_file(old_page));  in __replace_page()
     200  inc_mm_counter(mm, MM_ANONPAGES);  in __replace_page()
     206  set_pte_at_notify(mm, addr, pvmw.pte,  in __replace_page()
     293  delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)  in delayed_uprobe_check() argument
     298  if (du->uprobe == uprobe && du->mm == mm)  in delayed_uprobe_check()
     303  static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)  in delayed_uprobe_add() argument
     [all …]
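__replace_page() swaps a file-backed page for an anonymous one carrying the breakpoint, so the per-mm RSS counters must move buckets, which is what lines 199-200 do. A condensed sketch of just the accounting; the surrounding page-table and MMU-notifier work is omitted, and account_anon_replace() is a hypothetical name:

    #include <linux/mm.h>

    /* Sketch: an anon page replaces a file page -> move one unit from the
     * file-backed counter to MM_ANONPAGES. */
    static void account_anon_replace(struct mm_struct *mm,
                                     struct page *old_page)
    {
            if (!PageAnon(old_page)) {
                    dec_mm_counter(mm, mm_counter_file(old_page));
                    inc_mm_counter(mm, MM_ANONPAGES);
            }
    }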
|
D | callchain.c | 206 if (current->mm) in get_perf_callchain()
|
/kernel/cgroup/ |
D | cpuset.c |
    1569  struct mm_struct *mm;  member
    1580  do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);  in cpuset_migrate_mm_workfn()
    1581  mmput(mwork->mm);  in cpuset_migrate_mm_workfn()
    1585  static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,  in cpuset_migrate_mm() argument
    1592  mwork->mm = mm;  in cpuset_migrate_mm()
    1598  mmput(mm);  in cpuset_migrate_mm()
    1667  struct mm_struct *mm;  in update_tasks_nodemask() local
    1672  mm = get_task_mm(task);  in update_tasks_nodemask()
    1673  if (!mm)  in update_tasks_nodemask()
    1678  mpol_rebind_mm(mm, &cs->mems_allowed);  in update_tasks_nodemask()
    [all …]
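cpuset_migrate_mm() hands its mm reference to a work item: mmput() then runs in the workfn after do_migrate_pages() (line 1581), or immediately if no work item could be allocated (line 1598). A sketch of the hand-off; the struct mirrors the member at line 1569, and cpuset_migrate_mm_workfn/cpuset_migrate_mm_wq stand in for the file-local symbols in kernel/cgroup/cpuset.c:

    #include <linux/nodemask.h>
    #include <linux/sched/mm.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct cpuset_migrate_mm_work {
            struct work_struct work;
            struct mm_struct *mm;           /* reference owned by the workfn */
            nodemask_t from;
            nodemask_t to;
    };

    static void cpuset_migrate_mm_workfn(struct work_struct *work);
    static struct workqueue_struct *cpuset_migrate_mm_wq;   /* assumed */

    static void migrate_mm_async(struct mm_struct *mm, const nodemask_t *from,
                                 const nodemask_t *to)
    {
            struct cpuset_migrate_mm_work *mwork;

            mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
            if (mwork) {
                    mwork->mm = mm;         /* ownership moves to the workfn */
                    mwork->from = *from;
                    mwork->to = *to;
                    INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
                    queue_work(cpuset_migrate_mm_wq, &mwork->work);
            } else {
                    mmput(mm);              /* nothing queued: drop it here */
            }
    }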
|
/kernel/trace/ |
D | trace_output.c |
     368  static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,  in seq_print_user_ip() argument
     378  if (mm) {  in seq_print_user_ip()
     381  down_read(&mm->mmap_sem);  in seq_print_user_ip()
     382  vma = find_vma(mm, ip);  in seq_print_user_ip()
     393  up_read(&mm->mmap_sem);  in seq_print_user_ip()
    1089  struct mm_struct *mm = NULL;  in trace_user_stack_print() local
    1105  mm = get_task_mm(task);  in trace_user_stack_print()
    1116  seq_print_user_ip(s, mm, ip, flags);  in trace_user_stack_print()
    1120  if (mm)  in trace_user_stack_print()
    1121  mmput(mm);  in trace_user_stack_print()
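seq_print_user_ip() resolves a user-space address to a mapping by walking the VMA tree under mmap_sem (lines 381-393). A minimal sketch of the lookup, computing the offset into the mapped region; ip_file_offset() is a hypothetical name:

    #include <linux/mm.h>

    /* Sketch: find_vma() returns the first VMA with vm_end > ip, so the
     * vm_start bound must still be checked before trusting the hit. */
    static unsigned long ip_file_offset(struct mm_struct *mm, unsigned long ip)
    {
            struct vm_area_struct *vma;
            unsigned long off = 0;

            down_read(&mm->mmap_sem);
            vma = find_vma(mm, ip);
            if (vma && vma->vm_start <= ip && vma->vm_file)
                    off = ip - vma->vm_start;   /* offset into the mapping */
            up_read(&mm->mmap_sem);
            return off;
    }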
|
D | trace_uprobe.c |
    1052  struct mm_struct *mm);
    1201  __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)  in __uprobe_perf_filter() argument
    1209  if (event->hw.target->mm == mm)  in __uprobe_perf_filter()
    1220  return __uprobe_perf_filter(filter, event->hw.target->mm);  in trace_uprobe_filter_event()
    1324  enum uprobe_filter_ctx ctx, struct mm_struct *mm)  in uprobe_perf_filter() argument
    1334  ret = __uprobe_perf_filter(filter, mm);  in uprobe_perf_filter()
    1397  if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))  in uprobe_perf_func()
|
/kernel/bpf/ |
D | stackmap.c |
     307  if (!user || !current || !current->mm || irq_work_busy ||  in stack_map_get_build_id_offset()
     308  down_read_trylock(&current->mm->mmap_sem) == 0) {  in stack_map_get_build_id_offset()
     319  vma = find_vma(current->mm, ips[i]);  in stack_map_get_build_id_offset()
     333  up_read(&current->mm->mmap_sem);  in stack_map_get_build_id_offset()
     335  work->sem = &current->mm->mmap_sem;  in stack_map_get_build_id_offset()
     342  rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);  in stack_map_get_build_id_offset()
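stack_map_get_build_id_offset() may run where sleeping is forbidden, hence down_read_trylock() at line 308 and the irq_work that performs the deferred up_read() via work->sem (line 335). A sketch of the non-blocking guard; try_lock_current_mm() is a hypothetical name:

    #include <linux/rwsem.h>
    #include <linux/sched.h>

    /* Sketch: never block on mmap_sem here -- the caller may be in NMI or
     * IRQ context; bail out and let the consumer handle the miss. */
    static bool try_lock_current_mm(void)
    {
            if (!current->mm)
                    return false;   /* kernel thread: nothing to walk */
            return down_read_trylock(&current->mm->mmap_sem) != 0;
    }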
|
/kernel/debug/kdb/ |
D | kdb_support.c | 646 } else if (!p->mm && state == 'S') { in kdb_task_state_char()
|
/kernel/debug/ |
D | debug_core.c | 289 if (current->mm) { in kgdb_flush_swbreak_addr()
|