
Searched refs:mm (results 1 – 25 of 27), sorted by relevance


/kernel/
fork.c
411 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) in dup_mmap() argument
421 uprobe_dup_mmap(oldmm, mm); in dup_mmap()
425 down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); in dup_mmap()
428 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); in dup_mmap()
430 mm->total_vm = oldmm->total_vm; in dup_mmap()
431 mm->shared_vm = oldmm->shared_vm; in dup_mmap()
432 mm->exec_vm = oldmm->exec_vm; in dup_mmap()
433 mm->stack_vm = oldmm->stack_vm; in dup_mmap()
435 rb_link = &mm->mm_rb.rb_node; in dup_mmap()
437 pprev = &mm->mmap; in dup_mmap()
[all …]
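
The dup_mmap() hits above are the fork-time copy of an address space. Note the locking: the parent's mmap_sem is already write-held, and the child's is taken with down_write_nested(SINGLE_DEPTH_NESTING) so lockdep accepts two held rwsems of the same lock class. A minimal sketch of that pattern against the ~4.4-era fields shown (shared_vm and friends); copy_mm_counters is a made-up helper, not the kernel's:

    #include <linux/mm_types.h>
    #include <linux/rwsem.h>
    #include <linux/lockdep.h>

    /* Sketch: copy the VM accounting counters from oldmm to mm while
     * holding both mmap_sems the way dup_mmap() does. */
    static void copy_mm_counters(struct mm_struct *mm, struct mm_struct *oldmm)
    {
        down_write(&oldmm->mmap_sem);
        /* Same lock class as oldmm->mmap_sem: the nesting annotation
         * tells lockdep this double-acquire is intentional. */
        down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

        mm->total_vm  = oldmm->total_vm;
        mm->shared_vm = oldmm->shared_vm;
        mm->exec_vm   = oldmm->exec_vm;
        mm->stack_vm  = oldmm->stack_vm;

        up_write(&mm->mmap_sem);
        up_write(&oldmm->mmap_sem);
    }
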
sys.c
1608 struct mm_struct *mm = get_task_mm(p); in k_getrusage() local
1610 if (mm) { in k_getrusage()
1611 setmax_mm_hiwater_rss(&maxrss, mm); in k_getrusage()
1612 mmput(mm); in k_getrusage()
1654 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) in prctl_set_mm_exe_file() argument
1683 exe_file = get_mm_exe_file(mm); in prctl_set_mm_exe_file()
1688 down_read(&mm->mmap_sem); in prctl_set_mm_exe_file()
1689 for (vma = mm->mmap; vma; vma = vma->vm_next) { in prctl_set_mm_exe_file()
1697 up_read(&mm->mmap_sem); in prctl_set_mm_exe_file()
1708 if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags)) in prctl_set_mm_exe_file()
[all …]
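
Both k_getrusage() and prctl_set_mm_exe_file() lean on the canonical pairing for touching another task's address space: get_task_mm() bumps mm_users and returns NULL for kernel threads, and every non-NULL result must be released with mmput(). A hedged sketch of the pattern; task_rss_pages is a hypothetical name:

    #include <linux/sched.h>
    #include <linux/mm.h>

    /* Sketch: read a task's resident set size, or 0 for a kernel
     * thread.  The get_task_mm()/mmput() pairing is the point. */
    static unsigned long task_rss_pages(struct task_struct *p)
    {
        struct mm_struct *mm = get_task_mm(p);  /* NULL for kthreads */
        unsigned long rss = 0;

        if (mm) {
            rss = get_mm_rss(mm);
            mmput(mm);                  /* drop the mm_users reference */
        }
        return rss;
    }
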
exit.c
302 void mm_update_next_owner(struct mm_struct *mm) in mm_update_next_owner() argument
311 if (mm->owner != p) in mm_update_next_owner()
318 if (atomic_read(&mm->mm_users) <= 1) { in mm_update_next_owner()
319 mm->owner = NULL; in mm_update_next_owner()
328 if (c->mm == mm) in mm_update_next_owner()
336 if (c->mm == mm) in mm_update_next_owner()
347 if (c->mm == mm) in mm_update_next_owner()
349 if (c->mm) in mm_update_next_owner()
359 mm->owner = NULL; in mm_update_next_owner()
375 if (c->mm != mm) { in mm_update_next_owner()
[all …]
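
mm_update_next_owner() runs when the task recorded in mm->owner exits while other tasks still share the mm; it scans the exiting task's children, then siblings, then the whole task list for a successor. Its fast path is worth spelling out: if the exiting task was the last user, there is nobody to inherit ownership. A sketch of just that fast path, assuming mm->owner exists (it is gated on the memcg config in this era):

    #include <linux/sched.h>
    #include <linux/mm_types.h>

    /* Sketch of mm_update_next_owner()'s fast path.  Returns true if
     * no successor search is needed. */
    static bool mm_owner_fastpath(struct mm_struct *mm, struct task_struct *p)
    {
        if (mm->owner != p)                 /* p never owned this mm */
            return true;
        if (atomic_read(&mm->mm_users) <= 1) {
            mm->owner = NULL;               /* last user: no heir */
            return true;
        }
        return false;                       /* slow path: scan tasks */
    }
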
tsacct.c
94 struct mm_struct *mm; in xacct_add_tsk() local
99 mm = get_task_mm(p); in xacct_add_tsk()
100 if (mm) { in xacct_add_tsk()
102 stats->hiwater_rss = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB; in xacct_add_tsk()
103 stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB; in xacct_add_tsk()
104 mmput(mm); in xacct_add_tsk()
126 if (likely(tsk->mm)) { in __acct_update_integrals()
142 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); in __acct_update_integrals()
143 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; in __acct_update_integrals()
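
xacct_add_tsk() converts page counts into kilobytes with hiwater * PAGE_SIZE / KB, where KB is tsacct.c's private #define for 1024; with 4 KiB pages that is just pages * 4. A worked sketch of the conversion (KB repeated here because the original macro is file-local):

    #include <linux/types.h>
    #include <linux/mm.h>

    #define KB 1024     /* mirrors the file-local constant in tsacct.c */

    /* Sketch: the unit conversion behind stats->hiwater_rss.  With
     * PAGE_SIZE == 4096, 300 pages -> 300 * 4096 / 1024 = 1200 KB. */
    static u64 pages_to_kb(unsigned long pages)
    {
        return (u64)pages * PAGE_SIZE / KB;
    }
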
ptrace.c
258 struct mm_struct *mm; in __ptrace_may_access() local
319 mm = task->mm; in __ptrace_may_access()
320 if (mm && in __ptrace_may_access()
321 ((get_dumpable(mm) != SUID_DUMP_USER) && in __ptrace_may_access()
322 !ptrace_has_cap(mm->user_ns, mode))) in __ptrace_may_access()
1030 struct mm_struct *mm = get_task_mm(child); in ptrace_request() local
1034 if (!mm) in ptrace_request()
1039 tmp = mm->context.exec_fdpic_loadmap; in ptrace_request()
1042 tmp = mm->context.interp_fdpic_loadmap; in ptrace_request()
1047 mmput(mm); in ptrace_request()
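
__ptrace_may_access() adds an mm-based gate on top of the credential checks: if the target's mm is not plainly dumpable (get_dumpable() != SUID_DUMP_USER, e.g. after a setuid exec), the tracer must also hold CAP_SYS_PTRACE relative to the mm's user namespace. A condensed sketch of that gate; note ptrace_has_cap() is a helper local to kernel/ptrace.c, used here only on the strength of the hit above:

    #include <linux/sched.h>    /* get_dumpable(), SUID_DUMP_USER */

    /* Sketch: the dumpability gate from __ptrace_may_access().
     * Returns true when the mm itself does not forbid the access. */
    static bool mm_allows_ptrace(struct mm_struct *mm, unsigned int mode)
    {
        if (!mm)
            return true;            /* no address space to protect */
        if (get_dumpable(mm) == SUID_DUMP_USER)
            return true;            /* ordinary dumpable process */
        /* non-dumpable: demand CAP_SYS_PTRACE in mm->user_ns */
        return ptrace_has_cap(mm->user_ns, mode);
    }
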
cpuset.c
999 struct mm_struct *mm; member
1010 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); in cpuset_migrate_mm_workfn()
1011 mmput(mwork->mm); in cpuset_migrate_mm_workfn()
1015 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, in cpuset_migrate_mm() argument
1022 mwork->mm = mm; in cpuset_migrate_mm()
1028 mmput(mm); in cpuset_migrate_mm()
1121 struct mm_struct *mm; in update_tasks_nodemask() local
1126 mm = get_task_mm(task); in update_tasks_nodemask()
1127 if (!mm) in update_tasks_nodemask()
1132 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
[all …]
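
cpuset_migrate_mm() shows how an mm_users reference can be handed off to deferred work: the reference is stashed in a work item together with the nodemasks, and the work function calls mmput() when do_migrate_pages() finishes (the error path mmput()s immediately). A minimal sketch of the hand-off, using the system workqueue where cpuset uses a dedicated one; mm_async_work and friends are made-up names:

    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/sched.h>
    #include <linux/mm_types.h>

    struct mm_async_work {
        struct work_struct work;
        struct mm_struct *mm;       /* owns one mm_users reference */
    };

    static void mm_work_fn(struct work_struct *work)
    {
        struct mm_async_work *w =
            container_of(work, struct mm_async_work, work);

        /* ... operate on w->mm, e.g. do_migrate_pages() ... */
        mmput(w->mm);               /* release the caller's reference */
        kfree(w);
    }

    /* Caller transfers ownership of its mm reference to the work item. */
    static void mm_defer(struct mm_struct *mm)
    {
        struct mm_async_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

        if (w) {
            w->mm = mm;
            INIT_WORK(&w->work, mm_work_fn);
            schedule_work(&w->work);
        } else {
            mmput(mm);              /* no work item: drop the ref now */
        }
    }
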
kcmp.c
145 ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM); in SYSCALL_DEFINE5()
acct.c
536 if (group_dead && current->mm) { in acct_collect()
539 down_read(&current->mm->mmap_sem); in acct_collect()
540 vma = current->mm->mmap; in acct_collect()
545 up_read(&current->mm->mmap_sem); in acct_collect()
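
acct_collect() walks every VMA under the read side of mmap_sem to total the address-space size at exit. The same walk, as a self-contained sketch against the pre-maple-tree VMA list (mm->mmap / vm_next):

    #include <linux/mm.h>
    #include <linux/mm_types.h>

    /* Sketch: sum the spans of all VMAs, as acct_collect() does for
     * the accounting record.  The read lock suffices: we only look. */
    static unsigned long total_vma_bytes(struct mm_struct *mm)
    {
        struct vm_area_struct *vma;
        unsigned long bytes = 0;

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
            bytes += vma->vm_end - vma->vm_start;
        up_read(&mm->mmap_sem);

        return bytes;
    }
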
futex.c
342 atomic_inc(&key->private.mm->mm_count); in futex_get_mm()
452 mmdrop(key->private.mm); in drop_futex_key_refs()
525 struct mm_struct *mm = current->mm; in get_futex_key() local
552 key->private.mm = mm; in get_futex_key()
682 key->private.mm = mm; in get_futex_key()
748 struct mm_struct *mm = current->mm; in fault_in_user_writeable() local
751 down_read(&mm->mmap_sem); in fault_in_user_writeable()
752 ret = fixup_user_fault(current, mm, (unsigned long)uaddr, in fault_in_user_writeable()
754 up_read(&mm->mmap_sem); in fault_in_user_writeable()
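
futex_get_mm() takes the other kind of mm reference: atomic_inc(&mm->mm_count) keeps the mm_struct object itself alive (paired with mmdrop()), whereas mm_users keeps the VMAs and page tables alive (paired with mmput()). A private futex only needs the mm pointer to stay valid as a hash-key identity, so the cheaper reference is enough. A sketch contrasting the two, in the pre-4.11 spelling (later kernels name these mmgrab()/mmget()):

    #include <linux/sched.h>
    #include <linux/mm_types.h>

    /* Pin only the mm_struct's identity: the pointer stays valid, but
     * the address space may be torn down underneath.  Pair: mmdrop(). */
    static void pin_mm_identity(struct mm_struct *mm)
    {
        atomic_inc(&mm->mm_count);
    }

    /* Pin the address space contents (VMAs, page tables).  Pair:
     * mmput().  get_task_mm() takes this kind of reference for you. */
    static void pin_mm_contents(struct mm_struct *mm)
    {
        atomic_inc(&mm->mm_users);
    }
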
context_tracking.c
64 WARN_ON_ONCE(!current->mm); in __context_tracking_enter()
latencytop.c
101 if (!tsk->mm) in account_global_scheduler_latency()
cred.c
451 if (task->mm) in commit_creds()
452 set_dumpable(task->mm, suid_dumpable); in commit_creds()
audit.c
1869 struct mm_struct *mm) in audit_log_d_path_exe() argument
1873 if (!mm) in audit_log_d_path_exe()
1876 exe_file = get_mm_exe_file(mm); in audit_log_d_path_exe()
1919 audit_log_d_path_exe(ab, tsk->mm); in audit_log_task_info()
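
audit_log_d_path_exe() gets the executable via get_mm_exe_file(), which returns mm->exe_file with its reference count raised (or NULL), so the path can be logged without holding any mm lock as long as the call is paired with fput(). A sketch of that pairing; with_exe_file is a hypothetical name:

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Sketch: borrow mm->exe_file safely, as audit_log_d_path_exe()
     * does before logging the path. */
    static void with_exe_file(struct mm_struct *mm)
    {
        struct file *exe_file;

        if (!mm)
            return;
        exe_file = get_mm_exe_file(mm); /* takes a ref; may be NULL */
        if (exe_file) {
            /* ... use &exe_file->f_path, e.g. for d_path() ... */
            fput(exe_file);             /* drop the file reference */
        }
    }
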
audit.h
263 struct mm_struct *mm);
cpu.c
300 cpumask_clear_cpu(cpu, mm_cpumask(t->mm)); in clear_tasks_mm_cpumask()
signal.c
1819 if (unlikely(current->mm->core_state) && in may_ptrace_stop()
1820 unlikely(current->mm == current->parent->mm)) in may_ptrace_stop()
/kernel/events/
uprobes.c
155 struct mm_struct *mm = vma->vm_mm; in __replace_page() local
171 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in __replace_page()
173 ptep = page_check_address(page, mm, addr, &ptl, 0); in __replace_page()
185 dec_mm_counter(mm, MM_FILEPAGES); in __replace_page()
186 inc_mm_counter(mm, MM_ANONPAGES); in __replace_page()
191 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); in __replace_page()
204 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __replace_page()
293 int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, in uprobe_write_opcode() argument
302 ret = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, &vma); in uprobe_write_opcode()
342 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) in set_swbp() argument
[all …]
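
__replace_page() swaps the page backing a user address (to plant or remove a breakpoint) and brackets the PTE rewrite with mmu_notifier_invalidate_range_start()/end(), so secondary MMUs such as KVM drop their stale translations for the range. A skeleton of just that bracket, using the (mm, start, end) notifier signature of this era; the page-table surgery itself is elided:

    #include <linux/mm.h>
    #include <linux/mmu_notifier.h>

    /* Sketch: keep secondary MMUs coherent across a PTE rewrite at
     * addr, as __replace_page() does.  The pte lookup, counter
     * updates, and set_pte_at_notify() sequence are elided. */
    static void replace_one_page(struct mm_struct *mm, unsigned long addr)
    {
        const unsigned long start = addr & PAGE_MASK;
        const unsigned long end = start + PAGE_SIZE;

        mmu_notifier_invalidate_range_start(mm, start, end);

        /* ... lock the PTE, update counters, install the new PTE ... */

        mmu_notifier_invalidate_range_end(mm, start, end);
    }
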
callchain.c
181 if (current->mm) in perf_callchain()
/kernel/trace/
trace_output.c
325 static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, in seq_print_user_ip() argument
335 if (mm) { in seq_print_user_ip()
338 down_read(&mm->mmap_sem); in seq_print_user_ip()
339 vma = find_vma(mm, ip); in seq_print_user_ip()
350 up_read(&mm->mmap_sem); in seq_print_user_ip()
1222 struct mm_struct *mm = NULL; in trace_user_stack_print() local
1238 mm = get_task_mm(task); in trace_user_stack_print()
1256 seq_print_user_ip(s, mm, ip, flags); in trace_user_stack_print()
1260 if (mm) in trace_user_stack_print()
1261 mmput(mm); in trace_user_stack_print()
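
seq_print_user_ip() resolves a user instruction pointer to its VMA so it can print file + offset. find_vma() returns the first VMA whose vm_end exceeds the address, which is why the extra vm_start <= ip check appears in such code. A sketch of the lookup; user_ip_to_vma_offset is a made-up helper:

    #include <linux/mm.h>
    #include <linux/mm_types.h>

    /* Sketch: translate a user IP into an offset within its mapping,
     * as seq_print_user_ip() does when printing user stacks. */
    static bool user_ip_to_vma_offset(struct mm_struct *mm, unsigned long ip,
                                      unsigned long *off)
    {
        struct vm_area_struct *vma;
        bool found = false;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, ip);         /* first VMA with vm_end > ip */
        if (vma && vma->vm_start <= ip) {
            *off = ip - vma->vm_start;  /* offset inside the mapping */
            found = true;
        }
        up_read(&mm->mmap_sem);
        return found;
    }
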
trace_uprobe.c
905 struct mm_struct *mm);
1025 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) in __uprobe_perf_filter() argument
1033 if (event->hw.target->mm == mm) in __uprobe_perf_filter()
1043 return __uprobe_perf_filter(&tu->filter, event->hw.target->mm); in uprobe_filter_event()
1103 enum uprobe_filter_ctx ctx, struct mm_struct *mm) in uprobe_perf_filter() argument
1110 ret = __uprobe_perf_filter(&tu->filter, mm); in uprobe_perf_filter()
1173 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) in uprobe_perf_func()
/kernel/sched/
core.c
1269 tlb_migrate_finish(p->mm); in __set_cpus_allowed_ptr()
1639 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
2216 if (p->mm && atomic_read(&p->mm->mm_users) == 1) { in __sched_fork()
2217 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); in __sched_fork()
2218 p->mm->numa_scan_seq = 0; in __sched_fork()
2227 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; in __sched_fork()
2646 struct mm_struct *mm = rq->prev_mm; in finish_task_switch() local
2685 if (mm) in finish_task_switch()
2686 mmdrop(mm); in finish_task_switch()
2773 struct mm_struct *mm, *oldmm; in context_switch() local
[all …]
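
The context_switch()/finish_task_switch() hits are the lazy-TLB protocol: a kernel thread has no mm of its own, so it borrows the previous task's active_mm, pinned with an mm_count reference; that reference is stashed in rq->prev_mm and only mmdrop()ed in finish_task_switch(), after the switch has fully completed. A condensed sketch of the borrow-and-release pair, with the runqueue plumbing stripped out:

    #include <linux/sched.h>
    #include <linux/mm_types.h>

    /* Borrow side (context_switch()): a kernel thread takes over the
     * outgoing task's active_mm, pinned by mm_count (pre-4.11
     * spelling; later kernels use mmgrab()). */
    static struct mm_struct *borrow_mm_for_kthread(struct mm_struct *oldmm)
    {
        atomic_inc(&oldmm->mm_count);   /* keep the mm_struct alive */
        return oldmm;                   /* becomes next->active_mm */
    }

    /* Release side (finish_task_switch()): drop the pin once the
     * switch is complete and prev can no longer be touched. */
    static void release_borrowed_mm(struct mm_struct *prev_mm)
    {
        if (prev_mm)
            mmdrop(prev_mm);
    }
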
fair.c
983 rss = get_mm_rss(p->mm); in task_nr_scan_windows()
1825 p->mm->numa_next_scan = jiffies + in update_task_scan_period()
1998 seq = READ_ONCE(p->mm->numa_scan_seq); in task_numa_placement()
2169 if (tsk->mm == current->mm) in task_numa_group()
2267 if (!p->mm) in task_numa_fault()
2335 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); in reset_ptenuma_scan()
2336 p->mm->numa_scan_offset = 0; in reset_ptenuma_scan()
2347 struct mm_struct *mm = p->mm; in task_numa_work() local
2367 if (!mm->numa_next_scan) { in task_numa_work()
2368 mm->numa_next_scan = now + in task_numa_work()
[all …]
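
The NUMA-balancing hits share one bit of discipline: mm->numa_scan_seq is read locklessly by task_numa_placement(), so reset_ptenuma_scan() advances it with a READ_ONCE()/WRITE_ONCE() pair instead of a plain increment, keeping the compiler from tearing or re-reading the value. A sketch of both sides, assuming CONFIG_NUMA_BALANCING (which gates these fields):

    #include <linux/compiler.h>
    #include <linux/mm_types.h>

    /* Writer (serialized by the scan itself): bump the sequence. */
    static void numa_scan_seq_bump(struct mm_struct *mm)
    {
        WRITE_ONCE(mm->numa_scan_seq, READ_ONCE(mm->numa_scan_seq) + 1);
    }

    /* Reader (lockless): sample the sequence exactly once. */
    static int numa_scan_seq_sample(struct mm_struct *mm)
    {
        return READ_ONCE(mm->numa_scan_seq);
    }
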
debug.c
527 if (p->mm) in sched_show_numa()
528 P(mm->numa_scan_seq); in sched_show_numa()
/kernel/debug/kdb/
kdb_support.c
646 } else if (!p->mm && state == 'S') { in kdb_task_state_char()
/kernel/debug/
debug_core.c
223 if (current->mm) { in kgdb_flush_swbreak_addr()
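
The kdb and kgdb hits both use the classic "->mm == NULL means kernel thread" test. It is a heuristic rather than a guarantee: a user task far enough into exit has also dropped its mm, so code that must be exact checks PF_KTHREAD instead. A one-line sketch of the distinction:

    #include <linux/sched.h>

    /* Sketch: NULL ->mm usually means a kernel thread, but an exiting
     * user task qualifies too; PF_KTHREAD is the authoritative flag. */
    static bool looks_like_kthread(struct task_struct *p)
    {
        return !p->mm || (p->flags & PF_KTHREAD);
    }
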
