/mm/
D | oom_kill.c
      96  struct task_struct *tsk;                                  in oom_cpuset_eligible()  [local]
     104  for_each_thread(start, tsk) {                             in oom_cpuset_eligible()
     112  ret = mempolicy_nodemask_intersects(tsk, mask);           in oom_cpuset_eligible()
     118  ret = cpuset_mems_allowed_intersects(current, tsk);       in oom_cpuset_eligible()
     128  static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)  in oom_cpuset_eligible()  [argument]
     609  static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)  in oom_reap_task_mm()  [argument]
     614  trace_skip_task_reaping(tsk->pid);                        in oom_reap_task_mm()
     625  trace_skip_task_reaping(tsk->pid);                        in oom_reap_task_mm()
     629  trace_start_task_reaping(tsk->pid);                       in oom_reap_task_mm()
     637  task_pid_nr(tsk), tsk->comm,                              in oom_reap_task_mm()
     [all …]

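Note on the oom_cpuset_eligible() hits: the function walks every thread of the candidate task and asks whether its allowed memory nodes intersect the OOM allocation's nodemask, using the thread's mempolicy when the allocation is nodemask-constrained and the cpuset intersection otherwise. A minimal sketch of that loop, with the memcg short-circuit and oom_control plumbing elided:

    #include <linux/sched/signal.h>
    #include <linux/mempolicy.h>
    #include <linux/cpuset.h>

    /* Sketch only: mirrors the loop visible above, not the full function. */
    static bool eligible_sketch(struct task_struct *start, const nodemask_t *mask)
    {
        struct task_struct *tsk;
        bool ret = false;

        rcu_read_lock();                /* for_each_thread() needs RCU */
        for_each_thread(start, tsk) {
            if (mask)                   /* nodemask-constrained allocation */
                ret = mempolicy_nodemask_intersects(tsk, mask);
            else                        /* otherwise compare cpusets */
                ret = cpuset_mems_allowed_intersects(current, tsk);
            if (ret)
                break;
        }
        rcu_read_unlock();
        return ret;
    }
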
D | memory-failure.c
     226  struct task_struct *tsk;                                  [member]
     238  struct task_struct *t = tk->tsk;                          in kill_proc()
     335  static void add_to_kill(struct task_struct *tsk, struct page *p,  in add_to_kill()  [argument]
     365  page_to_pfn(p), tsk->comm);                               in add_to_kill()
     371  get_task_struct(tsk);                                     in add_to_kill()
     372  tk->tsk = tsk;                                            in add_to_kill()
     398  pfn, tk->tsk->comm, tk->tsk->pid);                        in kill_procs()
     400  tk->tsk, PIDTYPE_PID);                                    in kill_procs()
     411  pfn, tk->tsk->comm, tk->tsk->pid);                        in kill_procs()
     413  put_task_struct(tk->tsk);                                 in kill_procs()
     [all …]

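Note on the add_to_kill()/kill_procs() pair: a task queued for a hwpoison signal is pinned with get_task_struct() (line 371) and only released with put_task_struct() (line 413) after the signal is delivered, so the task_struct cannot vanish while it sits on the to-kill list. A reduced sketch of that pin/queue/release pattern, with the hwpoison address bookkeeping stripped out:

    #include <linux/sched/task.h>
    #include <linux/slab.h>
    #include <linux/list.h>

    /* Sketch of the refcounting pattern only; fields and locking simplified. */
    struct to_kill_sketch {
        struct list_head nd;
        struct task_struct *tsk;        /* pinned while queued */
    };

    static void queue_victim(struct task_struct *tsk, struct list_head *list)
    {
        struct to_kill_sketch *tk = kmalloc(sizeof(*tk), GFP_ATOMIC);

        if (!tk)
            return;
        get_task_struct(tsk);           /* keep the task alive until release */
        tk->tsk = tsk;
        list_add_tail(&tk->nd, list);
    }

    static void release_victims(struct list_head *list)
    {
        struct to_kill_sketch *tk, *next;

        list_for_each_entry_safe(tk, next, list, nd) {
            /* signal delivery elided */
            put_task_struct(tk->tsk);   /* drop the pin */
            list_del(&tk->nd);
            kfree(tk);
        }
    }
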
D | page-writeback.c
     406  struct task_struct *tsk;                                  in domain_dirty_limits()  [local]
     440  tsk = current;                                            in domain_dirty_limits()
     441  if (rt_task(tsk)) {                                       in domain_dirty_limits()
     482  struct task_struct *tsk = current;                        in node_dirty_limit()  [local]
     491  if (rt_task(tsk))                                         in node_dirty_limit()

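Note on the rt_task() checks: both domain_dirty_limits() and node_dirty_limit() raise the dirty thresholds for realtime callers so they are throttled later than normal tasks. The shape below is illustrative only; the exact boost arithmetic differs between the two functions and across kernel versions:

    #include <linux/sched/rt.h>

    /* Illustrative: the real code adds roughly 25%, plus (in the domain
     * case) a small fraction of the global dirty limit. */
    static unsigned long apply_rt_boost(unsigned long thresh)
    {
        struct task_struct *tsk = current;

        if (rt_task(tsk))
            thresh += thresh / 4;   /* let RT tasks dirty more before throttling */
        return thresh;
    }
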
D | nommu.c
    1687  int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,  in __access_remote_vm()  [argument]
    1741  int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,  in access_process_vm()  [argument]
    1749  mm = get_task_mm(tsk);                                    in access_process_vm()
    1753  len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);  in access_process_vm()

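Note on access_process_vm(): as the hits show, it is a thin wrapper that resolves the task to an mm with get_task_mm(), delegates to __access_remote_vm(), and drops the mm reference. Its essential shape, which the nommu and MMU variants (see memory.c below) share:

    #include <linux/sched/mm.h>
    #include <linux/mm.h>

    /* Sketch of the wrapper shape visible above. */
    int access_process_vm_sketch(struct task_struct *tsk, unsigned long addr,
                                 void *buf, int len, unsigned int gup_flags)
    {
        struct mm_struct *mm;

        mm = get_task_mm(tsk);      /* NULL for kernel threads / exiting tasks */
        if (!mm)
            return 0;

        len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);

        mmput(mm);                  /* release the mm reference */
        return len;
    }
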
D | mempolicy.c
     393  void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)  in mpol_rebind_task()  [argument]
     395  mpol_rebind_policy(tsk->mempolicy, new);                  in mpol_rebind_task()
    2107  bool mempolicy_nodemask_intersects(struct task_struct *tsk,  in mempolicy_nodemask_intersects()  [argument]
    2115  task_lock(tsk);                                           in mempolicy_nodemask_intersects()
    2116  mempolicy = tsk->mempolicy;                               in mempolicy_nodemask_intersects()
    2137  task_unlock(tsk);                                         in mempolicy_nodemask_intersects()

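Note on mempolicy_nodemask_intersects(): tsk->mempolicy can be replaced concurrently (see mpol_rebind_task() above), so it is read under task_lock(). A skeleton of that lock/read/unlock pattern, reduced to a single policy mode; the nodemask field name inside struct mempolicy varies across kernel versions, so treat it as a placeholder:

    #include <linux/sched/task.h>
    #include <linux/mempolicy.h>

    /* Skeleton of the locking pattern only; per-mode handling elided. */
    static bool nodemask_intersects_sketch(struct task_struct *tsk,
                                           const nodemask_t *mask)
    {
        struct mempolicy *mempolicy;
        bool ret = true;

        if (!mask)
            return ret;

        task_lock(tsk);             /* pin tsk->mempolicy against rebind */
        mempolicy = tsk->mempolicy;
        if (mempolicy && mempolicy->mode == MPOL_BIND)
            ret = nodes_intersects(mempolicy->v.nodes, *mask); /* field name version-dependent */
        task_unlock(tsk);

        return ret;
    }
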
D | vmscan.c
    3978  struct task_struct *tsk = current;                        in kswapd()  [local]
    3982  set_cpus_allowed_ptr(tsk, cpumask);                       in kswapd()
    3996  tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;     in kswapd()
    4048  tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);  in kswapd()

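Note on the kswapd() flag lines: the daemon brackets its whole lifetime with per-task flags: PF_MEMALLOC lets reclaim dip into memory reserves, PF_SWAPWRITE (present in kernels of this vintage) permits writing to swap, and PF_KSWAPD marks the task for the allocator's heuristics; all three are cleared again on exit. A sketch of that bracketing, with the reclaim loop elided:

    #include <linux/sched.h>
    #include <linux/kthread.h>

    /* Sketch of the flag bracketing in kswapd(); the reclaim loop is elided. */
    static int kswapd_sketch(void *p)
    {
        struct task_struct *tsk = current;

        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

        while (!kthread_should_stop()) {
            /* sleep / balance_pgdat() cycle elided */
        }

        tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
        return 0;
    }
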
D | compaction.c
    2865  struct task_struct *tsk = current;                        in kcompactd()  [local]
    2871  set_cpus_allowed_ptr(tsk, cpumask);                       in kcompactd()

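Note on the set_cpus_allowed_ptr() call: kcompactd(), like kswapd() above, binds itself to the CPUs of the node it serves so its work stays node-local. A sketch of that per-node kthread setup, assuming the thread receives its pg_data_t as the kthread argument:

    #include <linux/kthread.h>
    #include <linux/topology.h>
    #include <linux/mmzone.h>

    /* Sketch of per-node CPU binding; the compaction loop is elided. */
    static int kcompactd_sketch(void *p)
    {
        pg_data_t *pgdat = p;
        struct task_struct *tsk = current;
        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

        if (!cpumask_empty(cpumask))        /* the node may have no CPUs */
            set_cpus_allowed_ptr(tsk, cpumask);

        while (!kthread_should_stop()) {
            /* compaction work elided */
        }
        return 0;
    }
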
D | memory.c
    5604  int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,  in __access_remote_vm()  [argument]
    5690  int access_process_vm(struct task_struct *tsk, unsigned long addr,  in access_process_vm()  [argument]
    5696  mm = get_task_mm(tsk);                                    in access_process_vm()
    5700  ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);  in access_process_vm()

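Note on the caller side: ptrace is the classic consumer of this API, and reading another task's memory is a single call. A hypothetical helper (peek_remote() is not a kernel function) built on the signature shown above; FOLL_FORCE is the gup flag ptrace-style accesses pass to override page protections:

    #include <linux/mm.h>

    /* Hypothetical convenience wrapper; returns bytes copied (may be short). */
    static int peek_remote(struct task_struct *tsk, unsigned long addr,
                           void *buf, int len)
    {
        return access_process_vm(tsk, addr, buf, len, FOLL_FORCE);
    }
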
D | page_alloc.c
    4654  static bool oom_reserves_allowed(struct task_struct *tsk)  in oom_reserves_allowed()  [argument]
    4656  if (!tsk_is_oom_victim(tsk))                              in oom_reserves_allowed()

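Note on oom_reserves_allowed(): access to memory reserves is gated on tsk_is_oom_victim(), i.e. only a task already selected for OOM kill may consume reserves, so it can make forward progress and exit. The early-exit shape visible above, with the !MMU special case elided:

    #include <linux/oom.h>

    /* Sketch: only OOM victims may use the reserves; extra checks elided. */
    static bool oom_reserves_allowed_sketch(struct task_struct *tsk)
    {
        if (!tsk_is_oom_victim(tsk))
            return false;
        /* !CONFIG_MMU refinement elided */
        return true;
    }
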
/mm/damon/
D | core.c
     443  struct task_struct *tsk;                                  in __damon_stop()  [local]
     446  tsk = ctx->kdamond;                                       in __damon_stop()
     447  if (tsk) {                                                in __damon_stop()
     448  get_task_struct(tsk);                                     in __damon_stop()
     450  kthread_stop(tsk);                                        in __damon_stop()
     451  put_task_struct(tsk);                                     in __damon_stop()

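Note on the __damon_stop() sequence: the kdamond pointer is read, the thread pinned with get_task_struct(), and only then is kthread_stop() called; the pin guarantees the task_struct stays valid while kthread_stop() waits, even if the thread is already exiting. A sketch, assuming a ctx->kdamond_lock mutex guards the pointer as in the upstream code of this era:

    #include <linux/damon.h>
    #include <linux/kthread.h>
    #include <linux/sched/task.h>

    /* Sketch of the pin-then-stop pattern. */
    static int damon_stop_sketch(struct damon_ctx *ctx)
    {
        struct task_struct *tsk;

        mutex_lock(&ctx->kdamond_lock);
        tsk = ctx->kdamond;
        if (tsk) {
            get_task_struct(tsk);           /* keep task_struct valid */
            mutex_unlock(&ctx->kdamond_lock);
            kthread_stop(tsk);              /* waits for the thread to exit */
            put_task_struct(tsk);           /* drop the pin */
            return 0;
        }
        mutex_unlock(&ctx->kdamond_lock);
        return -EPERM;
    }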