Lines Matching +full:oc +full:- +full:level +full:- +full:select

1 // SPDX-License-Identifier: GPL-2.0-only
15 * Since we won't call these routines often (on a well-configured
71 static inline bool is_memcg_oom(struct oom_control *oc) in is_memcg_oom() argument
73 return oc->memcg != NULL; in is_memcg_oom()
78 * oom_cpuset_eligible() - check task eligibility for kill
80 * @oc: pointer to struct oom_control
86 * This function assumes oom-killer context and that 'current' has triggered
87 * the oom-killer.
90 struct oom_control *oc) in oom_cpuset_eligible() argument
94 const nodemask_t *mask = oc->nodemask; in oom_cpuset_eligible()
96 if (is_memcg_oom(oc)) in oom_cpuset_eligible()
124 static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc) in oom_cpuset_eligible() argument
131 * The process p may have detached its own ->mm while exiting or through
133 * pointer. Return p, or any of its subthreads with a valid ->mm, with
144 if (likely(t->mm)) in find_lock_task_mm()
156 * order == -1 means the oom kill is required by sysrq, otherwise only
159 static inline bool is_sysrq_oom(struct oom_control *oc) in is_sysrq_oom() argument
161 return oc->order == -1; in is_sysrq_oom()
169 if (p->flags & PF_KTHREAD) in oom_unkillable_task()
194 * oom_badness - heuristic function to determine which candidate task to kill
219 adj = (long)p->signal->oom_score_adj; in oom_badness()
221 test_bit(MMF_OOM_SKIP, &p->mm->flags) || in oom_badness()
231 points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) + in oom_badness()
232 mm_pgtables_bytes(p->mm) / PAGE_SIZE; in oom_badness()
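The two matched lines above carry the whole heuristic: the base score is the task's resident pages plus its swap entries plus its page-table pages, shifted by oom_score_adj. Below is a minimal userspace sketch of that arithmetic; the scaling of the adjustment by totalpages / 1000 is my reading of the kernel heuristic and is not shown in the matches above, so treat it as an assumption.

#include <stdio.h>

/*
 * Hedged sketch of the oom_badness() arithmetic, not kernel code.
 * rss, swapents and pgtable_pages are in pages; totalpages mirrors
 * oc->totalpages, the memory eligible for this OOM invocation.
 */
static long badness(long rss, long swapents, long pgtable_pages,
		    long oom_score_adj, long totalpages)
{
	long points = rss + swapents + pgtable_pages;

	/* oom_score_adj runs from -1000 (never kill) to +1000 */
	points += oom_score_adj * totalpages / 1000;

	return points;
}

int main(void)
{
	/* e.g. 1 GiB RSS on a 4 GiB machine with oom_score_adj = 500 */
	printf("%ld\n", badness(262144, 0, 512, 500, 1048576));
	return 0;
}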
252 static enum oom_constraint constrained_alloc(struct oom_control *oc) in constrained_alloc() argument
256 enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask); in constrained_alloc()
260 if (is_memcg_oom(oc)) { in constrained_alloc()
261 oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1; in constrained_alloc()
266 oc->totalpages = totalram_pages() + total_swap_pages; in constrained_alloc()
271 if (!oc->zonelist) in constrained_alloc()
278 if (oc->gfp_mask & __GFP_THISNODE) in constrained_alloc()
286 if (oc->nodemask && in constrained_alloc()
287 !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { in constrained_alloc()
288 oc->totalpages = total_swap_pages; in constrained_alloc()
289 for_each_node_mask(nid, *oc->nodemask) in constrained_alloc()
290 oc->totalpages += node_present_pages(nid); in constrained_alloc()
295 for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, in constrained_alloc()
296 highest_zoneidx, oc->nodemask) in constrained_alloc()
297 if (!cpuset_zone_allowed(zone, oc->gfp_mask)) in constrained_alloc()
301 oc->totalpages = total_swap_pages; in constrained_alloc()
303 oc->totalpages += node_present_pages(nid); in constrained_alloc()
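For the mempolicy- and cpuset-constrained branches above, oc->totalpages is rebuilt as swap plus the present pages of the allowed nodes. Here is a standalone worked example of that sum with invented page counts; in the kernel the values come from total_swap_pages and node_present_pages(nid).

#include <stdio.h>

int main(void)
{
	/* invented numbers, 4 KiB pages assumed */
	unsigned long total_swap_pages = 524288;		/* 2 GiB swap   */
	unsigned long node_present[] = { 1048576, 1048576 };	/* 4 GiB / node */
	unsigned long totalpages = total_swap_pages;

	/* mirrors: for_each_node_mask(nid, *oc->nodemask) */
	for (int nid = 0; nid < 2; nid++)
		totalpages += node_present[nid];

	printf("oc->totalpages = %lu pages (~%lu GiB)\n",
	       totalpages, totalpages >> 18);
	return 0;
}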
311 struct oom_control *oc = arg; in oom_evaluate_task() local
318 if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc)) in oom_evaluate_task()
327 if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) { in oom_evaluate_task()
328 if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) in oom_evaluate_task()
335 * killed first if it triggers an oom, then select it. in oom_evaluate_task()
339 goto select; in oom_evaluate_task()
342 points = oom_badness(task, oc->totalpages); in oom_evaluate_task()
343 if (points == LONG_MIN || points < oc->chosen_points) in oom_evaluate_task()
346 select: in oom_evaluate_task()
347 if (oc->chosen) in oom_evaluate_task()
348 put_task_struct(oc->chosen); in oom_evaluate_task()
350 oc->chosen = task; in oom_evaluate_task()
351 oc->chosen_points = points; in oom_evaluate_task()
355 if (oc->chosen) in oom_evaluate_task()
356 put_task_struct(oc->chosen); in oom_evaluate_task()
357 oc->chosen = (void *)-1UL; in oom_evaluate_task()
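oom_evaluate_task() orders candidates by the points returned from oom_badness(), and oom_score_adj is the userspace end of that ordering, exposed via /proc/<pid>/oom_score_adj. A small sketch of opting the current process out entirely: -1000 is OOM_SCORE_ADJ_MIN, and lowering the value normally requires CAP_SYS_RESOURCE.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/self/oom_score_adj", "w");

	if (!f) {
		perror("open oom_score_adj");
		return EXIT_FAILURE;
	}
	/* -1000 == OOM_SCORE_ADJ_MIN: the task is skipped by the OOM killer */
	if (fprintf(f, "-1000\n") < 0)
		perror("write oom_score_adj");
	fclose(f);
	return EXIT_SUCCESS;
}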
363 * 'points'. If the scan was aborted, oc->chosen is set to -1.
365 static void select_bad_process(struct oom_control *oc) in select_bad_process() argument
367 oc->chosen_points = LONG_MIN; in select_bad_process()
369 if (is_memcg_oom(oc)) in select_bad_process()
370 mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); in select_bad_process()
376 if (oom_evaluate_task(p, oc)) in select_bad_process()
384 struct oom_control *oc = arg; in dump_task() local
391 if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc)) in dump_task()
405 task->pid, from_kuid(&init_user_ns, task_uid(task)), in dump_task()
406 task->tgid, task->mm->total_vm, get_mm_rss(task->mm), in dump_task()
407 mm_pgtables_bytes(task->mm), in dump_task()
408 get_mm_counter(task->mm, MM_SWAPENTS), in dump_task()
409 task->signal->oom_score_adj, task->comm); in dump_task()
416 * dump_tasks - dump current memory state of all system tasks
417 * @oc: pointer to struct oom_control
425 static void dump_tasks(struct oom_control *oc) in dump_tasks() argument
430 if (is_memcg_oom(oc)) in dump_tasks()
431 mem_cgroup_scan_tasks(oc->memcg, dump_task, oc); in dump_tasks()
441 dump_task(p, oc); in dump_tasks()
447 static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim) in dump_oom_summary() argument
450 pr_info("oom-kill:constraint=%s,nodemask=%*pbl", in dump_oom_summary()
451 oom_constraint_text[oc->constraint], in dump_oom_summary()
452 nodemask_pr_args(oc->nodemask)); in dump_oom_summary()
454 mem_cgroup_print_oom_context(oc->memcg, victim); in dump_oom_summary()
455 pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid, in dump_oom_summary()
459 static void dump_header(struct oom_control *oc, struct task_struct *p) in dump_header() argument
461 pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n", in dump_header()
462 current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order, in dump_header()
463 current->signal->oom_score_adj); in dump_header()
464 if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) in dump_header()
468 if (is_memcg_oom(oc)) in dump_header()
469 mem_cgroup_print_oom_meminfo(oc->memcg); in dump_header()
471 show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask); in dump_header()
476 dump_tasks(oc); in dump_header()
478 dump_oom_summary(oc, p); in dump_header()
489 #define K(x) ((x) << (PAGE_SHIFT-10))
492 * task->mm can be NULL if the task is the exited group leader. So to
502 struct mm_struct *t_mm = READ_ONCE(t->mm); in process_shares_mm()
530 set_bit(MMF_UNSTABLE, &mm->flags); in __oom_reap_task_mm()
532 for (vma = mm->mmap ; vma; vma = vma->vm_next) { in __oom_reap_task_mm()
546 if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { in __oom_reap_task_mm()
551 vma, mm, vma->vm_start, in __oom_reap_task_mm()
552 vma->vm_end); in __oom_reap_task_mm()
579 trace_skip_task_reaping(tsk->pid); in oom_reap_task_mm()
589 if (test_bit(MMF_OOM_SKIP, &mm->flags)) { in oom_reap_task_mm()
590 trace_skip_task_reaping(tsk->pid); in oom_reap_task_mm()
594 trace_start_task_reaping(tsk->pid); in oom_reap_task_mm()
601 …pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n… in oom_reap_task_mm()
602 task_pid_nr(tsk), tsk->comm, in oom_reap_task_mm()
607 trace_finish_task_reaping(tsk->pid); in oom_reap_task_mm()
618 struct mm_struct *mm = tsk->signal->oom_mm; in oom_reap_task()
625 test_bit(MMF_OOM_SKIP, &mm->flags)) in oom_reap_task()
629 task_pid_nr(tsk), tsk->comm); in oom_reap_task()
634 tsk->oom_reaper_list = NULL; in oom_reap_task()
640 set_bit(MMF_OOM_SKIP, &mm->flags); in oom_reap_task()
655 oom_reaper_list = tsk->oom_reaper_list; in oom_reaper()
670 struct mm_struct *mm = tsk->signal->oom_mm; in wake_oom_reaper()
673 /* The victim managed to terminate on its own - see exit_mmap */ in wake_oom_reaper()
674 if (test_bit(MMF_OOM_SKIP, &mm->flags)) { in wake_oom_reaper()
680 tsk->oom_reaper_list = oom_reaper_list; in wake_oom_reaper()
683 trace_wake_reaper(tsk->pid); in wake_oom_reaper()
699 if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) in queue_oom_reaper()
703 timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0); in queue_oom_reaper()
704 tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY; in queue_oom_reaper()
705 add_timer(&tsk->oom_reaper_timer); in queue_oom_reaper()
721 * mark_oom_victim - mark the given task as OOM victim
727 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable (either
732 struct mm_struct *mm = tsk->mm;
740 if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
741 mmgrab(tsk->signal->oom_mm);
742 set_bit(MMF_OOM_VICTIM, &mm->flags);
753 trace_mark_victim(tsk->pid);
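The cmpxchg(&tsk->signal->oom_mm, NULL, mm) above is a publish-once pattern: only the first caller to mark the victim records the mm and pins the extra reference via mmgrab(). A standalone C11 sketch of the same idea follows; the names are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

struct fake_mm { int refcount; };

static _Atomic(struct fake_mm *) oom_mm;	/* stands in for signal->oom_mm */

/* Record mm exactly once; only the winner takes an extra reference. */
static void mark_victim(struct fake_mm *mm)
{
	struct fake_mm *expected = NULL;

	if (atomic_compare_exchange_strong(&oom_mm, &expected, mm))
		mm->refcount++;			/* analogous to mmgrab() */
}

int main(void)
{
	struct fake_mm a = { .refcount = 1 }, b = { .refcount = 1 };

	mark_victim(&a);
	mark_victim(&b);	/* loses the race: oom_mm is already set */
	printf("a=%d b=%d\n", a.refcount, b.refcount);	/* a=2 b=1 */
	return 0;
}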
757 * exit_oom_victim - note the exit of an OOM victim
768 * oom_killer_enable - enable OOM killer
777 * oom_killer_disable - disable OOM killer
817 struct signal_struct *sig = task->signal; in __task_will_free_mem()
824 if (sig->flags & SIGNAL_GROUP_COREDUMP) in __task_will_free_mem()
827 if (sig->flags & SIGNAL_GROUP_EXIT) in __task_will_free_mem()
830 if (thread_group_empty(task) && (task->flags & PF_EXITING)) in __task_will_free_mem()
840 * Caller has to make sure that task->mm is stable (hold task_lock or
845 struct mm_struct *mm = task->mm; in task_will_free_mem()
864 if (test_bit(MMF_OOM_SKIP, &mm->flags)) in task_will_free_mem()
867 if (atomic_read(&mm->mm_users) <= 1) in task_will_free_mem()
899 message, task_pid_nr(victim), victim->comm); in __oom_kill_process()
909 mm = victim->mm; in __oom_kill_process()
923 …pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB… in __oom_kill_process()
924 message, task_pid_nr(victim), victim->comm, K(mm->total_vm), in __oom_kill_process()
929 mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj); in __oom_kill_process()
933 * Kill all user processes sharing victim->mm in other thread groups, if in __oom_kill_process()
935 * depletion of all memory. This prevents mm->mmap_lock livelock when an in __oom_kill_process()
949 set_bit(MMF_OOM_SKIP, &mm->flags); in __oom_kill_process()
951 task_pid_nr(victim), victim->comm, in __oom_kill_process()
952 task_pid_nr(p), p->comm); in __oom_kill_process()
959 if (unlikely(p->flags & PF_KTHREAD)) in __oom_kill_process()
979 if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN && in oom_kill_memcg_member()
987 static void oom_kill_process(struct oom_control *oc, const char *message) in oom_kill_process() argument
989 struct task_struct *victim = oc->chosen; in oom_kill_process()
1010 dump_header(oc, victim); in oom_kill_process()
1017 oom_group = mem_cgroup_get_oom_group(victim, oc->memcg); in oom_kill_process()
1035 static void check_panic_on_oom(struct oom_control *oc) in check_panic_on_oom() argument
1045 if (oc->constraint != CONSTRAINT_NONE) in check_panic_on_oom()
1049 if (is_sysrq_oom(oc)) in check_panic_on_oom()
1051 dump_header(oc, NULL); in check_panic_on_oom()
1053 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); in check_panic_on_oom()
1071 * out_of_memory - kill the "best" process when we run out of memory
1072 * @oc: pointer to struct oom_control
1079 bool out_of_memory(struct oom_control *oc) in out_of_memory() argument
1086 if (!is_memcg_oom(oc)) { in out_of_memory()
1095 * select it. The goal is to allow it to allocate so that it may in out_of_memory()
1105 * The OOM killer does not compensate for IO-less reclaim. in out_of_memory()
1107 * make sure to exclude the 0 mask - all other users should have at least in out_of_memory()
1111 if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) in out_of_memory()
1118 oc->constraint = constrained_alloc(oc); in out_of_memory()
1119 if (oc->constraint != CONSTRAINT_MEMORY_POLICY) in out_of_memory()
1120 oc->nodemask = NULL; in out_of_memory()
1121 check_panic_on_oom(oc); in out_of_memory()
1123 if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task && in out_of_memory()
1124 current->mm && !oom_unkillable_task(current) && in out_of_memory()
1125 oom_cpuset_eligible(current, oc) && in out_of_memory()
1126 current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { in out_of_memory()
1128 oc->chosen = current; in out_of_memory()
1129 oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)"); in out_of_memory()
1133 select_bad_process(oc); in out_of_memory()
1135 if (!oc->chosen) { in out_of_memory()
1136 dump_header(oc, NULL); in out_of_memory()
1140 * system level, we cannot survive this and will enter in out_of_memory()
1143 if (!is_sysrq_oom(oc) && !is_memcg_oom(oc)) in out_of_memory()
1146 if (oc->chosen && oc->chosen != (void *)-1UL) in out_of_memory()
1147 oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" : in out_of_memory()
1149 return !!oc->chosen; in out_of_memory()
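Every oc-> access in this listing reads from a single struct oom_control that the caller fills in before invoking out_of_memory(). Below is a hedged, kernel-style sketch of a global (non-memcg) setup, limited to the fields the matched lines actually touch; gfp_mask and order stand for the failing allocation's parameters and are placeholders, not a verbatim copy of the allocator's OOM path.

/* sketch only: mirrors the fields read via oc-> above, not a real caller */
struct oom_control oc = {
	.zonelist	= node_zonelist(numa_node_id(), gfp_mask),
	.nodemask	= NULL,		/* no mempolicy constraint */
	.memcg		= NULL,		/* global OOM, so is_memcg_oom() is false */
	.gfp_mask	= gfp_mask,
	.order		= order,	/* order == -1 is reserved for the sysrq path */
};

if (out_of_memory(&oc))
	pr_debug("OOM killer made progress\n");	/* a victim was chosen or current is exiting */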