
Lines Matching +full:oc +full:-level +full:-select

1 // SPDX-License-Identifier: GPL-2.0-only
15 * Since we won't call these routines often (on a well-configured
70 static inline bool is_memcg_oom(struct oom_control *oc) in is_memcg_oom() argument
72 return oc->memcg != NULL; in is_memcg_oom()
77 * oom_cpuset_eligible() - check task eligibility for kill
79 * @oc: pointer to struct oom_control
85 * This function assumes oom-killer context and that 'current' has triggered
86 * the oom-killer.
89 struct oom_control *oc) in oom_cpuset_eligible() argument
93 const nodemask_t *mask = oc->nodemask; in oom_cpuset_eligible()
95 if (is_memcg_oom(oc)) in oom_cpuset_eligible()
123 static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc) in oom_cpuset_eligible() argument
130 * The process p may have detached its own ->mm while exiting or through
132 * pointer. Return p, or any of its subthreads with a valid ->mm, with
143 if (likely(t->mm)) in find_lock_task_mm()
155 * order == -1 means the oom kill is required by sysrq, otherwise only
158 static inline bool is_sysrq_oom(struct oom_control *oc) in is_sysrq_oom() argument
160 return oc->order == -1; in is_sysrq_oom()
168 if (p->flags & PF_KTHREAD) in oom_unkillable_task()
193 * oom_badness - heuristic function to determine which candidate task to kill
218 adj = (long)p->signal->oom_score_adj; in oom_badness()
220 test_bit(MMF_OOM_SKIP, &p->mm->flags) || in oom_badness()
230 points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) + in oom_badness()
231 mm_pgtables_bytes(p->mm) / PAGE_SIZE; in oom_badness()
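The three matched lines above are the core of the badness heuristic: a task's score is its resident pages plus swap entries plus page-table pages, and the kernel then biases that sum by oom_score_adj expressed in thousandths of the totalpages baseline (a task at OOM_SCORE_ADJ_MIN is never selected). A minimal user-space sketch of that arithmetic, not kernel code; all names below are illustrative:

#include <limits.h>

#define SKETCH_OOM_SCORE_ADJ_MIN (-1000)	/* mirrors OOM_SCORE_ADJ_MIN */

/* rss, swapents and pgtable_pages are page counts; totalpages is the
 * memory+swap baseline the score is normalised against. */
static long badness_sketch(long rss, long swapents, long pgtable_pages,
			   long oom_score_adj, long totalpages)
{
	long points;

	if (oom_score_adj == SKETCH_OOM_SCORE_ADJ_MIN)
		return LONG_MIN;		/* never pick this task */

	points = rss + swapents + pgtable_pages;

	/* oom_score_adj shifts the score by adj thousandths of totalpages */
	points += oom_score_adj * (totalpages / 1000);

	return points;
}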
251 static enum oom_constraint constrained_alloc(struct oom_control *oc) in constrained_alloc() argument
255 enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask); in constrained_alloc()
259 if (is_memcg_oom(oc)) { in constrained_alloc()
260 oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1; in constrained_alloc()
265 oc->totalpages = totalram_pages() + total_swap_pages; in constrained_alloc()
270 if (!oc->zonelist) in constrained_alloc()
277 if (oc->gfp_mask & __GFP_THISNODE) in constrained_alloc()
285 if (oc->nodemask && in constrained_alloc()
286 !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { in constrained_alloc()
287 oc->totalpages = total_swap_pages; in constrained_alloc()
288 for_each_node_mask(nid, *oc->nodemask) in constrained_alloc()
289 oc->totalpages += node_present_pages(nid); in constrained_alloc()
294 for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, in constrained_alloc()
295 highest_zoneidx, oc->nodemask) in constrained_alloc()
296 if (!cpuset_zone_allowed(zone, oc->gfp_mask)) in constrained_alloc()
300 oc->totalpages = total_swap_pages; in constrained_alloc()
302 oc->totalpages += node_present_pages(nid); in constrained_alloc()
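Both constrained branches above compute the baseline that the badness score is later normalised against: swap pages plus the present pages of only the nodes the allocation is allowed to use. A small illustrative sketch of that accounting, with hypothetical inputs rather than the kernel's nodemask iterators:

/* node_present[] is indexed by node id; nodes[] lists the allowed nodes,
 * mirroring the for_each_node_mask() walk above. */
static unsigned long constrained_totalpages(const unsigned long *node_present,
					    const int *nodes, int nr_nodes,
					    unsigned long total_swap_pages)
{
	unsigned long totalpages = total_swap_pages;
	int i;

	for (i = 0; i < nr_nodes; i++)
		totalpages += node_present[nodes[i]];

	return totalpages;
}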
310 struct oom_control *oc = arg; in oom_evaluate_task() local
317 if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc)) in oom_evaluate_task()
326 if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) { in oom_evaluate_task()
327 if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) in oom_evaluate_task()
334 * killed first if it triggers an oom, then select it. in oom_evaluate_task()
338 goto select; in oom_evaluate_task()
341 points = oom_badness(task, oc->totalpages); in oom_evaluate_task()
342 if (points == LONG_MIN || points < oc->chosen_points) in oom_evaluate_task()
345 select: in oom_evaluate_task()
346 if (oc->chosen) in oom_evaluate_task()
347 put_task_struct(oc->chosen); in oom_evaluate_task()
349 oc->chosen = task; in oom_evaluate_task()
350 oc->chosen_points = points; in oom_evaluate_task()
354 if (oc->chosen) in oom_evaluate_task()
355 put_task_struct(oc->chosen); in oom_evaluate_task()
356 oc->chosen = (void *)-1UL; in oom_evaluate_task()
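oom_evaluate_task() above is the per-task callback of a max-scan: keep the highest-scoring candidate seen so far, let an equal score replace the current choice so later tasks win ties, and store the (void *)-1UL sentinel in oc->chosen when the scan is aborted. A compact sketch of just the keep-or-replace step, with illustrative types and names:

#include <limits.h>

struct pick {
	int id;			/* stand-in for the task pointer */
	long points;
};

/* chosen->points starts at LONG_MIN, as select_bad_process() does below. */
static void consider_candidate(struct pick *chosen, const struct pick *task)
{
	if (task->points == LONG_MIN || task->points < chosen->points)
		return;			/* keep the current choice */

	*chosen = *task;		/* equal or better: the newer task wins */
}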
362 * 'points'. In case scan was aborted, oc->chosen is set to -1.
364 static void select_bad_process(struct oom_control *oc) in select_bad_process() argument
366 oc->chosen_points = LONG_MIN; in select_bad_process()
368 if (is_memcg_oom(oc)) in select_bad_process()
369 mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); in select_bad_process()
375 if (oom_evaluate_task(p, oc)) in select_bad_process()
383 struct oom_control *oc = arg; in dump_task() local
390 if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc)) in dump_task()
404 task->pid, from_kuid(&init_user_ns, task_uid(task)), in dump_task()
405 task->tgid, task->mm->total_vm, get_mm_rss(task->mm), in dump_task()
406 mm_pgtables_bytes(task->mm), in dump_task()
407 get_mm_counter(task->mm, MM_SWAPENTS), in dump_task()
408 task->signal->oom_score_adj, task->comm); in dump_task()
415 * dump_tasks - dump current memory state of all system tasks
416 * @oc: pointer to struct oom_control
424 static void dump_tasks(struct oom_control *oc) in dump_tasks() argument
429 if (is_memcg_oom(oc)) in dump_tasks()
430 mem_cgroup_scan_tasks(oc->memcg, dump_task, oc); in dump_tasks()
436 dump_task(p, oc); in dump_tasks()
441 static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim) in dump_oom_summary() argument
444 pr_info("oom-kill:constraint=%s,nodemask=%*pbl", in dump_oom_summary()
445 oom_constraint_text[oc->constraint], in dump_oom_summary()
446 nodemask_pr_args(oc->nodemask)); in dump_oom_summary()
448 mem_cgroup_print_oom_context(oc->memcg, victim); in dump_oom_summary()
449 pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid, in dump_oom_summary()
453 static void dump_header(struct oom_control *oc, struct task_struct *p) in dump_header() argument
455 pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n", in dump_header()
456 current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order, in dump_header()
457 current->signal->oom_score_adj); in dump_header()
458 if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) in dump_header()
462 if (is_memcg_oom(oc)) in dump_header()
463 mem_cgroup_print_oom_meminfo(oc->memcg); in dump_header()
465 show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask); in dump_header()
470 dump_tasks(oc); in dump_header()
472 dump_oom_summary(oc, p); in dump_header()
483 #define K(x) ((x) << (PAGE_SHIFT-10))
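K() converts a page count into kilobytes by shifting left by PAGE_SHIFT - 10, i.e. multiplying by PAGE_SIZE / 1024; it is what produces the "...kB" figures in the reap and kill messages below. A standalone check of the conversion, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed: 4 KiB pages */
#define K(x) ((x) << (PAGE_SHIFT - 10))

int main(void)
{
	unsigned long pages = 2560;		/* 10 MiB worth of 4 KiB pages */

	printf("%lu pages = %lukB\n", pages, K(pages));	/* 2560 pages = 10240kB */
	return 0;
}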
486 * task->mm can be NULL if the task is the exited group leader. So to
496 struct mm_struct *t_mm = READ_ONCE(t->mm); in process_shares_mm()
524 set_bit(MMF_UNSTABLE, &mm->flags); in __oom_reap_task_mm()
526 for (vma = mm->mmap ; vma; vma = vma->vm_next) { in __oom_reap_task_mm()
540 if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { in __oom_reap_task_mm()
545 vma, mm, vma->vm_start, in __oom_reap_task_mm()
546 vma->vm_end); in __oom_reap_task_mm()
573 trace_skip_task_reaping(tsk->pid); in oom_reap_task_mm()
583 if (test_bit(MMF_OOM_SKIP, &mm->flags)) { in oom_reap_task_mm()
584 trace_skip_task_reaping(tsk->pid); in oom_reap_task_mm()
588 trace_start_task_reaping(tsk->pid); in oom_reap_task_mm()
595 …pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n… in oom_reap_task_mm()
596 task_pid_nr(tsk), tsk->comm, in oom_reap_task_mm()
601 trace_finish_task_reaping(tsk->pid); in oom_reap_task_mm()
612 struct mm_struct *mm = tsk->signal->oom_mm; in oom_reap_task()
619 test_bit(MMF_OOM_SKIP, &mm->flags)) in oom_reap_task()
623 task_pid_nr(tsk), tsk->comm); in oom_reap_task()
628 tsk->oom_reaper_list = NULL; in oom_reap_task()
634 set_bit(MMF_OOM_SKIP, &mm->flags); in oom_reap_task()
649 oom_reaper_list = tsk->oom_reaper_list; in oom_reaper()
664 struct mm_struct *mm = tsk->signal->oom_mm; in wake_oom_reaper()
667 /* The victim managed to terminate on its own - see exit_mmap */ in wake_oom_reaper()
668 if (test_bit(MMF_OOM_SKIP, &mm->flags)) { in wake_oom_reaper()
674 tsk->oom_reaper_list = oom_reaper_list; in wake_oom_reaper()
677 trace_wake_reaper(tsk->pid); in wake_oom_reaper()
693 if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) in queue_oom_reaper()
697 timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0); in queue_oom_reaper()
698 tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY; in queue_oom_reaper()
699 add_timer(&tsk->oom_reaper_timer); in queue_oom_reaper()
715 * mark_oom_victim - mark the given task as OOM victim
721 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
726 struct mm_struct *mm = tsk->mm;
734 if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
735 mmgrab(tsk->signal->oom_mm);
736 set_bit(MMF_OOM_VICTIM, &mm->flags);
747 trace_mark_victim(tsk->pid);
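mark_oom_victim() above publishes the victim's mm with a cmpxchg so that only the first marker pins the mm (mmgrab()) and sets MMF_OOM_VICTIM; later markers find signal->oom_mm already non-NULL and skip that step. The same first-setter-wins pattern in portable C11 atomics, as a sketch rather than the kernel API:

#include <stdatomic.h>
#include <stddef.h>

struct victim_slot {
	_Atomic(void *) oom_mm;		/* stands in for signal->oom_mm */
};

static void record_victim_mm(struct victim_slot *slot, void *mm)
{
	void *expected = NULL;

	if (atomic_compare_exchange_strong(&slot->oom_mm, &expected, mm)) {
		/* we won the race: this is where the kernel takes the
		 * extra reference (mmgrab) and sets MMF_OOM_VICTIM */
	}
	/* losers fall through: the mm was already recorded */
}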
751 * exit_oom_victim - note the exit of an OOM victim
762 * oom_killer_enable - enable OOM killer
771 * oom_killer_disable - disable OOM killer
811 struct signal_struct *sig = task->signal; in __task_will_free_mem()
818 if (sig->flags & SIGNAL_GROUP_COREDUMP) in __task_will_free_mem()
821 if (sig->flags & SIGNAL_GROUP_EXIT) in __task_will_free_mem()
824 if (thread_group_empty(task) && (task->flags & PF_EXITING)) in __task_will_free_mem()
834 * Caller has to make sure that task->mm is stable (hold task_lock or
839 struct mm_struct *mm = task->mm; in task_will_free_mem()
858 if (test_bit(MMF_OOM_SKIP, &mm->flags)) in task_will_free_mem()
861 if (atomic_read(&mm->mm_users) <= 1) in task_will_free_mem()
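Taken together, the checks above form a single predicate: a task can be expected to release its memory on its own only when its thread group is exiting (and not stuck in a coredump), the reaper has not already given up on the mm (MMF_OOM_SKIP), and no other user still holds the address space. A boolean sketch of that decision; the flags and struct here are illustrative, not kernel types:

#include <stdbool.h>

struct exit_state {
	bool exiting;		/* SIGNAL_GROUP_EXIT or a lone PF_EXITING thread */
	bool coredumping;	/* SIGNAL_GROUP_COREDUMP: may take arbitrarily long */
	bool oom_skip;		/* MMF_OOM_SKIP: reaper already gave up */
	int  mm_users;		/* remaining users of the address space */
};

static bool will_free_mem_sketch(const struct exit_state *s)
{
	if (s->coredumping || !s->exiting)
		return false;		/* nothing guarantees a quick exit */
	if (s->oom_skip)
		return false;		/* memory could not be reaped earlier */
	return s->mm_users <= 1;	/* nobody else keeps the mm alive */
}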
893 message, task_pid_nr(victim), victim->comm); in __oom_kill_process()
903 mm = victim->mm; in __oom_kill_process()
917 …pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB… in __oom_kill_process()
918 message, task_pid_nr(victim), victim->comm, K(mm->total_vm), in __oom_kill_process()
923 mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj); in __oom_kill_process()
927 * Kill all user processes sharing victim->mm in other thread groups, if in __oom_kill_process()
929 * depletion of all memory. This prevents mm->mmap_lock livelock when an in __oom_kill_process()
943 set_bit(MMF_OOM_SKIP, &mm->flags); in __oom_kill_process()
945 task_pid_nr(victim), victim->comm, in __oom_kill_process()
946 task_pid_nr(p), p->comm); in __oom_kill_process()
953 if (unlikely(p->flags & PF_KTHREAD)) in __oom_kill_process()
973 if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN && in oom_kill_memcg_member()
981 static void oom_kill_process(struct oom_control *oc, const char *message) in oom_kill_process() argument
983 struct task_struct *victim = oc->chosen; in oom_kill_process()
1004 dump_header(oc, victim); in oom_kill_process()
1011 oom_group = mem_cgroup_get_oom_group(victim, oc->memcg); in oom_kill_process()
1029 static void check_panic_on_oom(struct oom_control *oc) in check_panic_on_oom() argument
1039 if (oc->constraint != CONSTRAINT_NONE) in check_panic_on_oom()
1043 if (is_sysrq_oom(oc)) in check_panic_on_oom()
1045 dump_header(oc, NULL); in check_panic_on_oom()
1047 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); in check_panic_on_oom()
1065 * out_of_memory - kill the "best" process when we run out of memory
1066 * @oc: pointer to struct oom_control
1073 bool out_of_memory(struct oom_control *oc) in out_of_memory() argument
1080 if (!is_memcg_oom(oc)) { in out_of_memory()
1089 * select it. The goal is to allow it to allocate so that it may in out_of_memory()
1099 * The OOM killer does not compensate for IO-less reclaim. in out_of_memory()
1101 * make sure to exclude the 0 mask - all other users should have at least
1105 if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) in out_of_memory()
1112 oc->constraint = constrained_alloc(oc); in out_of_memory()
1113 if (oc->constraint != CONSTRAINT_MEMORY_POLICY) in out_of_memory()
1114 oc->nodemask = NULL; in out_of_memory()
1115 check_panic_on_oom(oc); in out_of_memory()
1117 if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task && in out_of_memory()
1118 current->mm && !oom_unkillable_task(current) && in out_of_memory()
1119 oom_cpuset_eligible(current, oc) && in out_of_memory()
1120 current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { in out_of_memory()
1122 oc->chosen = current; in out_of_memory()
1123 oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)"); in out_of_memory()
1127 select_bad_process(oc); in out_of_memory()
1129 if (!oc->chosen) { in out_of_memory()
1130 dump_header(oc, NULL); in out_of_memory()
1134 * system level, we cannot survive this and will enter in out_of_memory()
1137 if (!is_sysrq_oom(oc) && !is_memcg_oom(oc)) in out_of_memory()
1140 if (oc->chosen && oc->chosen != (void *)-1UL) in out_of_memory()
1141 oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" : in out_of_memory()
1143 return !!oc->chosen; in out_of_memory()
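Two of the checks above, in oom_kill_memcg_member() and the oom_kill_allocating_task branch of out_of_memory(), skip any task whose oom_score_adj is OOM_SCORE_ADJ_MIN (-1000). From user space that value is set through the standard procfs knob; a minimal sketch follows (lowering the value below its current setting requires CAP_SYS_RESOURCE):

#include <stdio.h>

/* Writes OOM_SCORE_ADJ_MIN (-1000) for the calling process, so the checks
 * above treat it as unkillable. Error handling kept minimal. */
static int oom_protect_self(void)
{
	FILE *f = fopen("/proc/self/oom_score_adj", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", -1000);
	return fclose(f);
}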