/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 * Copyright (C) 2010 Google, Inc.
 *	Rewritten by David Rientjes
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

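/*
 * These tunables are exposed via sysctl as /proc/sys/vm/panic_on_oom,
 * /proc/sys/vm/oom_kill_allocating_task and /proc/sys/vm/oom_dump_tasks;
 * see Documentation/sysctl/vm.txt for their user-visible semantics.
 */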
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate thread @tsk of
 * @start shares the same mempolicy nodes as current if it is bound by such a
 * policy, and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
                                        const nodemask_t *mask)
{
        struct task_struct *tsk;
        bool ret = false;

        rcu_read_lock();
        for_each_thread(start, tsk) {
                if (mask) {
                        /*
                         * If this is a mempolicy constrained oom, tsk's
                         * cpuset is irrelevant. Only return true if its
                         * mempolicy intersects current, otherwise it may be
                         * needlessly killed.
                         */
                        ret = mempolicy_nodemask_intersects(tsk, mask);
                } else {
                        /*
                         * This is not a mempolicy constrained oom, so only
                         * check the mems of tsk's cpuset.
                         */
                        ret = cpuset_mems_allowed_intersects(current, tsk);
                }
                if (ret)
                        break;
        }
        rcu_read_unlock();

        return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
                                        const nodemask_t *mask)
{
        return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer. Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
        struct task_struct *t;

        rcu_read_lock();

        for_each_thread(p, t) {
                task_lock(t);
                if (likely(t->mm))
                        goto found;
                task_unlock(t);
        }
        t = NULL;
found:
        rcu_read_unlock();

        return t;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
                const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        if (is_global_init(p))
                return true;
        if (p->flags & PF_KTHREAD)
                return true;

        /* When mem_cgroup_out_of_memory() and p is not member of the group */
        if (memcg && !task_in_mem_cgroup(p, memcg))
                return true;

        /* p may not have freeable memory in nodemask */
        if (!has_intersects_mems_allowed(p, nodemask))
                return true;

        return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we calculate
 * @memcg: memory controller constraining the oom, if any
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible. The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
                          const nodemask_t *nodemask, unsigned long totalpages)
{
        long points;
        long adj;

        if (oom_unkillable_task(p, memcg, nodemask))
                return 0;

        p = find_lock_task_mm(p);
        if (!p)
                return 0;

        adj = (long)p->signal->oom_score_adj;
        if (adj == OOM_SCORE_ADJ_MIN) {
                task_unlock(p);
                return 0;
        }

        /*
         * The baseline for the badness score is the proportion of RAM that each
         * task's rss, pagetable and swap space use.
         */
        points = get_mm_rss(p->mm) + atomic_long_read(&p->mm->nr_ptes) +
                 get_mm_counter(p->mm, MM_SWAPENTS);
        task_unlock(p);

        /*
         * Root processes get 3% bonus, just like the __vm_enough_memory()
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
                points -= (points * 3) / 100;

        /* Normalize to oom_score_adj units */
        adj *= totalpages / 1000;
        points += adj;

        /*
         * Never return 0 for an eligible task regardless of the root bonus and
         * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
         */
        return points > 0 ? points : 1;
}
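
/*
 * Illustrative example of the arithmetic above (hypothetical numbers, not
 * taken from a real system): on a machine where totalpages is 1,000,000
 * pages, an unprivileged task whose rss + nr_ptes + swapents sum to 250,000
 * pages and whose oom_score_adj is 0 scores 250,000 points.  Setting
 * oom_score_adj to -100 subtracts 100 * (1,000,000 / 1000) = 100,000,
 * leaving 150,000 points; oom_score_adj of -1000 (OOM_SCORE_ADJ_MIN)
 * disables the task entirely via the early return above.
 */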

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
                                gfp_t gfp_mask, nodemask_t *nodemask,
                                unsigned long *totalpages)
{
        struct zone *zone;
        struct zoneref *z;
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        bool cpuset_limited = false;
        int nid;

        /* Default to all available memory */
        *totalpages = totalram_pages + total_swap_pages;

        if (!zonelist)
                return CONSTRAINT_NONE;
        /*
         * We only reach here with __GFP_THISNODE when __GFP_NOFAIL is also
         * used, so we should avoid killing current; a random task would have
         * to be killed in this case.  Ideally this would be
         * CONSTRAINT_THISNODE, but there is no way to handle that yet.
         */
        if (gfp_mask & __GFP_THISNODE)
                return CONSTRAINT_NONE;

        /*
         * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
         * the page allocator means a mempolicy is in effect. Cpuset policy
         * is enforced in get_page_from_freelist().
         */
        if (nodemask && !nodes_subset(node_states[N_MEMORY], *nodemask)) {
                *totalpages = total_swap_pages;
                for_each_node_mask(nid, *nodemask)
                        *totalpages += node_spanned_pages(nid);
                return CONSTRAINT_MEMORY_POLICY;
        }

        /* Check whether this allocation failure is caused by cpuset's wall function */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                        high_zoneidx, nodemask)
                if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
                        cpuset_limited = true;

        if (cpuset_limited) {
                *totalpages = total_swap_pages;
                for_each_node_mask(nid, cpuset_current_mems_allowed)
                        *totalpages += node_spanned_pages(nid);
                return CONSTRAINT_CPUSET;
        }
        return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
                                gfp_t gfp_mask, nodemask_t *nodemask,
                                unsigned long *totalpages)
{
        *totalpages = totalram_pages + total_swap_pages;
        return CONSTRAINT_NONE;
}
#endif

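/*
 * oom_scan_process_thread() classifies one thread for select_bad_process():
 * OOM_SCAN_SELECT selects the task outright with maximal points (it was
 * flagged via oom_task_origin()), OOM_SCAN_CONTINUE skips an ineligible
 * task, OOM_SCAN_ABORT stops the whole scan because a victim is already on
 * its way out, and OOM_SCAN_OK means the task should be scored with
 * oom_badness().
 */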
enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
                unsigned long totalpages, const nodemask_t *nodemask,
                bool force_kill)
{
        if (oom_unkillable_task(task, NULL, nodemask))
                return OOM_SCAN_CONTINUE;

        /*
         * This task already has access to memory reserves and is being killed.
         * Don't allow any other task to have access to the reserves.
         */
        if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
                if (unlikely(frozen(task)))
                        __thaw_task(task);
                if (!force_kill)
                        return OOM_SCAN_ABORT;
        }
        if (!task->mm)
                return OOM_SCAN_CONTINUE;

        /*
         * If task is allocating a lot of memory and has been marked to be
         * killed first if it triggers an oom, then select it.
         */
        if (oom_task_origin(task))
                return OOM_SCAN_SELECT;

        if (task->flags & PF_EXITING && !force_kill) {
                /*
                 * If this task is not being ptraced on exit, then wait for it
                 * to finish before killing some other task unnecessarily.
                 */
                if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
                        return OOM_SCAN_ABORT;
        }
        return OOM_SCAN_OK;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. Returns -1 on scan abort.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned int *ppoints,
                unsigned long totalpages, const nodemask_t *nodemask,
                bool force_kill)
{
        struct task_struct *g, *p;
        struct task_struct *chosen = NULL;
        unsigned long chosen_points = 0;

        rcu_read_lock();
        for_each_process_thread(g, p) {
                unsigned int points;

                switch (oom_scan_process_thread(p, totalpages, nodemask,
                                                force_kill)) {
                case OOM_SCAN_SELECT:
                        chosen = p;
                        chosen_points = ULONG_MAX;
                        /* fall through */
                case OOM_SCAN_CONTINUE:
                        continue;
                case OOM_SCAN_ABORT:
                        rcu_read_unlock();
                        return (struct task_struct *)(-1UL);
                case OOM_SCAN_OK:
                        break;
                };
                points = oom_badness(p, NULL, nodemask, totalpages);
                if (!points || points < chosen_points)
                        continue;
                /* Prefer thread group leaders for display purposes */
                if (points == chosen_points && thread_group_leader(chosen))
                        continue;

                chosen = p;
                chosen_points = points;
        }
        if (chosen)
                get_task_struct(chosen);
        rcu_read_unlock();

        *ppoints = chosen_points * 1000 / totalpages;
        return chosen;
}
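
/*
 * Note on units: *ppoints above is chosen_points rescaled to thousandths of
 * totalpages, so it is comparable to oom_score_adj; it is the "score %d"
 * value later printed by oom_kill_process().
 */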

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks. Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        struct task_struct *p;
        struct task_struct *task;

        pr_info("[ pid ] uid tgid total_vm rss nr_ptes swapents oom_score_adj name\n");
        rcu_read_lock();
        for_each_process(p) {
                if (oom_unkillable_task(p, memcg, nodemask))
                        continue;

                task = find_lock_task_mm(p);
                if (!task) {
                        /*
                         * This is a kthread or all of p's threads have already
                         * detached their mm's. There's no need to report
                         * them; they can't be oom killed anyway.
                         */
                        continue;
                }

                pr_info("[%5d] %5d %5d %8lu %8lu %7ld %8lu %5hd %s\n",
                        task->pid, from_kuid(&init_user_ns, task_uid(task)),
                        task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
                        atomic_long_read(&task->mm->nr_ptes),
                        get_mm_counter(task->mm, MM_SWAPENTS),
                        task->signal->oom_score_adj, task->comm);
                task_unlock(task);
        }
        rcu_read_unlock();
}

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
                        struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        task_lock(current);
        pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
                "oom_score_adj=%hd\n",
                current->comm, gfp_mask, order,
                current->signal->oom_score_adj);
        cpuset_print_task_mems_allowed(current);
        task_unlock(current);
        dump_stack();
        if (memcg)
                mem_cgroup_print_oom_info(memcg, p);
        else
                show_mem(SHOW_MEM_FILTER_NODES);
        if (sysctl_oom_dump_tasks)
                dump_tasks(memcg, nodemask);
}

/*
 * Number of OOM killer invocations (including memcg OOM killer).
 * Primarily used by PM freezer to check for potential races with
 * OOM killed frozen task.
 */
static atomic_t oom_kills = ATOMIC_INIT(0);

int oom_kills_count(void)
{
        return atomic_read(&oom_kills);
}

void note_oom_kill(void)
{
        atomic_inc(&oom_kills);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
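/*
 * K() converts a page count to kilobytes for the messages below: a page is
 * 2^PAGE_SHIFT bytes and a kilobyte is 2^10 bytes, so shifting left by
 * (PAGE_SHIFT - 10) scales pages to kB (e.g. with 4 kB pages the shift is 2,
 * i.e. multiply by 4).
 */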
/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */
void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                      unsigned int points, unsigned long totalpages,
                      struct mem_cgroup *memcg, nodemask_t *nodemask,
                      const char *message)
{
        struct task_struct *victim = p;
        struct task_struct *child;
        struct task_struct *t;
        struct mm_struct *mm;
        unsigned int victim_points = 0;
        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
         * its children or threads, just set TIF_MEMDIE so it can die quickly
         */
        if (p->flags & PF_EXITING) {
                set_tsk_thread_flag(p, TIF_MEMDIE);
                put_task_struct(p);
                return;
        }

        if (__ratelimit(&oom_rs))
                dump_header(p, gfp_mask, order, memcg, nodemask);

        task_lock(p);
        pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
                message, task_pid_nr(p), p->comm, points);
        task_unlock(p);

        /*
         * If any of p's children has a different mm and is eligible for kill,
         * the one with the highest oom_badness() score is sacrificed for its
         * parent. This attempts to lose the minimal amount of work done while
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
        for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;

                        if (child->mm == p->mm)
                                continue;
                        /*
                         * oom_badness() returns 0 if the thread is unkillable
                         */
                        child_points = oom_badness(child, memcg, nodemask,
                                                   totalpages);
                        if (child_points > victim_points) {
                                put_task_struct(victim);
                                victim = child;
                                victim_points = child_points;
                                get_task_struct(victim);
                        }
                }
        }
        read_unlock(&tasklist_lock);

        p = find_lock_task_mm(victim);
        if (!p) {
                put_task_struct(victim);
                return;
        } else if (victim != p) {
                get_task_struct(p);
                put_task_struct(victim);
                victim = p;
        }

        /* mm cannot safely be dereferenced after task_unlock(victim) */
        mm = victim->mm;
        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
                task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
                K(get_mm_counter(victim->mm, MM_ANONPAGES)),
                K(get_mm_counter(victim->mm, MM_FILEPAGES)));
        task_unlock(victim);

        /*
         * Kill all user processes sharing victim->mm in other thread groups, if
         * any. They don't get access to memory reserves, though, to avoid
         * depletion of all memory. This prevents mm->mmap_sem livelock when an
         * oom killed thread cannot exit because it requires the semaphore and
         * it is contended by another thread trying to allocate memory itself.
         * That thread will now get access to memory reserves since it has a
         * pending fatal signal.
         */
        rcu_read_lock();
        for_each_process(p)
                if (p->mm == mm && !same_thread_group(p, victim) &&
                    !(p->flags & PF_KTHREAD)) {
                        if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
                                continue;

                        task_lock(p);   /* Protect ->comm from prctl() */
                        pr_err("Kill process %d (%s) sharing same memory\n",
                                task_pid_nr(p), p->comm);
                        task_unlock(p);
                        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
                }
        rcu_read_unlock();

        set_tsk_thread_flag(victim, TIF_MEMDIE);
        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
        put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
                        int order, const nodemask_t *nodemask)
{
        if (likely(!sysctl_panic_on_oom))
                return;
        if (sysctl_panic_on_oom != 2) {
                /*
                 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
                 * does not panic for cpuset, mempolicy, or memcg allocation
                 * failures.
                 */
                if (constraint != CONSTRAINT_NONE)
                        return;
        }
        dump_header(NULL, gfp_mask, order, NULL, nodemask);
        panic("Out of memory: %s panic_on_oom is enabled\n",
                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
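
/*
 * Sketch of how the panic_on_oom values handled above are typically set from
 * userspace via the standard procfs sysctl interface:
 *
 *	echo 1 > /proc/sys/vm/panic_on_oom	# panic on system-wide OOM only
 *	echo 2 > /proc/sys/vm/panic_on_oom	# panic even for cpuset/mempolicy/
 *						# memcg constrained OOMs
 *	echo 0 > /proc/sys/vm/panic_on_oom	# default: let the OOM killer run
 */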

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
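
/*
 * Sketch of how a driver might use the notifier chain above to shed memory
 * before a kill (illustrative only; my_cache_shrink() is a hypothetical
 * helper returning the number of pages it released).  The chain is invoked
 * from out_of_memory() below with a pointer to its 'freed' counter, so a
 * callback that frees pages can avoid the kill entirely.
 */
#if 0	/* example, not built */
static int example_oom_notify(struct notifier_block *nb,
                              unsigned long unused, void *parm)
{
        unsigned long *freed = parm;

        *freed += my_cache_shrink();	/* hypothetical reclaim of a private cache */
        return NOTIFY_OK;
}

static struct notifier_block example_oom_nb = {
        .notifier_call = example_oom_notify,
};

/* from module init: register_oom_notifier(&example_oom_nb); */
/* from module exit: unregister_oom_notifier(&example_oom_nb); */
#endif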

/*
 * Try to acquire the OOM killer lock for the zones in zonelist. Returns false
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist. Otherwise, locks all zones in the zonelist and returns true.
 */
bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
{
        struct zoneref *z;
        struct zone *zone;
        bool ret = true;

        spin_lock(&zone_scan_lock);
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                if (test_bit(ZONE_OOM_LOCKED, &zone->flags)) {
                        ret = false;
                        goto out;
                }

        /*
         * Lock each zone in the zonelist under zone_scan_lock so a parallel
         * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
         */
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                set_bit(ZONE_OOM_LOCKED, &zone->flags);

out:
        spin_unlock(&zone_scan_lock);
        return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
{
        struct zoneref *z;
        struct zone *zone;

        spin_lock(&zone_scan_lock);
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                clear_bit(ZONE_OOM_LOCKED, &zone->flags);
        spin_unlock(&zone_scan_lock);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 * @force_kill: true if a task must be killed, even if others are exiting
 *
 * If we run out of memory, we have the choice between either killing a
 * random task (bad), letting the system crash (worse), or trying to be
 * smart about which process to kill. Note that we don't have to be
 * perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
                int order, nodemask_t *nodemask, bool force_kill)
{
        const nodemask_t *mpol_mask;
        struct task_struct *p;
        unsigned long totalpages;
        unsigned long freed = 0;
        unsigned int uninitialized_var(points);
        enum oom_constraint constraint = CONSTRAINT_NONE;
        int killed = 0;

        blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
        if (freed > 0)
                /* Got some memory back in the last second. */
                return;

        /*
         * If current has a pending SIGKILL or is exiting, then automatically
         * select it. The goal is to allow it to allocate so that it may
         * quickly exit and free its memory.
         */
        if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
                set_thread_flag(TIF_MEMDIE);
                return;
        }

        /*
         * Check if there were limitations on the allocation (only relevant for
         * NUMA) that may require different handling.
         */
        constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
                                       &totalpages);
        mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
        check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

        if (sysctl_oom_kill_allocating_task && current->mm &&
            !oom_unkillable_task(current, NULL, nodemask) &&
            current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
                get_task_struct(current);
                oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
                                 nodemask,
                                 "Out of memory (oom_kill_allocating_task)");
                goto out;
        }

        p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!p) {
                dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
                panic("Out of memory and no killable processes...\n");
        }
        if (p != (void *)-1UL) {
                oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
                                 nodemask, "Out of memory");
                killed = 1;
        }
out:
        /*
         * Give the killed threads a good chance of exiting before trying to
         * allocate memory again.
         */
        if (killed)
                schedule_timeout_killable(1);
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If any populated zone has ZONE_OOM_LOCKED set, a
 * parallel oom killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
        struct zonelist *zonelist;

        if (mem_cgroup_oom_synchronize(true))
                return;

        zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
        if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
                out_of_memory(NULL, 0, 0, NULL, false);
                oom_zonelist_unlock(zonelist, GFP_KERNEL);
        }
}