// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>
#include <linux/cred.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent over-eager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim().
 */
DEFINE_MUTEX(oom_lock);
/* Serializes oom_score_adj and oom_score_adj_min updates */
DEFINE_MUTEX(oom_adj_mutex);

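/* True if this oom was raised by a memory cgroup limit rather than globally. */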
static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function assumes oom-killer context and that 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	if (is_memcg_oom(oc))
		return true;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_in_oom_domain(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* Return true if the task is not suitable as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;
	return false;
}

/*
 * Check whether the amount of unreclaimable slab is greater than
 * all user memory (LRU pages).
 * dump_unreclaimable_slab() could help in the case of an oom caused
 * by too much unreclaimable slab used by the kernel.
 */
static bool should_dump_unreclaim_slab(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
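/*
 * Illustrative arithmetic (hypothetical numbers, not taken from this source):
 * with totalpages = 4,000,000 pages, a task using 100,000 pages of rss, swap
 * and page tables with oom_score_adj = 300 scores roughly
 * 100,000 + 300 * (4,000,000 / 1000) = 1,300,000 points.
 */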
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have already been oom reaped or are in
	 * the middle of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}

static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current. We have to kill a random task in this case.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
	 * handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure was caused by a cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

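/*
 * Evaluate one candidate task: keep the task with the highest oom_badness()
 * score seen so far in oc->chosen.  A nonzero return aborts the whole scan.
 */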
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);
	if (points == LONG_MIN || points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
}

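/* Print one line of memory state for an eligible task; helper for dump_tasks(). */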
static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * All of p's threads have already detached their mm's. There's
		 * no need to report them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}

static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}

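/*
 * Emit the full oom report: allocation context, a stack trace, memory state
 * and, depending on sysctl_oom_dump_tasks, the eligible task list.
 */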
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
			current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (should_dump_unreclaim_slab())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
	if (p)
		dump_oom_summary(oc, p);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

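/* Convert a page count into kilobytes for message output. */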
#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (!can_madv_lru_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						vma, mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb);
		}
	}

	return ret;
}

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
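/*
 * Reap the victim's mm, retrying the lock a few times before giving up;
 * either way the mm ends up marked MMF_OOM_SKIP so the OOM killer can move on.
 */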
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has either been reaped
	 * or somebody can't call mmap_write_unlock(mm).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by queue_oom_reaper */
	put_task_struct(tsk);
}

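/* Main loop of the oom_reaper kthread: pop queued victims and reap them. */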
static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock_irq(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock_irq(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

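/*
 * Queue @tsk on oom_reaper_list and wake the reaper thread, unless the victim
 * has already managed to exit on its own.
 */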
static void __wake_oom_reaper(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->signal->oom_mm;
	unsigned long flags;

	/* The victim managed to terminate on its own - see exit_mmap */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		put_task_struct(tsk);
		return;
	}

	spin_lock_irqsave(&oom_reaper_lock, flags);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock_irqrestore(&oom_reaper_lock, flags);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

static void wake_oom_reaper(struct timer_list *timer)
{
	struct task_struct *tsk = container_of(timer, struct task_struct,
			oom_reaper_timer);
	__wake_oom_reaper(tsk);
}

/*
 * Give the OOM victim time to exit naturally before invoking the oom reaper.
 * The timer's timeout is arbitrary... the longer it is, the longer the worst
 * case scenario for the OOM can take. If it is too small, the oom_reaper can
 * get in the way and release resources needed by the process exit path.
 * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
 * before the exit path is able to wake the futex waiters.
 */
#define OOM_REAPER_DELAY (2*HZ)
static void queue_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);
	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
	add_timer(&tsk->oom_reaper_timer);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void queue_oom_reaper(struct task_struct *tsk)
{
}

static void __wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/*
 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
 * (either under task_lock or by operating on current).
 */
static void __mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}
}

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
 * (either under task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	const struct cred *cred;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	__mark_oom_victim(tsk);

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer wouldn't be able to free
	 * any memory and would livelock. freezing_slow_path will tell the
	 * freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	cred = get_task_cred(tsk);
	trace_mark_victim(tsk, cred->uid.val);
	put_cred(cred);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger the OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * userspace would see unexpected allocation failures as a result. Any new
 * usage of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}

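/*
 * Check a single thread group: true if it is already exiting (and not stuck
 * in a coredump), so its memory should be released soon.  task_will_free_mem()
 * below extends this check to every user of the mm.
 */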
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * The caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without an mm because they might have already passed
	 * exit_mm() and exit_oom_victim(). The oom_reaper could have rescued
	 * that, but do not rely on it for now. We could consider using
	 * find_lock_task_mm() in the future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

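/*
 * Deliver SIGKILL to the selected victim and to all other users of its mm,
 * mark it as an OOM victim and, where possible, hand the mm to the oom reaper.
 */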
static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
			message, task_pid_nr(victim), victim->comm);
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_lock livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from the userspace so
		 * we are ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		queue_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Kill the provided task unless it is secured by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}

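/*
 * Kill the chosen victim; if the victim's memory cgroup is configured to be
 * killed as a unit, kill every eligible task in that cgroup as well.
 */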
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		queue_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void *)message);
		mem_cgroup_put(oom_group);
	}
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

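/*
 * Register/unregister callbacks that get a chance to free memory before the
 * OOM killer is invoked; see the notifier call in out_of_memory() below.
 */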
int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		queue_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has
	 * to invoke the OOM killer even if it is a GFP_NOFS allocation.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current) &&
	    oom_cpuset_eligible(current, oc) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		int ret = false;

		trace_android_vh_oom_check_panic(oc, &ret);
		if (ret)
			return true;

		dump_header(oc, NULL);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because some allocation has failed. We have
 * to take care of the memcg OOM here because this is the only safe context
 * without any locks held, but let the oom killer triggered from the allocation
 * context care about the global OOM.
 */
void pagefault_out_of_memory(void)
{
	static DEFINE_RATELIMIT_STATE(pfoom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (fatal_signal_pending(current))
		return;

	if (__ratelimit(&pfoom_rs))
		pr_warn("Huh VM_FAULT_OOM leaked out to the #PF handler. Retrying PF\n");
}

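/*
 * The process_mrelease(2) syscall: reap the address space of a dying process
 * on the caller's behalf, so its memory is freed without waiting for the
 * victim's exit path to run.
 */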
SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
{
#ifdef CONFIG_MMU
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	struct task_struct *p;
	unsigned int f_flags;
	bool reap = false;
	struct pid *pid;
	long ret = 0;

	if (flags)
		return -EINVAL;

	pid = pidfd_get_pid(pidfd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	task = get_pid_task(pid, PIDTYPE_TGID);
	if (!task) {
		ret = -ESRCH;
		goto put_pid;
	}

	/*
	 * Make sure to choose a thread which still has a reference to mm
	 * during the group exit.
	 */
	p = find_lock_task_mm(task);
	if (!p) {
		ret = -ESRCH;
		goto put_task;
	}

	mm = p->mm;
	mmgrab(mm);

	/*
	 * If we are too late and exit_mmap already checked mm_is_oom_victim
	 * then we will block on mmap_read_lock until exit_mmap releases
	 * mmap_lock.
	 */
	set_bit(MMF_OOM_VICTIM, &mm->flags);

	if (task_will_free_mem(p))
		reap = true;
	else {
		/* Error only if the work has not been done already */
		if (!test_bit(MMF_OOM_SKIP, &mm->flags))
			ret = -EINVAL;
	}
	task_unlock(p);

	if (!reap)
		goto drop_mm;

	if (mmap_read_lock_killable(mm)) {
		ret = -EINTR;
		goto drop_mm;
	}
	/*
	 * Check MMF_OOM_SKIP again under mmap_read_lock protection to ensure
	 * a possible change in exit_mmap is seen.
	 */
	if (!test_bit(MMF_OOM_SKIP, &mm->flags) && !__oom_reap_task_mm(mm))
		ret = -EAGAIN;
	mmap_read_unlock(mm);

drop_mm:
	mmdrop(mm);
put_task:
	put_task_struct(task);
put_pid:
	put_pid(pid);
	return ret;
#else
	return -ENOSYS;
#endif /* CONFIG_MMU */
}

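/*
 * If @p is already dying and will free its mm, mark it as an OOM victim and
 * hand it straight to the oom reaper without killing anything.
 */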
void add_to_oom_reaper(struct task_struct *p)
{
	p = find_lock_task_mm(p);
	if (!p)
		return;
	if (task_will_free_mem(p)) {
		__mark_oom_victim(p);
		if (!test_and_set_bit(MMF_OOM_REAP_QUEUED,
				     &p->signal->oom_mm->flags)) {
			get_task_struct(p);
			__wake_oom_reaper(p);
		}
	}
	task_unlock(p);
}