1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  */
24 
25 #include <linux/page_counter.h>
26 #include <linux/memcontrol.h>
27 #include <linux/cgroup.h>
28 #include <linux/pagewalk.h>
29 #include <linux/sched/mm.h>
30 #include <linux/shmem_fs.h>
31 #include <linux/hugetlb.h>
32 #include <linux/pagemap.h>
33 #include <linux/vm_event_item.h>
34 #include <linux/smp.h>
35 #include <linux/page-flags.h>
36 #include <linux/backing-dev.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/rcupdate.h>
39 #include <linux/limits.h>
40 #include <linux/export.h>
41 #include <linux/mutex.h>
42 #include <linux/rbtree.h>
43 #include <linux/slab.h>
44 #include <linux/swap.h>
45 #include <linux/swapops.h>
46 #include <linux/spinlock.h>
47 #include <linux/eventfd.h>
48 #include <linux/poll.h>
49 #include <linux/sort.h>
50 #include <linux/fs.h>
51 #include <linux/seq_file.h>
52 #include <linux/vmpressure.h>
53 #include <linux/mm_inline.h>
54 #include <linux/swap_cgroup.h>
55 #include <linux/cpu.h>
56 #include <linux/oom.h>
57 #include <linux/lockdep.h>
58 #include <linux/file.h>
59 #include <linux/tracehook.h>
60 #include <linux/psi.h>
61 #include <linux/seq_buf.h>
62 #include "internal.h"
63 #include <net/sock.h>
64 #include <net/ip.h>
65 #include "slab.h"
66 
67 #include <linux/uaccess.h>
68 
69 #include <trace/events/vmscan.h>
70 #include <trace/hooks/mm.h>
71 
72 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
73 EXPORT_SYMBOL(memory_cgrp_subsys);
74 
75 struct mem_cgroup *root_mem_cgroup __read_mostly;
76 EXPORT_SYMBOL_GPL(root_mem_cgroup);
77 
78 /* Active memory cgroup to use from an interrupt context */
79 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
80 
81 /* Socket memory accounting disabled? */
82 static bool cgroup_memory_nosocket;
83 
84 /* Kernel memory accounting disabled? */
85 static bool cgroup_memory_nokmem;
86 
87 /* Whether the swap controller is active */
88 #ifdef CONFIG_MEMCG_SWAP
89 bool cgroup_memory_noswap __read_mostly;
90 #else
91 #define cgroup_memory_noswap		1
92 #endif
93 
94 #ifdef CONFIG_CGROUP_WRITEBACK
95 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
96 #endif
97 
98 /* Whether legacy memory+swap accounting is active */
99 static bool do_memsw_account(void)
100 {
101 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
102 }
103 
104 #define THRESHOLDS_EVENTS_TARGET 128
105 #define SOFTLIMIT_EVENTS_TARGET 1024
106 
107 /*
108  * Cgroups above their limits are maintained in a RB-Tree, independent of
109  * their hierarchy representation
110  */
111 
112 struct mem_cgroup_tree_per_node {
113 	struct rb_root rb_root;
114 	struct rb_node *rb_rightmost;
115 	spinlock_t lock;
116 };
117 
118 struct mem_cgroup_tree {
119 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
120 };
121 
122 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
123 
124 /* for OOM */
125 struct mem_cgroup_eventfd_list {
126 	struct list_head list;
127 	struct eventfd_ctx *eventfd;
128 };
129 
130 /*
131  * cgroup_event represents events which userspace wants to receive.
132  */
133 struct mem_cgroup_event {
134 	/*
135 	 * memcg which the event belongs to.
136 	 */
137 	struct mem_cgroup *memcg;
138 	/*
139 	 * eventfd to signal userspace about the event.
140 	 */
141 	struct eventfd_ctx *eventfd;
142 	/*
143 	 * Each of these is stored in a list by the cgroup.
144 	 */
145 	struct list_head list;
146 	/*
147 	 * register_event() callback will be used to add new userspace
148 	 * waiter for changes related to this event.  Use eventfd_signal()
149 	 * on eventfd to send notification to userspace.
150 	 */
151 	int (*register_event)(struct mem_cgroup *memcg,
152 			      struct eventfd_ctx *eventfd, const char *args);
153 	/*
154 	 * unregister_event() callback will be called when userspace closes
155 	 * the eventfd or on cgroup removal.  This callback must be set
156 	 * if you want to provide notification functionality.
157 	 */
158 	void (*unregister_event)(struct mem_cgroup *memcg,
159 				 struct eventfd_ctx *eventfd);
160 	/*
161 	 * All fields below are needed to unregister the event when
162 	 * userspace closes the eventfd.
163 	 */
164 	poll_table pt;
165 	wait_queue_head_t *wqh;
166 	wait_queue_entry_t wait;
167 	struct work_struct remove;
168 };
169 
170 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
171 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
172 
173 /* Stuff for moving charges at task migration. */
174 /*
175  * Types of charges to be moved.
176  */
177 #define MOVE_ANON	0x1U
178 #define MOVE_FILE	0x2U
179 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
180 
181 /* "mc" and its members are protected by cgroup_mutex */
182 static struct move_charge_struct {
183 	spinlock_t	  lock; /* for from, to */
184 	struct mm_struct  *mm;
185 	struct mem_cgroup *from;
186 	struct mem_cgroup *to;
187 	unsigned long flags;
188 	unsigned long precharge;
189 	unsigned long moved_charge;
190 	unsigned long moved_swap;
191 	struct task_struct *moving_task;	/* a task moving charges */
192 	wait_queue_head_t waitq;		/* a waitq for other context */
193 } mc = {
194 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
195 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
196 };
197 
198 /*
199  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
200  * limit reclaim to prevent infinite loops, if they ever occur.
201  */
202 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
203 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
204 
205 /* for encoding cft->private value on file */
206 enum res_type {
207 	_MEM,
208 	_MEMSWAP,
209 	_OOM_TYPE,
210 	_KMEM,
211 	_TCP,
212 };
213 
214 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
215 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
216 #define MEMFILE_ATTR(val)	((val) & 0xffff)
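/*
 * Illustrative example (not taken from this file): MEMFILE_PRIVATE(_MEM, 0)
 * packs the resource type into bits 16..31 of cft->private and the attribute
 * into bits 0..15; MEMFILE_TYPE() and MEMFILE_ATTR() unpack them again.
 */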
217 /* Used for OOM notifier */
218 #define OOM_CONTROL		(0)
219 
220 /*
221  * Iteration constructs for visiting all cgroups (under a tree).  If
222  * loops are exited prematurely (break), mem_cgroup_iter_break() must
223  * be used for reference counting.
224  */
225 #define for_each_mem_cgroup_tree(iter, root)		\
226 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
227 	     iter != NULL;				\
228 	     iter = mem_cgroup_iter(root, iter, NULL))
229 
230 #define for_each_mem_cgroup(iter)			\
231 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
232 	     iter != NULL;				\
233 	     iter = mem_cgroup_iter(NULL, iter, NULL))
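
/*
 * Illustrative use of the iterators above (hypothetical example, not taken
 * from this file): a partial walk must be terminated with
 * mem_cgroup_iter_break() so the reference held on the current position is
 * dropped:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (done_with(iter)) {	(hypothetical predicate)
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */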
234 
235 static inline bool task_is_dying(void)
236 {
237 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
238 		(current->flags & PF_EXITING);
239 }
240 
241 /* Some nice accessors for the vmpressure. */
242 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
243 {
244 	if (!memcg)
245 		memcg = root_mem_cgroup;
246 	return &memcg->vmpressure;
247 }
248 
249 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
250 {
251 	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
252 }
253 
254 #ifdef CONFIG_MEMCG_KMEM
255 static DEFINE_SPINLOCK(objcg_lock);
256 
257 static void obj_cgroup_release(struct percpu_ref *ref)
258 {
259 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
260 	struct mem_cgroup *memcg;
261 	unsigned int nr_bytes;
262 	unsigned int nr_pages;
263 	unsigned long flags;
264 
265 	/*
266 	 * At this point all allocated objects are freed, and
267 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
268 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
269 	 *
270 	 * The following sequence can lead to it:
271 	 * 1) CPU0: objcg == stock->cached_objcg
272 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
273 	 *          PAGE_SIZE bytes are charged
274 	 * 3) CPU1: a process from another memcg is allocating something,
275 	 *          the stock is flushed,
276 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
277 	 * 4) CPU0: we release this object,
278 	 *          92 bytes are added to stock->nr_bytes
279 	 * 5) CPU0: stock is flushed,
280 	 *          92 bytes are added to objcg->nr_charged_bytes
281 	 *
282 	 * As a result, nr_charged_bytes == PAGE_SIZE.
283 	 * This page will be uncharged in obj_cgroup_release().
284 	 */
285 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
286 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
287 	nr_pages = nr_bytes >> PAGE_SHIFT;
288 
289 	spin_lock_irqsave(&objcg_lock, flags);
290 	memcg = obj_cgroup_memcg(objcg);
291 	if (nr_pages)
292 		__memcg_kmem_uncharge(memcg, nr_pages);
293 	list_del(&objcg->list);
294 	mem_cgroup_put(memcg);
295 	spin_unlock_irqrestore(&objcg_lock, flags);
296 
297 	percpu_ref_exit(ref);
298 	kfree_rcu(objcg, rcu);
299 }
300 
301 static struct obj_cgroup *obj_cgroup_alloc(void)
302 {
303 	struct obj_cgroup *objcg;
304 	int ret;
305 
306 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
307 	if (!objcg)
308 		return NULL;
309 
310 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
311 			      GFP_KERNEL);
312 	if (ret) {
313 		kfree(objcg);
314 		return NULL;
315 	}
316 	INIT_LIST_HEAD(&objcg->list);
317 	return objcg;
318 }
319 
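/*
 * Reparent the obj_cgroups of @memcg to @parent: the currently active objcg
 * and any previously reparented ones are moved onto the parent's objcg_list,
 * and the active objcg's refcount is killed so that remaining charges drain
 * via obj_cgroup_release().
 */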
320 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
321 				  struct mem_cgroup *parent)
322 {
323 	struct obj_cgroup *objcg, *iter;
324 
325 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
326 
327 	spin_lock_irq(&objcg_lock);
328 
329 	/* Move active objcg to the parent's list */
330 	xchg(&objcg->memcg, parent);
331 	css_get(&parent->css);
332 	list_add(&objcg->list, &parent->objcg_list);
333 
334 	/* Move already reparented objcgs to the parent's list */
335 	list_for_each_entry(iter, &memcg->objcg_list, list) {
336 		css_get(&parent->css);
337 		xchg(&iter->memcg, parent);
338 		css_put(&memcg->css);
339 	}
340 	list_splice(&memcg->objcg_list, &parent->objcg_list);
341 
342 	spin_unlock_irq(&objcg_lock);
343 
344 	percpu_ref_kill(&objcg->refcnt);
345 }
346 
347 /*
348  * This will be used as a shrinker list's index.
349  * The main reason for not using cgroup id for this:
350  *  this works better in sparse environments, where we have a lot of memcgs,
351  *  but only a few kmem-limited. Or also, if we have, for instance, 200
352  *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
353  *  200 entry array for that.
354  *
355  * The current size of the caches array is stored in memcg_nr_cache_ids. It
356  * will double each time we have to increase it.
357  */
358 static DEFINE_IDA(memcg_cache_ida);
359 int memcg_nr_cache_ids;
360 
361 /* Protects memcg_nr_cache_ids */
362 static DECLARE_RWSEM(memcg_cache_ids_sem);
363 
364 void memcg_get_cache_ids(void)
365 {
366 	down_read(&memcg_cache_ids_sem);
367 }
368 
369 void memcg_put_cache_ids(void)
370 {
371 	up_read(&memcg_cache_ids_sem);
372 }
373 
374 /*
375  * MIN_SIZE is different than 1, because we would like to avoid going through
376  * the alloc/free process all the time. In a small machine, 4 kmem-limited
377  * cgroups is a reasonable guess. In the future, it could be a parameter or
378  * tunable, but that is strictly not necessary.
379  *
380  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
381  * this constant directly from cgroup, but it is understandable that this is
382  * better kept as an internal representation in cgroup.c. In any case, the
383  * cgrp_id space is not getting any smaller, and we don't have to necessarily
384  * increase ours as well if it increases.
385  */
386 #define MEMCG_CACHES_MIN_SIZE 4
387 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
388 
389 /*
390  * A lot of the calls to the cache allocation functions are expected to be
391  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
392  * conditional to this static branch, we'll have to allow modules that do
393  * kmem_cache_alloc and the like to see this symbol as well
394  */
395 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
396 EXPORT_SYMBOL(memcg_kmem_enabled_key);
397 #endif
398 
399 static int memcg_shrinker_map_size;
400 static DEFINE_MUTEX(memcg_shrinker_map_mutex);
401 
402 static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
403 {
404 	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
405 }
406 
407 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
408 					 int size, int old_size)
409 {
410 	struct memcg_shrinker_map *new, *old;
411 	int nid;
412 
413 	lockdep_assert_held(&memcg_shrinker_map_mutex);
414 
415 	for_each_node(nid) {
416 		old = rcu_dereference_protected(
417 			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
418 		/* Not yet online memcg */
419 		if (!old)
420 			return 0;
421 
422 		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
423 		if (!new)
424 			return -ENOMEM;
425 
426 		/* Set all old bits, clear all new bits */
427 		memset(new->map, (int)0xff, old_size);
428 		memset((void *)new->map + old_size, 0, size - old_size);
429 
430 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
431 		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
432 	}
433 
434 	return 0;
435 }
436 
437 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
438 {
439 	struct mem_cgroup_per_node *pn;
440 	struct memcg_shrinker_map *map;
441 	int nid;
442 
443 	if (mem_cgroup_is_root(memcg))
444 		return;
445 
446 	for_each_node(nid) {
447 		pn = mem_cgroup_nodeinfo(memcg, nid);
448 		map = rcu_dereference_protected(pn->shrinker_map, true);
449 		if (map)
450 			kvfree(map);
451 		rcu_assign_pointer(pn->shrinker_map, NULL);
452 	}
453 }
454 
455 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
456 {
457 	struct memcg_shrinker_map *map;
458 	int nid, size, ret = 0;
459 
460 	if (mem_cgroup_is_root(memcg))
461 		return 0;
462 
463 	mutex_lock(&memcg_shrinker_map_mutex);
464 	size = memcg_shrinker_map_size;
465 	for_each_node(nid) {
466 		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
467 		if (!map) {
468 			memcg_free_shrinker_maps(memcg);
469 			ret = -ENOMEM;
470 			break;
471 		}
472 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
473 	}
474 	mutex_unlock(&memcg_shrinker_map_mutex);
475 
476 	return ret;
477 }
478 
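/*
 * Grow the per-memcg, per-node shrinker bitmaps so they can hold bit
 * @new_id: each memcg gets a larger map with all old bits set
 * (conservatively) and the new bits cleared; memcg_shrinker_map_size is
 * updated on success.
 */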
479 int memcg_expand_shrinker_maps(int new_id)
480 {
481 	int size, old_size, ret = 0;
482 	struct mem_cgroup *memcg;
483 
484 	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
485 	old_size = memcg_shrinker_map_size;
486 	if (size <= old_size)
487 		return 0;
488 
489 	mutex_lock(&memcg_shrinker_map_mutex);
490 	if (!root_mem_cgroup)
491 		goto unlock;
492 
493 	for_each_mem_cgroup(memcg) {
494 		if (mem_cgroup_is_root(memcg))
495 			continue;
496 		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
497 		if (ret) {
498 			mem_cgroup_iter_break(NULL, memcg);
499 			goto unlock;
500 		}
501 	}
502 unlock:
503 	if (!ret)
504 		memcg_shrinker_map_size = size;
505 	mutex_unlock(&memcg_shrinker_map_mutex);
506 	return ret;
507 }
508 
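/*
 * Record that @memcg may have freeable objects for the shrinker with
 * @shrinker_id on node @nid, so that shrink_slab() will consult that
 * shrinker for this memcg.
 */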
509 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
510 {
511 	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
512 		struct memcg_shrinker_map *map;
513 
514 		rcu_read_lock();
515 		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
516 		/* Pairs with smp mb in shrink_slab() */
517 		smp_mb__before_atomic();
518 		set_bit(shrinker_id, map->map);
519 		rcu_read_unlock();
520 	}
521 }
522 
523 /**
524  * mem_cgroup_css_from_page - css of the memcg associated with a page
525  * @page: page of interest
526  *
527  * If memcg is bound to the default hierarchy, css of the memcg associated
528  * with @page is returned.  The returned css remains associated with @page
529  * until it is released.
530  *
531  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
532  * is returned.
533  */
534 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
535 {
536 	struct mem_cgroup *memcg;
537 
538 	memcg = page->mem_cgroup;
539 
540 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
541 		memcg = root_mem_cgroup;
542 
543 	return &memcg->css;
544 }
545 
546 /**
547  * page_cgroup_ino - return inode number of the memcg a page is charged to
548  * @page: the page
549  *
550  * Look up the closest online ancestor of the memory cgroup @page is charged to
551  * and return its inode number or 0 if @page is not charged to any cgroup. It
552  * is safe to call this function without holding a reference to @page.
553  *
554  * Note, this function is inherently racy, because there is nothing to prevent
555  * the cgroup inode from getting torn down and potentially reallocated a moment
556  * after page_cgroup_ino() returns, so it only should be used by callers that
557  * do not care (such as procfs interfaces).
558  */
559 ino_t page_cgroup_ino(struct page *page)
560 {
561 	struct mem_cgroup *memcg;
562 	unsigned long ino = 0;
563 
564 	rcu_read_lock();
565 	memcg = page->mem_cgroup;
566 
567 	/*
568 	 * The lowest bit set means that memcg isn't a valid
569 	 * memcg pointer, but an obj_cgroups pointer.
570 	 * In this case the page is shared and doesn't belong
571 	 * to any specific memory cgroup.
572 	 */
573 	if ((unsigned long) memcg & 0x1UL)
574 		memcg = NULL;
575 
576 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
577 		memcg = parent_mem_cgroup(memcg);
578 	if (memcg)
579 		ino = cgroup_ino(memcg->css.cgroup);
580 	rcu_read_unlock();
581 	return ino;
582 }
583 
584 static struct mem_cgroup_per_node *
585 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
586 {
587 	int nid = page_to_nid(page);
588 
589 	return memcg->nodeinfo[nid];
590 }
591 
592 static struct mem_cgroup_tree_per_node *
593 soft_limit_tree_node(int nid)
594 {
595 	return soft_limit_tree.rb_tree_per_node[nid];
596 }
597 
598 static struct mem_cgroup_tree_per_node *
599 soft_limit_tree_from_page(struct page *page)
600 {
601 	int nid = page_to_nid(page);
602 
603 	return soft_limit_tree.rb_tree_per_node[nid];
604 }
605 
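/*
 * Insert @mz into the per-node soft limit tree, keyed by how far the memcg
 * is above its soft limit; the rightmost (most exceeding) node is cached in
 * mctz->rb_rightmost so the worst offender can be found without a tree walk.
 */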
606 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
607 					 struct mem_cgroup_tree_per_node *mctz,
608 					 unsigned long new_usage_in_excess)
609 {
610 	struct rb_node **p = &mctz->rb_root.rb_node;
611 	struct rb_node *parent = NULL;
612 	struct mem_cgroup_per_node *mz_node;
613 	bool rightmost = true;
614 
615 	if (mz->on_tree)
616 		return;
617 
618 	mz->usage_in_excess = new_usage_in_excess;
619 	if (!mz->usage_in_excess)
620 		return;
621 	while (*p) {
622 		parent = *p;
623 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
624 					tree_node);
625 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
626 			p = &(*p)->rb_left;
627 			rightmost = false;
628 		}
629 
630 		/*
631 		 * We can't avoid mem cgroups that are over their soft
632 		 * limit by the same amount
633 		 */
634 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
635 			p = &(*p)->rb_right;
636 	}
637 
638 	if (rightmost)
639 		mctz->rb_rightmost = &mz->tree_node;
640 
641 	rb_link_node(&mz->tree_node, parent, p);
642 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
643 	mz->on_tree = true;
644 }
645 
646 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
647 					 struct mem_cgroup_tree_per_node *mctz)
648 {
649 	if (!mz->on_tree)
650 		return;
651 
652 	if (&mz->tree_node == mctz->rb_rightmost)
653 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
654 
655 	rb_erase(&mz->tree_node, &mctz->rb_root);
656 	mz->on_tree = false;
657 }
658 
659 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
660 				       struct mem_cgroup_tree_per_node *mctz)
661 {
662 	unsigned long flags;
663 
664 	spin_lock_irqsave(&mctz->lock, flags);
665 	__mem_cgroup_remove_exceeded(mz, mctz);
666 	spin_unlock_irqrestore(&mctz->lock, flags);
667 }
668 
669 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
670 {
671 	unsigned long nr_pages = page_counter_read(&memcg->memory);
672 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
673 	unsigned long excess = 0;
674 
675 	if (nr_pages > soft_limit)
676 		excess = nr_pages - soft_limit;
677 
678 	return excess;
679 }
680 
681 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
682 {
683 	unsigned long excess;
684 	struct mem_cgroup_per_node *mz;
685 	struct mem_cgroup_tree_per_node *mctz;
686 
687 	mctz = soft_limit_tree_from_page(page);
688 	if (!mctz)
689 		return;
690 	/*
691 	 * Necessary to update all ancestors when hierarchy is used,
692 	 * because their event counter is not touched.
693 	 */
694 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
695 		mz = mem_cgroup_page_nodeinfo(memcg, page);
696 		excess = soft_limit_excess(memcg);
697 		/*
698 		 * We have to update the tree if mz is on RB-tree or
699 		 * mem is over its softlimit.
700 		 */
701 		if (excess || mz->on_tree) {
702 			unsigned long flags;
703 
704 			spin_lock_irqsave(&mctz->lock, flags);
705 			/* if on-tree, remove it */
706 			if (mz->on_tree)
707 				__mem_cgroup_remove_exceeded(mz, mctz);
708 			/*
709 			 * Insert again. mz->usage_in_excess will be updated.
710 			 * If excess is 0, no tree ops.
711 			 */
712 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
713 			spin_unlock_irqrestore(&mctz->lock, flags);
714 		}
715 	}
716 }
717 
718 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
719 {
720 	struct mem_cgroup_tree_per_node *mctz;
721 	struct mem_cgroup_per_node *mz;
722 	int nid;
723 
724 	for_each_node(nid) {
725 		mz = mem_cgroup_nodeinfo(memcg, nid);
726 		mctz = soft_limit_tree_node(nid);
727 		if (mctz)
728 			mem_cgroup_remove_exceeded(mz, mctz);
729 	}
730 }
731 
732 static struct mem_cgroup_per_node *
733 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
734 {
735 	struct mem_cgroup_per_node *mz;
736 
737 retry:
738 	mz = NULL;
739 	if (!mctz->rb_rightmost)
740 		goto done;		/* Nothing to reclaim from */
741 
742 	mz = rb_entry(mctz->rb_rightmost,
743 		      struct mem_cgroup_per_node, tree_node);
744 	/*
745 	 * Remove the node now but someone else can add it back,
746 	 * we will add it back at the end of reclaim to its correct
747 	 * position in the tree.
748 	 */
749 	__mem_cgroup_remove_exceeded(mz, mctz);
750 	if (!soft_limit_excess(mz->memcg) ||
751 	    !css_tryget(&mz->memcg->css))
752 		goto retry;
753 done:
754 	return mz;
755 }
756 
757 static struct mem_cgroup_per_node *
758 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
759 {
760 	struct mem_cgroup_per_node *mz;
761 
762 	spin_lock_irq(&mctz->lock);
763 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
764 	spin_unlock_irq(&mctz->lock);
765 	return mz;
766 }
767 
768 /**
769  * __mod_memcg_state - update cgroup memory statistics
770  * @memcg: the memory cgroup
771  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
772  * @val: delta to add to the counter, can be negative
773  */
774 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
775 {
776 	long x, threshold = MEMCG_CHARGE_BATCH;
777 
778 	if (mem_cgroup_disabled())
779 		return;
780 
781 	if (memcg_stat_item_in_bytes(idx))
782 		threshold <<= PAGE_SHIFT;
783 
784 	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
785 	if (unlikely(abs(x) > threshold)) {
786 		struct mem_cgroup *mi;
787 
788 		/*
789 		 * Batch local counters to keep them in sync with
790 		 * the hierarchical ones.
791 		 */
792 		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
793 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
794 			atomic_long_add(x, &mi->vmstats[idx]);
795 		x = 0;
796 	}
797 	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
798 }
799 
800 static struct mem_cgroup_per_node *
801 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
802 {
803 	struct mem_cgroup *parent;
804 
805 	parent = parent_mem_cgroup(pn->memcg);
806 	if (!parent)
807 		return NULL;
808 	return mem_cgroup_nodeinfo(parent, nid);
809 }
810 
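/*
 * Update a memcg + per-node (lruvec) state counter: the memcg counters are
 * updated via __mod_memcg_state(), while the per-node deltas are batched per
 * CPU and only propagated up the node hierarchy once they exceed the batch
 * threshold.
 */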
811 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
812 			      int val)
813 {
814 	struct mem_cgroup_per_node *pn;
815 	struct mem_cgroup *memcg;
816 	long x, threshold = MEMCG_CHARGE_BATCH;
817 
818 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
819 	memcg = pn->memcg;
820 
821 	/* Update memcg */
822 	__mod_memcg_state(memcg, idx, val);
823 
824 	/* Update lruvec */
825 	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
826 
827 	if (vmstat_item_in_bytes(idx))
828 		threshold <<= PAGE_SHIFT;
829 
830 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
831 	if (unlikely(abs(x) > threshold)) {
832 		pg_data_t *pgdat = lruvec_pgdat(lruvec);
833 		struct mem_cgroup_per_node *pi;
834 
835 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
836 			atomic_long_add(x, &pi->lruvec_stat[idx]);
837 		x = 0;
838 	}
839 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
840 }
841 
842 /**
843  * __mod_lruvec_state - update lruvec memory statistics
844  * @lruvec: the lruvec
845  * @idx: the stat item
846  * @val: delta to add to the counter, can be negative
847  *
848  * The lruvec is the intersection of the NUMA node and a cgroup. This
849  * function updates all three counters that are affected by a
850  * change of state at this level: per-node, per-cgroup, per-lruvec.
851  */
852 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
853 			int val)
854 {
855 	/* Update node */
856 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
857 
858 	/* Update memcg and lruvec */
859 	if (!mem_cgroup_disabled())
860 		__mod_memcg_lruvec_state(lruvec, idx, val);
861 }
862 EXPORT_SYMBOL_GPL(__mod_lruvec_state);
863 
864 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
865 {
866 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
867 	struct mem_cgroup *memcg;
868 	struct lruvec *lruvec;
869 
870 	rcu_read_lock();
871 	memcg = mem_cgroup_from_obj(p);
872 
873 	/*
874 	 * Untracked pages have no memcg, no lruvec. Update only the
875 	 * node. If we reparent the slab objects to the root memcg,
876 	 * when we free the slab object, we need to update the per-memcg
877 	 * vmstats to keep it correct for the root memcg.
878 	 */
879 	if (!memcg) {
880 		__mod_node_page_state(pgdat, idx, val);
881 	} else {
882 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
883 		__mod_lruvec_state(lruvec, idx, val);
884 	}
885 	rcu_read_unlock();
886 }
887 
888 void mod_memcg_obj_state(void *p, int idx, int val)
889 {
890 	struct mem_cgroup *memcg;
891 
892 	rcu_read_lock();
893 	memcg = mem_cgroup_from_obj(p);
894 	if (memcg)
895 		mod_memcg_state(memcg, idx, val);
896 	rcu_read_unlock();
897 }
898 
899 /**
900  * __count_memcg_events - account VM events in a cgroup
901  * @memcg: the memory cgroup
902  * @idx: the event item
903  * @count: the number of events that occurred
904  */
905 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
906 			  unsigned long count)
907 {
908 	unsigned long x;
909 
910 	if (mem_cgroup_disabled())
911 		return;
912 
913 	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
914 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
915 		struct mem_cgroup *mi;
916 
917 		/*
918 		 * Batch local counters to keep them in sync with
919 		 * the hierarchical ones.
920 		 */
921 		__this_cpu_add(memcg->vmstats_local->events[idx], x);
922 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
923 			atomic_long_add(x, &mi->vmevents[idx]);
924 		x = 0;
925 	}
926 	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
927 }
928 
929 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
930 {
931 	return atomic_long_read(&memcg->vmevents[event]);
932 }
933 
934 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
935 {
936 	long x = 0;
937 	int cpu;
938 
939 	for_each_possible_cpu(cpu)
940 		x += per_cpu(memcg->vmstats_local->events[event], cpu);
941 	return x;
942 }
943 
944 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
945 					 struct page *page,
946 					 int nr_pages)
947 {
948 	/* pagein of a big page is an event. So, ignore page size */
949 	if (nr_pages > 0)
950 		__count_memcg_events(memcg, PGPGIN, 1);
951 	else {
952 		__count_memcg_events(memcg, PGPGOUT, 1);
953 		nr_pages = -nr_pages; /* for event */
954 	}
955 
956 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
957 }
958 
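/*
 * Returns true when enough page events have accumulated on this CPU to cross
 * the next target for @target (thresholds or soft limit), and advances that
 * target; returns false otherwise.
 */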
959 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
960 				       enum mem_cgroup_events_target target)
961 {
962 	unsigned long val, next;
963 
964 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
965 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
966 	/* from time_after() in jiffies.h */
967 	if ((long)(next - val) < 0) {
968 		switch (target) {
969 		case MEM_CGROUP_TARGET_THRESH:
970 			next = val + THRESHOLDS_EVENTS_TARGET;
971 			break;
972 		case MEM_CGROUP_TARGET_SOFTLIMIT:
973 			next = val + SOFTLIMIT_EVENTS_TARGET;
974 			break;
975 		default:
976 			break;
977 		}
978 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
979 		return true;
980 	}
981 	return false;
982 }
983 
984 /*
985  * Check events in order: thresholds are checked at a finer
986  * granularity than the soft limit.
987  */
988 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
989 {
990 	/* threshold event is triggered in finer grain than soft limit */
991 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
992 						MEM_CGROUP_TARGET_THRESH))) {
993 		bool do_softlimit;
994 
995 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
996 						MEM_CGROUP_TARGET_SOFTLIMIT);
997 		mem_cgroup_threshold(memcg);
998 		if (unlikely(do_softlimit))
999 			mem_cgroup_update_tree(memcg, page);
1000 	}
1001 }
1002 
1003 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1004 {
1005 	/*
1006 	 * mm_update_next_owner() may clear mm->owner to NULL
1007 	 * if it races with swapoff, page migration, etc.
1008 	 * So this can be called with p == NULL.
1009 	 */
1010 	if (unlikely(!p))
1011 		return NULL;
1012 
1013 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1014 }
1015 EXPORT_SYMBOL(mem_cgroup_from_task);
1016 
1017 /**
1018  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1019  * @mm: mm from which memcg should be extracted. It can be NULL.
1020  *
1021  * Obtain a reference on mm->memcg and return it if successful. Otherwise
1022  * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
1023  * returned.
1024  */
1025 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1026 {
1027 	struct mem_cgroup *memcg;
1028 
1029 	if (mem_cgroup_disabled())
1030 		return NULL;
1031 
1032 	rcu_read_lock();
1033 	do {
1034 		/*
1035 		 * Page cache insertions can happen without an
1036 		 * actual mm context, e.g. during disk probing
1037 		 * on boot, loopback IO, acct() writes etc.
1038 		 */
1039 		if (unlikely(!mm))
1040 			memcg = root_mem_cgroup;
1041 		else {
1042 			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1043 			if (unlikely(!memcg))
1044 				memcg = root_mem_cgroup;
1045 		}
1046 	} while (!css_tryget(&memcg->css));
1047 	rcu_read_unlock();
1048 	return memcg;
1049 }
1050 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1051 
1052 /**
1053  * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
1054  * @page: page from which memcg should be extracted.
1055  *
1056  * Obtain a reference on page->memcg and return it if successful. Otherwise
1057  * root_mem_cgroup is returned.
1058  */
1059 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
1060 {
1061 	struct mem_cgroup *memcg = page->mem_cgroup;
1062 
1063 	if (mem_cgroup_disabled())
1064 		return NULL;
1065 
1066 	rcu_read_lock();
1067 	/* Page should not get uncharged and freed memcg under us. */
1068 	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
1069 		memcg = root_mem_cgroup;
1070 	rcu_read_unlock();
1071 	return memcg;
1072 }
1073 EXPORT_SYMBOL(get_mem_cgroup_from_page);
1074 
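/*
 * Return the memcg explicitly set up for remote charging: the per-CPU
 * int_active_memcg in interrupt context, current->active_memcg otherwise;
 * NULL when no remote memcg is set.
 */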
1075 static __always_inline struct mem_cgroup *active_memcg(void)
1076 {
1077 	if (in_interrupt())
1078 		return this_cpu_read(int_active_memcg);
1079 	else
1080 		return current->active_memcg;
1081 }
1082 
1083 static __always_inline struct mem_cgroup *get_active_memcg(void)
1084 {
1085 	struct mem_cgroup *memcg;
1086 
1087 	rcu_read_lock();
1088 	memcg = active_memcg();
1089 	/* remote memcg must hold a ref. */
1090 	if (memcg && WARN_ON_ONCE(!css_tryget(&memcg->css)))
1091 		memcg = root_mem_cgroup;
1092 	rcu_read_unlock();
1093 
1094 	return memcg;
1095 }
1096 
1097 static __always_inline bool memcg_kmem_bypass(void)
1098 {
1099 	/* Allow remote memcg charging from any context. */
1100 	if (unlikely(active_memcg()))
1101 		return false;
1102 
1103 	/* Memcg to charge can't be determined. */
1104 	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
1105 		return true;
1106 
1107 	return false;
1108 }
1109 
1110 /**
1111  * If active memcg is set, do not fall back to current->mm->memcg.
1112  */
1113 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
1114 {
1115 	if (memcg_kmem_bypass())
1116 		return NULL;
1117 
1118 	if (unlikely(active_memcg()))
1119 		return get_active_memcg();
1120 
1121 	return get_mem_cgroup_from_mm(current->mm);
1122 }
1123 
1124 /**
1125  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1126  * @root: hierarchy root
1127  * @prev: previously returned memcg, NULL on first invocation
1128  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1129  *
1130  * Returns references to children of the hierarchy below @root, or
1131  * @root itself, or %NULL after a full round-trip.
1132  *
1133  * Caller must pass the return value in @prev on subsequent
1134  * invocations for reference counting, or use mem_cgroup_iter_break()
1135  * to cancel a hierarchy walk before the round-trip is complete.
1136  *
1137  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1138  * in the hierarchy among all concurrent reclaimers operating on the
1139  * same node.
1140  */
1141 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1142 				   struct mem_cgroup *prev,
1143 				   struct mem_cgroup_reclaim_cookie *reclaim)
1144 {
1145 	struct mem_cgroup_reclaim_iter *iter;
1146 	struct cgroup_subsys_state *css = NULL;
1147 	struct mem_cgroup *memcg = NULL;
1148 	struct mem_cgroup *pos = NULL;
1149 
1150 	if (mem_cgroup_disabled())
1151 		return NULL;
1152 
1153 	if (!root)
1154 		root = root_mem_cgroup;
1155 
1156 	if (prev && !reclaim)
1157 		pos = prev;
1158 
1159 	if (!root->use_hierarchy && root != root_mem_cgroup) {
1160 		if (prev)
1161 			goto out;
1162 		return root;
1163 	}
1164 
1165 	rcu_read_lock();
1166 
1167 	if (reclaim) {
1168 		struct mem_cgroup_per_node *mz;
1169 
1170 		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
1171 		iter = &mz->iter;
1172 
1173 		if (prev && reclaim->generation != iter->generation)
1174 			goto out_unlock;
1175 
1176 		while (1) {
1177 			pos = READ_ONCE(iter->position);
1178 			if (!pos || css_tryget(&pos->css))
1179 				break;
1180 			/*
1181 			 * css reference reached zero, so iter->position will
1182 			 * be cleared by ->css_released. However, we should not
1183 			 * rely on this happening soon, because ->css_released
1184 			 * is called from a work queue, and by busy-waiting we
1185 			 * might block it. So we clear iter->position right
1186 			 * away.
1187 			 */
1188 			(void)cmpxchg(&iter->position, pos, NULL);
1189 		}
1190 	}
1191 
1192 	if (pos)
1193 		css = &pos->css;
1194 
1195 	for (;;) {
1196 		css = css_next_descendant_pre(css, &root->css);
1197 		if (!css) {
1198 			/*
1199 			 * Reclaimers share the hierarchy walk, and a
1200 			 * new one might jump in right at the end of
1201 			 * the hierarchy - make sure they see at least
1202 			 * one group and restart from the beginning.
1203 			 */
1204 			if (!prev)
1205 				continue;
1206 			break;
1207 		}
1208 
1209 		/*
1210 		 * Verify the css and acquire a reference.  The root
1211 		 * is provided by the caller, so we know it's alive
1212 		 * and kicking, and don't take an extra reference.
1213 		 */
1214 		memcg = mem_cgroup_from_css(css);
1215 
1216 		if (css == &root->css)
1217 			break;
1218 
1219 		if (css_tryget(css))
1220 			break;
1221 
1222 		memcg = NULL;
1223 	}
1224 
1225 	if (reclaim) {
1226 		/*
1227 		 * The position could have already been updated by a competing
1228 		 * thread, so check that the value hasn't changed since we read
1229 		 * it to avoid reclaiming from the same cgroup twice.
1230 		 */
1231 		(void)cmpxchg(&iter->position, pos, memcg);
1232 
1233 		if (pos)
1234 			css_put(&pos->css);
1235 
1236 		if (!memcg)
1237 			iter->generation++;
1238 		else if (!prev)
1239 			reclaim->generation = iter->generation;
1240 	}
1241 
1242 out_unlock:
1243 	rcu_read_unlock();
1244 out:
1245 	if (prev && prev != root)
1246 		css_put(&prev->css);
1247 
1248 	return memcg;
1249 }
1250 
1251 /**
1252  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1253  * @root: hierarchy root
1254  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1255  */
1256 void mem_cgroup_iter_break(struct mem_cgroup *root,
1257 			   struct mem_cgroup *prev)
1258 {
1259 	if (!root)
1260 		root = root_mem_cgroup;
1261 	if (prev && prev != root)
1262 		css_put(&prev->css);
1263 }
1264 
1265 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1266 					struct mem_cgroup *dead_memcg)
1267 {
1268 	struct mem_cgroup_reclaim_iter *iter;
1269 	struct mem_cgroup_per_node *mz;
1270 	int nid;
1271 
1272 	for_each_node(nid) {
1273 		mz = mem_cgroup_nodeinfo(from, nid);
1274 		iter = &mz->iter;
1275 		cmpxchg(&iter->position, dead_memcg, NULL);
1276 	}
1277 }
1278 
1279 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1280 {
1281 	struct mem_cgroup *memcg = dead_memcg;
1282 	struct mem_cgroup *last;
1283 
1284 	do {
1285 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1286 		last = memcg;
1287 	} while ((memcg = parent_mem_cgroup(memcg)));
1288 
1289 	/*
1290 	 * When cgroup1 non-hierarchy mode is used,
1291 	 * parent_mem_cgroup() does not walk all the way up to the
1292 	 * cgroup root (root_mem_cgroup). So we have to handle
1293 	 * dead_memcg from cgroup root separately.
1294 	 */
1295 	if (last != root_mem_cgroup)
1296 		__invalidate_reclaim_iterators(root_mem_cgroup,
1297 						dead_memcg);
1298 }
1299 
1300 /**
1301  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1302  * @memcg: hierarchy root
1303  * @fn: function to call for each task
1304  * @arg: argument passed to @fn
1305  *
1306  * This function iterates over tasks attached to @memcg or to any of its
1307  * descendants and calls @fn for each task. If @fn returns a non-zero
1308  * value, the function breaks the iteration loop and returns the value.
1309  * Otherwise, it will iterate over all tasks and return 0.
1310  *
1311  * This function must not be called for the root memory cgroup.
1312  */
1313 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1314 			  int (*fn)(struct task_struct *, void *), void *arg)
1315 {
1316 	struct mem_cgroup *iter;
1317 	int ret = 0;
1318 
1319 	BUG_ON(memcg == root_mem_cgroup);
1320 
1321 	for_each_mem_cgroup_tree(iter, memcg) {
1322 		struct css_task_iter it;
1323 		struct task_struct *task;
1324 
1325 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1326 		while (!ret && (task = css_task_iter_next(&it)))
1327 			ret = fn(task, arg);
1328 		css_task_iter_end(&it);
1329 		if (ret) {
1330 			mem_cgroup_iter_break(memcg, iter);
1331 			break;
1332 		}
1333 	}
1334 	return ret;
1335 }
1336 
1337 /**
1338  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1339  * @page: the page
1340  * @pgdat: pgdat of the page
1341  *
1342  * This function relies on page->mem_cgroup being stable - see the
1343  * access rules in commit_charge().
1344  */
1345 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
1346 {
1347 	struct mem_cgroup_per_node *mz;
1348 	struct mem_cgroup *memcg;
1349 	struct lruvec *lruvec;
1350 
1351 	if (mem_cgroup_disabled()) {
1352 		lruvec = &pgdat->__lruvec;
1353 		goto out;
1354 	}
1355 
1356 	memcg = page->mem_cgroup;
1357 	/*
1358 	 * Swapcache readahead pages are added to the LRU - and
1359 	 * possibly migrated - before they are charged.
1360 	 */
1361 	if (!memcg)
1362 		memcg = root_mem_cgroup;
1363 
1364 	mz = mem_cgroup_page_nodeinfo(memcg, page);
1365 	lruvec = &mz->lruvec;
1366 out:
1367 	/*
1368 	 * Since a node can be onlined after the mem_cgroup was created,
1369 	 * we have to be prepared to initialize lruvec->pgdat here;
1370 	 * and if offlined then reonlined, we need to reinitialize it.
1371 	 */
1372 	if (unlikely(lruvec->pgdat != pgdat))
1373 		lruvec->pgdat = pgdat;
1374 	return lruvec;
1375 }
1376 
1377 struct lruvec *page_to_lruvec(struct page *page, pg_data_t *pgdat)
1378 {
1379 	struct lruvec *lruvec;
1380 
1381 	lruvec = mem_cgroup_page_lruvec(page, pgdat);
1382 
1383 	return lruvec;
1384 }
1385 EXPORT_SYMBOL_GPL(page_to_lruvec);
1386 
1387 void do_traversal_all_lruvec(void)
1388 {
1389 	pg_data_t *pgdat;
1390 
1391 	for_each_online_pgdat(pgdat) {
1392 		struct mem_cgroup *memcg = NULL;
1393 
1394 		spin_lock_irq(&pgdat->lru_lock);
1395 		memcg = mem_cgroup_iter(NULL, NULL, NULL);
1396 		do {
1397 			struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
1398 
1399 			trace_android_vh_do_traversal_lruvec(lruvec);
1400 
1401 			memcg = mem_cgroup_iter(NULL, memcg, NULL);
1402 		} while (memcg);
1403 
1404 		spin_unlock_irq(&pgdat->lru_lock);
1405 	}
1406 }
1407 EXPORT_SYMBOL_GPL(do_traversal_all_lruvec);
1408 
1409 /**
1410  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1411  * @lruvec: mem_cgroup per zone lru vector
1412  * @lru: index of lru list the page is sitting on
1413  * @zid: zone id of the accounted pages
1414  * @nr_pages: positive when adding or negative when removing
1415  *
1416  * This function must be called under lru_lock, just before a page is added
1417  * to or just after a page is removed from an lru list (that ordering being
1418  * so as to allow it to check that lru_size 0 is consistent with list_empty).
1419  */
1420 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1421 				int zid, int nr_pages)
1422 {
1423 	struct mem_cgroup_per_node *mz;
1424 	unsigned long *lru_size;
1425 	long size;
1426 
1427 	if (mem_cgroup_disabled())
1428 		return;
1429 
1430 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1431 	lru_size = &mz->lru_zone_size[zid][lru];
1432 
1433 	if (nr_pages < 0)
1434 		*lru_size += nr_pages;
1435 
1436 	size = *lru_size;
1437 	if (WARN_ONCE(size < 0,
1438 		"%s(%p, %d, %d): lru_size %ld\n",
1439 		__func__, lruvec, lru, nr_pages, size)) {
1440 		VM_BUG_ON(1);
1441 		*lru_size = 0;
1442 	}
1443 
1444 	if (nr_pages > 0)
1445 		*lru_size += nr_pages;
1446 }
1447 EXPORT_SYMBOL_GPL(mem_cgroup_update_lru_size);
1448 
1449 /**
1450  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1451  * @memcg: the memory cgroup
1452  *
1453  * Returns the maximum amount of memory @memcg can be charged with, in
1454  * pages.
1455  */
1456 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1457 {
1458 	unsigned long margin = 0;
1459 	unsigned long count;
1460 	unsigned long limit;
1461 
1462 	count = page_counter_read(&memcg->memory);
1463 	limit = READ_ONCE(memcg->memory.max);
1464 	if (count < limit)
1465 		margin = limit - count;
1466 
1467 	if (do_memsw_account()) {
1468 		count = page_counter_read(&memcg->memsw);
1469 		limit = READ_ONCE(memcg->memsw.max);
1470 		if (count < limit)
1471 			margin = min(margin, limit - count);
1472 		else
1473 			margin = 0;
1474 	}
1475 
1476 	return margin;
1477 }
1478 
1479 /*
1480  * A routine for checking whether "mem" is under move_account() or not.
1481  *
1482  * Checks whether a cgroup is mc.from or mc.to, or is in the hierarchy of
1483  * the moving cgroups. This is for waiting at high memory pressure
1484  * caused by "move".
1485  */
1486 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1487 {
1488 	struct mem_cgroup *from;
1489 	struct mem_cgroup *to;
1490 	bool ret = false;
1491 	/*
1492 	 * Unlike task_move routines, we access mc.to, mc.from not under
1493 	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1494 	 */
1495 	spin_lock(&mc.lock);
1496 	from = mc.from;
1497 	to = mc.to;
1498 	if (!from)
1499 		goto unlock;
1500 
1501 	ret = mem_cgroup_is_descendant(from, memcg) ||
1502 		mem_cgroup_is_descendant(to, memcg);
1503 unlock:
1504 	spin_unlock(&mc.lock);
1505 	return ret;
1506 }
1507 
1508 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1509 {
1510 	if (mc.moving_task && current != mc.moving_task) {
1511 		if (mem_cgroup_under_move(memcg)) {
1512 			DEFINE_WAIT(wait);
1513 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1514 			/* moving charge context might have finished. */
1515 			if (mc.moving_task)
1516 				schedule();
1517 			finish_wait(&mc.waitq, &wait);
1518 			return true;
1519 		}
1520 	}
1521 	return false;
1522 }
1523 
1524 struct memory_stat {
1525 	const char *name;
1526 	unsigned int ratio;
1527 	unsigned int idx;
1528 };
1529 
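/*
 * Each entry maps a stat item to the name printed in memory.stat; ->ratio
 * converts the raw counter into bytes (e.g. PAGE_SIZE for counters kept in
 * pages, 1 for byte counters, 1024 for NR_KERNEL_STACK_KB).
 */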
1530 static struct memory_stat memory_stats[] = {
1531 	{ "anon", PAGE_SIZE, NR_ANON_MAPPED },
1532 	{ "file", PAGE_SIZE, NR_FILE_PAGES },
1533 	{ "kernel_stack", 1024, NR_KERNEL_STACK_KB },
1534 	{ "percpu", 1, MEMCG_PERCPU_B },
1535 	{ "sock", PAGE_SIZE, MEMCG_SOCK },
1536 	{ "shmem", PAGE_SIZE, NR_SHMEM },
1537 	{ "file_mapped", PAGE_SIZE, NR_FILE_MAPPED },
1538 	{ "file_dirty", PAGE_SIZE, NR_FILE_DIRTY },
1539 	{ "file_writeback", PAGE_SIZE, NR_WRITEBACK },
1540 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1541 	/*
1542 	 * The ratio will be initialized in memory_stats_init(), because
1543 	 * on some architectures the HPAGE_PMD_SIZE macro is not a
1544 	 * constant (e.g. powerpc).
1545 	 */
1546 	{ "anon_thp", 0, NR_ANON_THPS },
1547 #endif
1548 	{ "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
1549 	{ "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
1550 	{ "inactive_file", PAGE_SIZE, NR_INACTIVE_FILE },
1551 	{ "active_file", PAGE_SIZE, NR_ACTIVE_FILE },
1552 	{ "unevictable", PAGE_SIZE, NR_UNEVICTABLE },
1553 
1554 	/*
1555 	 * Note: slab_reclaimable and slab_unreclaimable must be listed
1556 	 * together, with slab_reclaimable first.
1557 	 */
1558 	{ "slab_reclaimable", 1, NR_SLAB_RECLAIMABLE_B },
1559 	{ "slab_unreclaimable", 1, NR_SLAB_UNRECLAIMABLE_B },
1560 
1561 	/* The memory events */
1562 	{ "workingset_refault_anon", 1, WORKINGSET_REFAULT_ANON },
1563 	{ "workingset_refault_file", 1, WORKINGSET_REFAULT_FILE },
1564 	{ "workingset_activate_anon", 1, WORKINGSET_ACTIVATE_ANON },
1565 	{ "workingset_activate_file", 1, WORKINGSET_ACTIVATE_FILE },
1566 	{ "workingset_restore_anon", 1, WORKINGSET_RESTORE_ANON },
1567 	{ "workingset_restore_file", 1, WORKINGSET_RESTORE_FILE },
1568 	{ "workingset_nodereclaim", 1, WORKINGSET_NODERECLAIM },
1569 };
1570 
1571 static int __init memory_stats_init(void)
1572 {
1573 	int i;
1574 
1575 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1576 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1577 		if (memory_stats[i].idx == NR_ANON_THPS)
1578 			memory_stats[i].ratio = HPAGE_PMD_SIZE;
1579 #endif
1580 		VM_BUG_ON(!memory_stats[i].ratio);
1581 		VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT);
1582 	}
1583 
1584 	return 0;
1585 }
1586 pure_initcall(memory_stats_init);
1587 
1588 static char *memory_stat_format(struct mem_cgroup *memcg)
1589 {
1590 	struct seq_buf s;
1591 	int i;
1592 
1593 	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1594 	if (!s.buffer)
1595 		return NULL;
1596 
1597 	/*
1598 	 * Provide statistics on the state of the memory subsystem as
1599 	 * well as cumulative event counters that show past behavior.
1600 	 *
1601 	 * This list is ordered following a combination of these gradients:
1602 	 * 1) generic big picture -> specifics and details
1603 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1604 	 *
1605 	 * Current memory state:
1606 	 */
1607 
1608 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1609 		u64 size;
1610 
1611 		size = memcg_page_state(memcg, memory_stats[i].idx);
1612 		size *= memory_stats[i].ratio;
1613 		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1614 
1615 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1616 			size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
1617 			       memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B);
1618 			seq_buf_printf(&s, "slab %llu\n", size);
1619 		}
1620 	}
1621 
1622 	/* Accumulated memory events */
1623 
1624 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1625 		       memcg_events(memcg, PGFAULT));
1626 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1627 		       memcg_events(memcg, PGMAJFAULT));
1628 	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1629 		       memcg_events(memcg, PGREFILL));
1630 	seq_buf_printf(&s, "pgscan %lu\n",
1631 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1632 		       memcg_events(memcg, PGSCAN_DIRECT));
1633 	seq_buf_printf(&s, "pgsteal %lu\n",
1634 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1635 		       memcg_events(memcg, PGSTEAL_DIRECT));
1636 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1637 		       memcg_events(memcg, PGACTIVATE));
1638 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1639 		       memcg_events(memcg, PGDEACTIVATE));
1640 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1641 		       memcg_events(memcg, PGLAZYFREE));
1642 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1643 		       memcg_events(memcg, PGLAZYFREED));
1644 
1645 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1646 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1647 		       memcg_events(memcg, THP_FAULT_ALLOC));
1648 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1649 		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1650 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1651 
1652 	/* The above should easily fit into one page */
1653 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1654 
1655 	return s.buffer;
1656 }
1657 
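/* K(x) below converts a page count to kilobytes: with 4K pages, K(3) == 12. */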
1658 #define K(x) ((x) << (PAGE_SHIFT-10))
1659 /**
1660  * mem_cgroup_print_oom_context: Print OOM information relevant to
1661  * memory controller.
1662  * @memcg: The memory cgroup that went over limit
1663  * @p: Task that is going to be killed
1664  *
1665  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1666  * enabled
1667  */
1668 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1669 {
1670 	rcu_read_lock();
1671 
1672 	if (memcg) {
1673 		pr_cont(",oom_memcg=");
1674 		pr_cont_cgroup_path(memcg->css.cgroup);
1675 	} else
1676 		pr_cont(",global_oom");
1677 	if (p) {
1678 		pr_cont(",task_memcg=");
1679 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1680 	}
1681 	rcu_read_unlock();
1682 }
1683 
1684 /**
1685  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1686  * memory controller.
1687  * @memcg: The memory cgroup that went over limit
1688  */
1689 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1690 {
1691 	char *buf;
1692 
1693 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1694 		K((u64)page_counter_read(&memcg->memory)),
1695 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1696 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1697 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1698 			K((u64)page_counter_read(&memcg->swap)),
1699 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1700 	else {
1701 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1702 			K((u64)page_counter_read(&memcg->memsw)),
1703 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1704 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1705 			K((u64)page_counter_read(&memcg->kmem)),
1706 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1707 	}
1708 
1709 	pr_info("Memory cgroup stats for ");
1710 	pr_cont_cgroup_path(memcg->css.cgroup);
1711 	pr_cont(":");
1712 	buf = memory_stat_format(memcg);
1713 	if (!buf)
1714 		return;
1715 	pr_info("%s", buf);
1716 	kfree(buf);
1717 }
1718 
1719 /*
1720  * Return the memory (and swap, if configured) limit for a memcg.
1721  */
1722 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1723 {
1724 	unsigned long max = READ_ONCE(memcg->memory.max);
1725 
1726 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1727 		if (mem_cgroup_swappiness(memcg))
1728 			max += min(READ_ONCE(memcg->swap.max),
1729 				   (unsigned long)total_swap_pages);
1730 	} else { /* v1 */
1731 		if (mem_cgroup_swappiness(memcg)) {
1732 			/* Calculate swap excess capacity from memsw limit */
1733 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1734 
1735 			max += min(swap, (unsigned long)total_swap_pages);
1736 		}
1737 	}
1738 	return max;
1739 }
1740 
1741 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1742 {
1743 	return page_counter_read(&memcg->memory);
1744 }
1745 
1746 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1747 				     int order)
1748 {
1749 	struct oom_control oc = {
1750 		.zonelist = NULL,
1751 		.nodemask = NULL,
1752 		.memcg = memcg,
1753 		.gfp_mask = gfp_mask,
1754 		.order = order,
1755 	};
1756 	bool ret = true;
1757 
1758 	if (mutex_lock_killable(&oom_lock))
1759 		return true;
1760 
1761 	if (mem_cgroup_margin(memcg) >= (1 << order))
1762 		goto unlock;
1763 
1764 	/*
1765 	 * A few threads which were not waiting at mutex_lock_killable() can
1766 	 * fail to bail out. Therefore, check again after holding oom_lock.
1767 	 */
1768 	ret = task_is_dying() || out_of_memory(&oc);
1769 
1770 unlock:
1771 	mutex_unlock(&oom_lock);
1772 	return ret;
1773 }
1774 
1775 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1776 				   pg_data_t *pgdat,
1777 				   gfp_t gfp_mask,
1778 				   unsigned long *total_scanned)
1779 {
1780 	struct mem_cgroup *victim = NULL;
1781 	int total = 0;
1782 	int loop = 0;
1783 	unsigned long excess;
1784 	unsigned long nr_scanned;
1785 	struct mem_cgroup_reclaim_cookie reclaim = {
1786 		.pgdat = pgdat,
1787 	};
1788 
1789 	excess = soft_limit_excess(root_memcg);
1790 
1791 	while (1) {
1792 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1793 		if (!victim) {
1794 			loop++;
1795 			if (loop >= 2) {
1796 				/*
1797 				 * If we have not been able to reclaim
1798 				 * anything, it might be because there are
1799 				 * no reclaimable pages in this hierarchy
1800 				 */
1801 				if (!total)
1802 					break;
1803 				/*
1804 				 * We want to do more targeted reclaim.
1805 				 * excess >> 2 is not so large that we reclaim
1806 				 * too much, nor so small that we keep coming
1807 				 * back to reclaim from this cgroup
1808 				 */
1809 				if (total >= (excess >> 2) ||
1810 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1811 					break;
1812 			}
1813 			continue;
1814 		}
1815 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1816 					pgdat, &nr_scanned);
1817 		*total_scanned += nr_scanned;
1818 		if (!soft_limit_excess(root_memcg))
1819 			break;
1820 	}
1821 	mem_cgroup_iter_break(root_memcg, victim);
1822 	return total;
1823 }
1824 
1825 #ifdef CONFIG_LOCKDEP
1826 static struct lockdep_map memcg_oom_lock_dep_map = {
1827 	.name = "memcg_oom_lock",
1828 };
1829 #endif
1830 
1831 static DEFINE_SPINLOCK(memcg_oom_lock);
1832 
1833 /*
1834  * Check whether the OOM killer is already running in our hierarchy.
1835  * If someone else is running it, return false.
1836  */
1837 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1838 {
1839 	struct mem_cgroup *iter, *failed = NULL;
1840 
1841 	spin_lock(&memcg_oom_lock);
1842 
1843 	for_each_mem_cgroup_tree(iter, memcg) {
1844 		if (iter->oom_lock) {
1845 			/*
1846 			 * This subtree of our hierarchy is already locked,
1847 			 * so we cannot take the lock.
1848 			 */
1849 			failed = iter;
1850 			mem_cgroup_iter_break(memcg, iter);
1851 			break;
1852 		} else
1853 			iter->oom_lock = true;
1854 	}
1855 
1856 	if (failed) {
1857 		/*
1858 		 * OK, we failed to lock the whole subtree, so we have
1859 		 * to clean up what we already set up, up to the failing cgroup
1860 		 */
1861 		for_each_mem_cgroup_tree(iter, memcg) {
1862 			if (iter == failed) {
1863 				mem_cgroup_iter_break(memcg, iter);
1864 				break;
1865 			}
1866 			iter->oom_lock = false;
1867 		}
1868 	} else
1869 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1870 
1871 	spin_unlock(&memcg_oom_lock);
1872 
1873 	return !failed;
1874 }
1875 
1876 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1877 {
1878 	struct mem_cgroup *iter;
1879 
1880 	spin_lock(&memcg_oom_lock);
1881 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1882 	for_each_mem_cgroup_tree(iter, memcg)
1883 		iter->oom_lock = false;
1884 	spin_unlock(&memcg_oom_lock);
1885 }
1886 
1887 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1888 {
1889 	struct mem_cgroup *iter;
1890 
1891 	spin_lock(&memcg_oom_lock);
1892 	for_each_mem_cgroup_tree(iter, memcg)
1893 		iter->under_oom++;
1894 	spin_unlock(&memcg_oom_lock);
1895 }
1896 
1897 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1898 {
1899 	struct mem_cgroup *iter;
1900 
1901 	/*
1902 	 * Be careful about under_oom underflows because a child memcg
1903 	 * could have been added after mem_cgroup_mark_under_oom.
1904 	 */
1905 	spin_lock(&memcg_oom_lock);
1906 	for_each_mem_cgroup_tree(iter, memcg)
1907 		if (iter->under_oom > 0)
1908 			iter->under_oom--;
1909 	spin_unlock(&memcg_oom_lock);
1910 }
1911 
1912 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1913 
1914 struct oom_wait_info {
1915 	struct mem_cgroup *memcg;
1916 	wait_queue_entry_t	wait;
1917 };
1918 
1919 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1920 	unsigned mode, int sync, void *arg)
1921 {
1922 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1923 	struct mem_cgroup *oom_wait_memcg;
1924 	struct oom_wait_info *oom_wait_info;
1925 
1926 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1927 	oom_wait_memcg = oom_wait_info->memcg;
1928 
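	/*
	 * Only wake waiters whose memcg shares a hierarchy branch with
	 * the memcg that triggered the wakeup.
	 */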
1929 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1930 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1931 		return 0;
1932 	return autoremove_wake_function(wait, mode, sync, arg);
1933 }
1934 
1935 static void memcg_oom_recover(struct mem_cgroup *memcg)
1936 {
1937 	/*
1938 	 * For the following lockless ->under_oom test, the only required
1939 	 * guarantee is that it must see the state asserted by an OOM when
1940 	 * this function is called as a result of userland actions
1941 	 * triggered by the notification of the OOM.  This is trivially
1942 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1943 	 * triggering notification.
1944 	 */
1945 	if (memcg && memcg->under_oom)
1946 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1947 }
1948 
1949 enum oom_status {
1950 	OOM_SUCCESS,
1951 	OOM_FAILED,
1952 	OOM_ASYNC,
1953 	OOM_SKIPPED
1954 };
1955 
1956 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1957 {
1958 	enum oom_status ret;
1959 	bool locked;
1960 
1961 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1962 		return OOM_SKIPPED;
1963 
1964 	memcg_memory_event(memcg, MEMCG_OOM);
1965 
1966 	/*
1967 	 * We are in the middle of the charge context here, so we
1968 	 * don't want to block when potentially sitting on a callstack
1969 	 * that holds all kinds of filesystem and mm locks.
1970 	 *
1971 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1972 	 * handling until the charge can succeed; remember the context and put
1973 	 * the task to sleep at the end of the page fault when all locks are
1974 	 * released.
1975 	 *
1976 	 * On the other hand, in-kernel OOM killer allows for an async victim
1977 	 * memory reclaim (oom_reaper) and that means that we are not solely
1978 	 * relying on the oom victim to make a forward progress and we can
1979 	 * invoke the oom killer here.
1980 	 *
1981 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1982 	 * victim and then we have to bail out from the charge path.
1983 	 */
1984 	if (memcg->oom_kill_disable) {
1985 		if (!current->in_user_fault)
1986 			return OOM_SKIPPED;
1987 		css_get(&memcg->css);
1988 		current->memcg_in_oom = memcg;
1989 		current->memcg_oom_gfp_mask = mask;
1990 		current->memcg_oom_order = order;
1991 
1992 		return OOM_ASYNC;
1993 	}
1994 
1995 	mem_cgroup_mark_under_oom(memcg);
1996 
1997 	locked = mem_cgroup_oom_trylock(memcg);
1998 
1999 	if (locked)
2000 		mem_cgroup_oom_notify(memcg);
2001 
2002 	mem_cgroup_unmark_under_oom(memcg);
2003 	if (mem_cgroup_out_of_memory(memcg, mask, order))
2004 		ret = OOM_SUCCESS;
2005 	else
2006 		ret = OOM_FAILED;
2007 
2008 	if (locked)
2009 		mem_cgroup_oom_unlock(memcg);
2010 
2011 	return ret;
2012 }
2013 
2014 /**
2015  * mem_cgroup_oom_synchronize - complete memcg OOM handling
2016  * @handle: actually kill/wait or just clean up the OOM state
2017  *
2018  * This has to be called at the end of a page fault if the memcg OOM
2019  * handler was enabled.
2020  *
2021  * Memcg supports userspace OOM handling where failed allocations must
2022  * sleep on a waitqueue until the userspace task resolves the
2023  * situation.  Sleeping directly in the charge context with all kinds
2024  * of locks held is not a good idea, instead we remember an OOM state
2025  * in the task and mem_cgroup_oom_synchronize() has to be called at
2026  * the end of the page fault to complete the OOM handling.
2027  *
2028  * Returns %true if an ongoing memcg OOM situation was detected and
2029  * completed, %false otherwise.
2030  */
2031 bool mem_cgroup_oom_synchronize(bool handle)
2032 {
2033 	struct mem_cgroup *memcg = current->memcg_in_oom;
2034 	struct oom_wait_info owait;
2035 	bool locked;
2036 
2037 	/* OOM is global, do not handle */
2038 	if (!memcg)
2039 		return false;
2040 
2041 	if (!handle)
2042 		goto cleanup;
2043 
2044 	owait.memcg = memcg;
2045 	owait.wait.flags = 0;
2046 	owait.wait.func = memcg_oom_wake_function;
2047 	owait.wait.private = current;
2048 	INIT_LIST_HEAD(&owait.wait.entry);
2049 
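	/*
	 * Queue on the global OOM waitqueue; memcg_oom_wake_function()
	 * filters wakeups by memcg hierarchy.
	 */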
2050 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2051 	mem_cgroup_mark_under_oom(memcg);
2052 
2053 	locked = mem_cgroup_oom_trylock(memcg);
2054 
2055 	if (locked)
2056 		mem_cgroup_oom_notify(memcg);
2057 
2058 	if (locked && !memcg->oom_kill_disable) {
2059 		mem_cgroup_unmark_under_oom(memcg);
2060 		finish_wait(&memcg_oom_waitq, &owait.wait);
2061 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
2062 					 current->memcg_oom_order);
2063 	} else {
2064 		schedule();
2065 		mem_cgroup_unmark_under_oom(memcg);
2066 		finish_wait(&memcg_oom_waitq, &owait.wait);
2067 	}
2068 
2069 	if (locked) {
2070 		mem_cgroup_oom_unlock(memcg);
2071 		/*
2072 		 * There is no guarantee that an OOM-lock contender
2073 		 * sees the wakeups triggered by the OOM kill
2074 		 * uncharges.  Wake any sleepers explicitly.
2075 		 */
2076 		memcg_oom_recover(memcg);
2077 	}
2078 cleanup:
2079 	current->memcg_in_oom = NULL;
2080 	css_put(&memcg->css);
2081 	return true;
2082 }
2083 
2084 /**
2085  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2086  * @victim: task to be killed by the OOM killer
2087  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2088  *
2089  * Returns a pointer to a memory cgroup, which has to be cleaned up
2090  * by killing all belonging OOM-killable tasks.
2091  *
2092  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2093  */
2094 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2095 					    struct mem_cgroup *oom_domain)
2096 {
2097 	struct mem_cgroup *oom_group = NULL;
2098 	struct mem_cgroup *memcg;
2099 
2100 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2101 		return NULL;
2102 
2103 	if (!oom_domain)
2104 		oom_domain = root_mem_cgroup;
2105 
2106 	rcu_read_lock();
2107 
2108 	memcg = mem_cgroup_from_task(victim);
2109 	if (memcg == root_mem_cgroup)
2110 		goto out;
2111 
2112 	/*
2113 	 * If the victim task has been asynchronously moved to a different
2114 	 * memory cgroup, we might end up killing tasks outside oom_domain.
2115 	 * In this case it's better to ignore memory.group.oom.
2116 	 */
2117 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2118 		goto out;
2119 
2120 	/*
2121 	 * Traverse the memory cgroup hierarchy from the victim task's
2122 	 * cgroup up to the OOMing cgroup (or root) to find the
2123 	 * highest-level memory cgroup with oom.group set.
2124 	 */
2125 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2126 		if (memcg->oom_group)
2127 			oom_group = memcg;
2128 
2129 		if (memcg == oom_domain)
2130 			break;
2131 	}
2132 
2133 	if (oom_group)
2134 		css_get(&oom_group->css);
2135 out:
2136 	rcu_read_unlock();
2137 
2138 	return oom_group;
2139 }
2140 
2141 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2142 {
2143 	pr_info("Tasks in ");
2144 	pr_cont_cgroup_path(memcg->css.cgroup);
2145 	pr_cont(" are going to be killed due to memory.oom.group set\n");
2146 }
2147 
2148 /**
2149  * lock_page_memcg - lock a page->mem_cgroup binding
2150  * @page: the page
2151  *
2152  * This function protects unlocked LRU pages from being moved to
2153  * another cgroup.
2154  *
2155  * It ensures the lifetime of the returned memcg. The caller is
2156  * responsible for the lifetime of the page; __unlock_page_memcg() is
2157  * available when @page might get freed inside the locked section.
2158  */
2159 struct mem_cgroup *lock_page_memcg(struct page *page)
2160 {
2161 	struct page *head = compound_head(page); /* rmap on tail pages */
2162 	struct mem_cgroup *memcg;
2163 	unsigned long flags;
2164 
2165 	/*
2166 	 * The RCU lock is held throughout the transaction.  The fast
2167 	 * path can get away without acquiring the memcg->move_lock
2168 	 * because page moving starts with an RCU grace period.
2169 	 *
2170 	 * The RCU lock also protects the memcg from being freed when
2171 	 * the page state that is going to change is the only thing
2172 	 * preventing the page itself from being freed. E.g. writeback
2173 	 * doesn't hold a page reference and relies on PG_writeback to
2174 	 * keep off truncation, migration and so forth.
2175 	 */
2176 	rcu_read_lock();
2177 
2178 	if (mem_cgroup_disabled())
2179 		return NULL;
2180 again:
2181 	memcg = head->mem_cgroup;
2182 	if (unlikely(!memcg))
2183 		return NULL;
2184 
2185 	if (atomic_read(&memcg->moving_account) <= 0)
2186 		return memcg;
2187 
2188 	spin_lock_irqsave(&memcg->move_lock, flags);
2189 	if (memcg != head->mem_cgroup) {
2190 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2191 		goto again;
2192 	}
2193 
2194 	/*
2195 	 * When charge migration first begins, we can have locked and
2196 	 * unlocked page stat updates happening concurrently.  Track
2197 	 * the task that holds the lock for unlock_page_memcg().
2198 	 */
2199 	memcg->move_lock_task = current;
2200 	memcg->move_lock_flags = flags;
2201 
2202 	return memcg;
2203 }
2204 EXPORT_SYMBOL(lock_page_memcg);
2205 
2206 /**
2207  * __unlock_page_memcg - unlock and unpin a memcg
2208  * @memcg: the memcg
2209  *
2210  * Unlock and unpin a memcg returned by lock_page_memcg().
2211  */
2212 void __unlock_page_memcg(struct mem_cgroup *memcg)
2213 {
2214 	if (memcg && memcg->move_lock_task == current) {
2215 		unsigned long flags = memcg->move_lock_flags;
2216 
2217 		memcg->move_lock_task = NULL;
2218 		memcg->move_lock_flags = 0;
2219 
2220 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2221 	}
2222 
2223 	rcu_read_unlock();
2224 }
2225 
2226 /**
2227  * unlock_page_memcg - unlock a page->mem_cgroup binding
2228  * @page: the page
2229  */
2230 void unlock_page_memcg(struct page *page)
2231 {
2232 	struct page *head = compound_head(page);
2233 
2234 	__unlock_page_memcg(head->mem_cgroup);
2235 }
2236 EXPORT_SYMBOL(unlock_page_memcg);
2237 
2238 struct memcg_stock_pcp {
2239 	struct mem_cgroup *cached; /* this is never the root cgroup */
2240 	unsigned int nr_pages;
2241 
2242 #ifdef CONFIG_MEMCG_KMEM
2243 	struct obj_cgroup *cached_objcg;
2244 	unsigned int nr_bytes;
2245 #endif
2246 
2247 	struct work_struct work;
2248 	unsigned long flags;
2249 #define FLUSHING_CACHED_CHARGE	0
2250 };
2251 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2252 static DEFINE_MUTEX(percpu_charge_mutex);
2253 
2254 #ifdef CONFIG_MEMCG_KMEM
2255 static void drain_obj_stock(struct memcg_stock_pcp *stock);
2256 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2257 				     struct mem_cgroup *root_memcg);
2258 
2259 #else
2260 static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
2261 {
2262 }
2263 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2264 				     struct mem_cgroup *root_memcg)
2265 {
2266 	return false;
2267 }
2268 #endif
2269 
2270 /**
2271  * consume_stock: Try to consume stocked charge on this cpu.
2272  * @memcg: memcg to consume from.
2273  * @nr_pages: how many pages to charge.
2274  *
2275  * The charges will only happen if @memcg matches the current cpu's memcg
2276  * stock, and at least @nr_pages are available in that stock.  Failure to
2277  * service an allocation will refill the stock.
2278  *
2279  * returns true if successful, false otherwise.
2280  */
2281 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2282 {
2283 	struct memcg_stock_pcp *stock;
2284 	unsigned long flags;
2285 	bool ret = false;
2286 
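	/* Requests larger than the charge batch are never served from the stock. */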
2287 	if (nr_pages > MEMCG_CHARGE_BATCH)
2288 		return ret;
2289 
2290 	local_irq_save(flags);
2291 
2292 	stock = this_cpu_ptr(&memcg_stock);
2293 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2294 		stock->nr_pages -= nr_pages;
2295 		ret = true;
2296 	}
2297 
2298 	local_irq_restore(flags);
2299 
2300 	return ret;
2301 }
2302 
2303 /*
2304  * Return stocked charges to the page counters and reset the cached information.
2305  */
2306 static void drain_stock(struct memcg_stock_pcp *stock)
2307 {
2308 	struct mem_cgroup *old = stock->cached;
2309 
2310 	if (!old)
2311 		return;
2312 
2313 	if (stock->nr_pages) {
2314 		page_counter_uncharge(&old->memory, stock->nr_pages);
2315 		if (do_memsw_account())
2316 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2317 		stock->nr_pages = 0;
2318 	}
2319 
2320 	css_put(&old->css);
2321 	stock->cached = NULL;
2322 }
2323 
2324 static void drain_local_stock(struct work_struct *dummy)
2325 {
2326 	struct memcg_stock_pcp *stock;
2327 	unsigned long flags;
2328 
2329 	/*
2330 	 * The only protection from memory hotplug vs. drain_stock races is
2331 	 * that we always operate on local CPU stock here with IRQ disabled
2332 	 */
2333 	local_irq_save(flags);
2334 
2335 	stock = this_cpu_ptr(&memcg_stock);
2336 	drain_obj_stock(stock);
2337 	drain_stock(stock);
2338 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2339 
2340 	local_irq_restore(flags);
2341 }
2342 
2343 /*
2344  * Cache charges (nr_pages) in the local per-CPU area.
2345  * They will be consumed by consume_stock() later.
2346  */
2347 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2348 {
2349 	struct memcg_stock_pcp *stock;
2350 	unsigned long flags;
2351 
2352 	local_irq_save(flags);
2353 
2354 	stock = this_cpu_ptr(&memcg_stock);
2355 	if (stock->cached != memcg) { /* reset if necessary */
2356 		drain_stock(stock);
2357 		css_get(&memcg->css);
2358 		stock->cached = memcg;
2359 	}
2360 	stock->nr_pages += nr_pages;
2361 
2362 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2363 		drain_stock(stock);
2364 
2365 	local_irq_restore(flags);
2366 }
2367 
2368 /*
2369  * Drains all per-CPU charge caches for given root_memcg resp. subtree
2370  * of the hierarchy under it.
2371  */
2372 static void drain_all_stock(struct mem_cgroup *root_memcg)
2373 {
2374 	int cpu, curcpu;
2375 
2376 	/* If someone's already draining, avoid running more workers. */
2377 	if (!mutex_trylock(&percpu_charge_mutex))
2378 		return;
2379 	/*
2380 	 * Notify other cpus that system-wide "drain" is running
2381 	 * We do not care about races with the cpu hotplug because cpu down
2382 	 * as well as workers from this path always operate on the local
2383 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2384 	 */
2385 	curcpu = get_cpu();
2386 	for_each_online_cpu(cpu) {
2387 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2388 		struct mem_cgroup *memcg;
2389 		bool flush = false;
2390 
2391 		rcu_read_lock();
2392 		memcg = stock->cached;
2393 		if (memcg && stock->nr_pages &&
2394 		    mem_cgroup_is_descendant(memcg, root_memcg))
2395 			flush = true;
2396 		if (obj_stock_flush_required(stock, root_memcg))
2397 			flush = true;
2398 		rcu_read_unlock();
2399 
2400 		if (flush &&
2401 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2402 			if (cpu == curcpu)
2403 				drain_local_stock(&stock->work);
2404 			else
2405 				schedule_work_on(cpu, &stock->work);
2406 		}
2407 	}
2408 	put_cpu();
2409 	mutex_unlock(&percpu_charge_mutex);
2410 }
2411 
2412 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2413 {
2414 	struct memcg_stock_pcp *stock;
2415 	struct mem_cgroup *memcg, *mi;
2416 
2417 	stock = &per_cpu(memcg_stock, cpu);
2418 	drain_stock(stock);
2419 
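	/* Fold the dead CPU's leftover per-cpu counters into the hierarchical atomics. */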
2420 	for_each_mem_cgroup(memcg) {
2421 		int i;
2422 
2423 		for (i = 0; i < MEMCG_NR_STAT; i++) {
2424 			int nid;
2425 			long x;
2426 
2427 			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2428 			if (x)
2429 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2430 					atomic_long_add(x, &memcg->vmstats[i]);
2431 
2432 			if (i >= NR_VM_NODE_STAT_ITEMS)
2433 				continue;
2434 
2435 			for_each_node(nid) {
2436 				struct mem_cgroup_per_node *pn;
2437 
2438 				pn = mem_cgroup_nodeinfo(memcg, nid);
2439 				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2440 				if (x)
2441 					do {
2442 						atomic_long_add(x, &pn->lruvec_stat[i]);
2443 					} while ((pn = parent_nodeinfo(pn, nid)));
2444 			}
2445 		}
2446 
2447 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2448 			long x;
2449 
2450 			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2451 			if (x)
2452 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2453 					atomic_long_add(x, &memcg->vmevents[i]);
2454 		}
2455 	}
2456 
2457 	return 0;
2458 }
2459 
2460 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2461 				  unsigned int nr_pages,
2462 				  gfp_t gfp_mask)
2463 {
2464 	unsigned long nr_reclaimed = 0;
2465 
2466 	do {
2467 		unsigned long pflags;
2468 
2469 		if (page_counter_read(&memcg->memory) <=
2470 		    READ_ONCE(memcg->memory.high))
2471 			continue;
2472 
2473 		memcg_memory_event(memcg, MEMCG_HIGH);
2474 
2475 		psi_memstall_enter(&pflags);
2476 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2477 							     gfp_mask, true);
2478 		psi_memstall_leave(&pflags);
2479 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2480 		 !mem_cgroup_is_root(memcg));
2481 
2482 	return nr_reclaimed;
2483 }
2484 
2485 static void high_work_func(struct work_struct *work)
2486 {
2487 	struct mem_cgroup *memcg;
2488 
2489 	memcg = container_of(work, struct mem_cgroup, high_work);
2490 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2491 }
2492 
2493 /*
2494  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2495  * enough to still cause a significant slowdown in most cases, while still
2496  * allowing diagnostics and tracing to proceed without becoming stuck.
2497  */
2498 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2499 
2500 /*
2501  * When calculating the delay, we use these on either side of the exponentiation
2502  * to maintain precision and scale to a reasonable number of jiffies (see the
2503  * table below).
2504  *
2505  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2506  *   overage ratio to a delay.
2507  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2508  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2509  *   to produce a reasonable delay curve.
2510  *
2511  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2512  * reasonable delay curve compared to precision-adjusted overage, not
2513  * penalising heavily at first, but still making sure that growth beyond the
2514  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2515  * example, with a high of 100 megabytes:
2516  *
2517  *  +-------+------------------------+
2518  *  | usage | time to allocate in ms |
2519  *  +-------+------------------------+
2520  *  | 100M  |                      0 |
2521  *  | 101M  |                      6 |
2522  *  | 102M  |                     25 |
2523  *  | 103M  |                     57 |
2524  *  | 104M  |                    102 |
2525  *  | 105M  |                    159 |
2526  *  | 106M  |                    230 |
2527  *  | 107M  |                    313 |
2528  *  | 108M  |                    409 |
2529  *  | 109M  |                    518 |
2530  *  | 110M  |                    639 |
2531  *  | 111M  |                    774 |
2532  *  | 112M  |                    921 |
2533  *  | 113M  |                   1081 |
2534  *  | 114M  |                   1254 |
2535  *  | 115M  |                   1439 |
2536  *  | 116M  |                   1638 |
2537  *  | 117M  |                   1849 |
2538  *  | 118M  |                   2000 |
2539  *  | 119M  |                   2000 |
2540  *  | 120M  |                   2000 |
2541  *  +-------+------------------------+
2542  */
2543 #define MEMCG_DELAY_PRECISION_SHIFT 20
2544 #define MEMCG_DELAY_SCALING_SHIFT 14
2545 
2546 static u64 calculate_overage(unsigned long usage, unsigned long high)
2547 {
2548 	u64 overage;
2549 
2550 	if (usage <= high)
2551 		return 0;
2552 
2553 	/*
2554 	 * Prevent division by 0 in overage calculation by acting as if
2555 	 * it was a threshold of 1 page
2556 	 */
2557 	high = max(high, 1UL);
2558 
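	/*
	 * Express the overage as a fixed-point fraction of the high limit,
	 * with MEMCG_DELAY_PRECISION_SHIFT bits of precision.
	 */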
2559 	overage = usage - high;
2560 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2561 	return div64_u64(overage, high);
2562 }
2563 
2564 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2565 {
2566 	u64 overage, max_overage = 0;
2567 
2568 	do {
2569 		overage = calculate_overage(page_counter_read(&memcg->memory),
2570 					    READ_ONCE(memcg->memory.high));
2571 		max_overage = max(overage, max_overage);
2572 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2573 		 !mem_cgroup_is_root(memcg));
2574 
2575 	return max_overage;
2576 }
2577 
2578 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2579 {
2580 	u64 overage, max_overage = 0;
2581 
2582 	do {
2583 		overage = calculate_overage(page_counter_read(&memcg->swap),
2584 					    READ_ONCE(memcg->swap.high));
2585 		if (overage)
2586 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2587 		max_overage = max(overage, max_overage);
2588 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2589 		 !mem_cgroup_is_root(memcg));
2590 
2591 	return max_overage;
2592 }
2593 
2594 /*
2595  * Get the number of jiffies that we should penalise a mischievous cgroup which
2596  * is exceeding its memory.high by checking both it and its ancestors.
2597  */
2598 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2599 					  unsigned int nr_pages,
2600 					  u64 max_overage)
2601 {
2602 	unsigned long penalty_jiffies;
2603 
2604 	if (!max_overage)
2605 		return 0;
2606 
2607 	/*
2608 	 * We use overage compared to memory.high to calculate the number of
2609 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2610 	 * fairly lenient on small overages, and increasingly harsh when the
2611 	 * memcg in question makes it clear that it has no intention of stopping
2612 	 * its crazy behaviour, so we exponentially increase the delay based on
2613 	 * overage amount.
2614 	 */
2615 	penalty_jiffies = max_overage * max_overage * HZ;
2616 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2617 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2618 
2619 	/*
2620 	 * Factor in the task's own contribution to the overage, such that four
2621 	 * N-sized allocations are throttled approximately the same as one
2622 	 * 4N-sized allocation.
2623 	 *
2624 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2625 	 * larger the current charge batch is than that.
2626 	 */
2627 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2628 }
2629 
2630 /*
2631  * Scheduled by try_charge() to be executed from the userland return path
2632  * and reclaims memory over the high limit.
2633  */
2634 void mem_cgroup_handle_over_high(void)
2635 {
2636 	unsigned long penalty_jiffies;
2637 	unsigned long pflags;
2638 	unsigned long nr_reclaimed;
2639 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2640 	int nr_retries = MAX_RECLAIM_RETRIES;
2641 	struct mem_cgroup *memcg;
2642 	bool in_retry = false;
2643 
2644 	if (likely(!nr_pages))
2645 		return;
2646 
2647 	memcg = get_mem_cgroup_from_mm(current->mm);
2648 	current->memcg_nr_pages_over_high = 0;
2649 
2650 retry_reclaim:
2651 	/*
2652 	 * The allocating task should reclaim at least the batch size, but for
2653 	 * subsequent retries we only want to do what's necessary to prevent oom
2654 	 * or breaching resource isolation.
2655 	 *
2656 	 * This is distinct from memory.max or page allocator behaviour because
2657 	 * memory.high is currently batched, whereas memory.max and the page
2658 	 * allocator run every time an allocation is made.
2659 	 */
2660 	nr_reclaimed = reclaim_high(memcg,
2661 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2662 				    GFP_KERNEL);
2663 
2664 	/*
2665 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2666 	 * allocators proactively to slow down excessive growth.
2667 	 */
2668 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2669 					       mem_find_max_overage(memcg));
2670 
2671 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2672 						swap_find_max_overage(memcg));
2673 
2674 	/*
2675 	 * Clamp the max delay per usermode return so as to still keep the
2676 	 * application moving forwards and also permit diagnostics, albeit
2677 	 * extremely slowly.
2678 	 */
2679 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2680 
2681 	/*
2682 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2683 	 * that it's not even worth doing, in an attempt to be nice to those who
2684 	 * go only a small amount over their memory.high value and maybe haven't
2685 	 * been aggressively reclaimed enough yet.
2686 	 */
2687 	if (penalty_jiffies <= HZ / 100)
2688 		goto out;
2689 
2690 	/*
2691 	 * If reclaim is making forward progress but we're still over
2692 	 * memory.high, we want to encourage that rather than doing allocator
2693 	 * throttling.
2694 	 */
2695 	if (nr_reclaimed || nr_retries--) {
2696 		in_retry = true;
2697 		goto retry_reclaim;
2698 	}
2699 
2700 	/*
2701 	 * If we exit early, we're guaranteed to die (since
2702 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2703 	 * need to account for any ill-begotten jiffies to pay them off later.
2704 	 */
2705 	psi_memstall_enter(&pflags);
2706 	schedule_timeout_killable(penalty_jiffies);
2707 	psi_memstall_leave(&pflags);
2708 
2709 out:
2710 	css_put(&memcg->css);
2711 }
2712 
2713 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2714 		      unsigned int nr_pages)
2715 {
2716 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2717 	int nr_retries = MAX_RECLAIM_RETRIES;
2718 	struct mem_cgroup *mem_over_limit;
2719 	struct page_counter *counter;
2720 	enum oom_status oom_status;
2721 	unsigned long nr_reclaimed;
2722 	bool passed_oom = false;
2723 	bool may_swap = true;
2724 	bool drained = false;
2725 	unsigned long pflags;
2726 
2727 	if (mem_cgroup_is_root(memcg))
2728 		return 0;
2729 retry:
2730 	if (consume_stock(memcg, nr_pages))
2731 		return 0;
2732 
2733 	if (!do_memsw_account() ||
2734 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2735 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2736 			goto done_restock;
2737 		if (do_memsw_account())
2738 			page_counter_uncharge(&memcg->memsw, batch);
2739 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2740 	} else {
2741 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2742 		may_swap = false;
2743 	}
2744 
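	/*
	 * The batched charge did not fit under the limit; retry with only
	 * the pages actually requested before resorting to reclaim.
	 */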
2745 	if (batch > nr_pages) {
2746 		batch = nr_pages;
2747 		goto retry;
2748 	}
2749 
2750 	/*
2751 	 * Memcg doesn't have a dedicated reserve for atomic
2752 	 * allocations. But like the global atomic pool, we need to
2753 	 * put the burden of reclaim on regular allocation requests
2754 	 * and let these go through as privileged allocations.
2755 	 */
2756 	if (gfp_mask & __GFP_ATOMIC)
2757 		goto force;
2758 
2759 	/*
2760 	 * Prevent unbounded recursion when reclaim operations need to
2761 	 * allocate memory. This might exceed the limits temporarily,
2762 	 * but we prefer facilitating memory reclaim and getting back
2763 	 * under the limit over triggering OOM kills in these cases.
2764 	 */
2765 	if (unlikely(current->flags & PF_MEMALLOC))
2766 		goto force;
2767 
2768 	if (unlikely(task_in_memcg_oom(current)))
2769 		goto nomem;
2770 
2771 	if (!gfpflags_allow_blocking(gfp_mask))
2772 		goto nomem;
2773 
2774 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2775 
2776 	psi_memstall_enter(&pflags);
2777 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2778 						    gfp_mask, may_swap);
2779 	psi_memstall_leave(&pflags);
2780 
2781 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2782 		goto retry;
2783 
2784 	if (!drained) {
2785 		drain_all_stock(mem_over_limit);
2786 		drained = true;
2787 		goto retry;
2788 	}
2789 
2790 	if (gfp_mask & __GFP_NORETRY)
2791 		goto nomem;
2792 	/*
2793 	 * Even though the limit is exceeded at this point, reclaim
2794 	 * may have been able to free some pages.  Retry the charge
2795 	 * before killing the task.
2796 	 *
2797 	 * Only for regular pages, though: huge pages are rather
2798 	 * unlikely to succeed so close to the limit, and we fall back
2799 	 * to regular pages anyway in case of failure.
2800 	 */
2801 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2802 		goto retry;
2803 	/*
2804 	 * At task move, charge accounts can be doubly counted. So, it's
2805 	 * better to wait until the end of task_move if something is going on.
2806 	 */
2807 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2808 		goto retry;
2809 
2810 	if (nr_retries--)
2811 		goto retry;
2812 
2813 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2814 		goto nomem;
2815 
2816 	if (gfp_mask & __GFP_NOFAIL)
2817 		goto force;
2818 
2819 	/* Avoid endless loop for tasks bypassed by the oom killer */
2820 	if (passed_oom && task_is_dying())
2821 		goto nomem;
2822 
2823 	/*
2824 	 * keep retrying as long as the memcg oom killer is able to make
2825 	 * a forward progress or bypass the charge if the oom killer
2826 	 * couldn't make any progress.
2827 	 */
2828 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2829 		       get_order(nr_pages * PAGE_SIZE));
2830 	if (oom_status == OOM_SUCCESS) {
2831 		passed_oom = true;
2832 		nr_retries = MAX_RECLAIM_RETRIES;
2833 		goto retry;
2834 	}
2835 nomem:
2836 	if (!(gfp_mask & __GFP_NOFAIL))
2837 		return -ENOMEM;
2838 force:
2839 	/*
2840 	 * The allocation either can't fail or will lead to more memory
2841 	 * being freed very soon.  Allow memory usage go over the limit
2842 	 * temporarily by force charging it.
2843 	 */
2844 	page_counter_charge(&memcg->memory, nr_pages);
2845 	if (do_memsw_account())
2846 		page_counter_charge(&memcg->memsw, nr_pages);
2847 
2848 	return 0;
2849 
2850 done_restock:
2851 	if (batch > nr_pages)
2852 		refill_stock(memcg, batch - nr_pages);
2853 
2854 	/*
2855 	 * If the hierarchy is above the normal consumption range, schedule
2856 	 * reclaim on returning to userland.  We can perform reclaim here
2857 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2858 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2859 	 * not recorded as it most likely matches current's and won't
2860 	 * change in the meantime.  As high limit is checked again before
2861 	 * reclaim, the cost of mismatch is negligible.
2862 	 */
2863 	do {
2864 		bool mem_high, swap_high;
2865 
2866 		mem_high = page_counter_read(&memcg->memory) >
2867 			READ_ONCE(memcg->memory.high);
2868 		swap_high = page_counter_read(&memcg->swap) >
2869 			READ_ONCE(memcg->swap.high);
2870 
2871 		/* Don't bother a random interrupted task */
2872 		if (in_interrupt()) {
2873 			if (mem_high) {
2874 				schedule_work(&memcg->high_work);
2875 				break;
2876 			}
2877 			continue;
2878 		}
2879 
2880 		if (mem_high || swap_high) {
2881 			/*
2882 			 * The allocating tasks in this cgroup will need to do
2883 			 * reclaim or be throttled to prevent further growth
2884 			 * of the memory or swap footprints.
2885 			 *
2886 			 * Target some best-effort fairness between the tasks,
2887 			 * and distribute reclaim work and delay penalties
2888 			 * based on how much each task is actually allocating.
2889 			 */
2890 			current->memcg_nr_pages_over_high += batch;
2891 			set_notify_resume(current);
2892 			break;
2893 		}
2894 	} while ((memcg = parent_mem_cgroup(memcg)));
2895 
2896 	return 0;
2897 }
2898 
2899 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2900 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2901 {
2902 	if (mem_cgroup_is_root(memcg))
2903 		return;
2904 
2905 	page_counter_uncharge(&memcg->memory, nr_pages);
2906 	if (do_memsw_account())
2907 		page_counter_uncharge(&memcg->memsw, nr_pages);
2908 }
2909 #endif
2910 
2911 static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2912 {
2913 	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2914 	/*
2915 	 * Any of the following ensures page->mem_cgroup stability:
2916 	 *
2917 	 * - the page lock
2918 	 * - LRU isolation
2919 	 * - lock_page_memcg()
2920 	 * - exclusive reference
2921 	 */
2922 	page->mem_cgroup = memcg;
2923 }
2924 
2925 #ifdef CONFIG_MEMCG_KMEM
2926 /*
2927  * The allocated objcg pointers array is not accounted directly.
2928  * Moreover, it should not come from a DMA buffer and is not readily
2929  * reclaimable. So those GFP bits should be masked off.
2930  */
2931 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
2932 				 __GFP_ACCOUNT | __GFP_NOFAIL)
2933 
2934 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2935 				 gfp_t gfp)
2936 {
2937 	unsigned int objects = objs_per_slab_page(s, page);
2938 	void *vec;
2939 
2940 	gfp &= ~OBJCGS_CLEAR_MASK;
2941 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2942 			   page_to_nid(page));
2943 	if (!vec)
2944 		return -ENOMEM;
2945 
2946 	if (cmpxchg(&page->obj_cgroups, NULL,
2947 		    (struct obj_cgroup **) ((unsigned long)vec | 0x1UL)))
2948 		kfree(vec);
2949 	else
2950 		kmemleak_not_leak(vec);
2951 
2952 	return 0;
2953 }
2954 
2955 /*
2956  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2957  *
2958  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2959  * cgroup_mutex, etc.
2960  */
2961 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2962 {
2963 	struct page *page;
2964 
2965 	if (mem_cgroup_disabled())
2966 		return NULL;
2967 
2968 	page = virt_to_head_page(p);
2969 
2970 	/*
2971 	 * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer
2972 	 * or a pointer to obj_cgroup vector. In the latter case the lowest
2973 	 * bit of the pointer is set.
2974 	 * The page->mem_cgroup pointer can be asynchronously changed
2975 	 * from NULL to (obj_cgroup_vec | 0x1UL), but can't be changed
2976 	 * from a valid memcg pointer to objcg vector or back.
2977 	 */
2978 	if (!page->mem_cgroup)
2979 		return NULL;
2980 
2981 	/*
2982 	 * Slab objects are accounted individually, not per-page.
2983 	 * Memcg membership data for each individual object is saved in
2984 	 * the page->obj_cgroups.
2985 	 */
2986 	if (page_has_obj_cgroups(page)) {
2987 		struct obj_cgroup *objcg;
2988 		unsigned int off;
2989 
2990 		off = obj_to_index(page->slab_cache, page, p);
2991 		objcg = page_obj_cgroups(page)[off];
2992 		if (objcg)
2993 			return obj_cgroup_memcg(objcg);
2994 
2995 		return NULL;
2996 	}
2997 
2998 	/* All other pages use page->mem_cgroup */
2999 	return page->mem_cgroup;
3000 }
3001 
3002 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
3003 {
3004 	struct obj_cgroup *objcg = NULL;
3005 	struct mem_cgroup *memcg;
3006 
3007 	if (memcg_kmem_bypass())
3008 		return NULL;
3009 
3010 	rcu_read_lock();
3011 	if (unlikely(active_memcg()))
3012 		memcg = active_memcg();
3013 	else
3014 		memcg = mem_cgroup_from_task(current);
3015 
3016 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
3017 		objcg = rcu_dereference(memcg->objcg);
3018 		if (objcg && obj_cgroup_tryget(objcg))
3019 			break;
3020 		objcg = NULL;
3021 	}
3022 	rcu_read_unlock();
3023 
3024 	return objcg;
3025 }
3026 
3027 static int memcg_alloc_cache_id(void)
3028 {
3029 	int id, size;
3030 	int err;
3031 
3032 	id = ida_simple_get(&memcg_cache_ida,
3033 			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
3034 	if (id < 0)
3035 		return id;
3036 
3037 	if (id < memcg_nr_cache_ids)
3038 		return id;
3039 
3040 	/*
3041 	 * There's no space for the new id in memcg_caches arrays,
3042 	 * so we have to grow them.
3043 	 */
3044 	down_write(&memcg_cache_ids_sem);
3045 
3046 	size = 2 * (id + 1);
3047 	if (size < MEMCG_CACHES_MIN_SIZE)
3048 		size = MEMCG_CACHES_MIN_SIZE;
3049 	else if (size > MEMCG_CACHES_MAX_SIZE)
3050 		size = MEMCG_CACHES_MAX_SIZE;
3051 
3052 	err = memcg_update_all_list_lrus(size);
3053 	if (!err)
3054 		memcg_nr_cache_ids = size;
3055 
3056 	up_write(&memcg_cache_ids_sem);
3057 
3058 	if (err) {
3059 		ida_simple_remove(&memcg_cache_ida, id);
3060 		return err;
3061 	}
3062 	return id;
3063 }
3064 
3065 static void memcg_free_cache_id(int id)
3066 {
3067 	ida_simple_remove(&memcg_cache_ida, id);
3068 }
3069 
3070 /**
3071  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
3072  * @memcg: memory cgroup to charge
3073  * @gfp: reclaim mode
3074  * @nr_pages: number of pages to charge
3075  *
3076  * Returns 0 on success, an error code on failure.
3077  */
3078 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
3079 			unsigned int nr_pages)
3080 {
3081 	struct page_counter *counter;
3082 	int ret;
3083 
3084 	ret = try_charge(memcg, gfp, nr_pages);
3085 	if (ret)
3086 		return ret;
3087 
3088 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
3089 	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
3090 
3091 		/*
3092 		 * Enforce __GFP_NOFAIL allocation because callers are not
3093 		 * prepared to see failures and likely do not have any failure
3094 		 * handling code.
3095 		 */
3096 		if (gfp & __GFP_NOFAIL) {
3097 			page_counter_charge(&memcg->kmem, nr_pages);
3098 			return 0;
3099 		}
3100 		cancel_charge(memcg, nr_pages);
3101 		return -ENOMEM;
3102 	}
3103 	return 0;
3104 }
3105 
3106 /**
3107  * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
3108  * @memcg: memcg to uncharge
3109  * @nr_pages: number of pages to uncharge
3110  */
3111 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
3112 {
3113 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
3114 		page_counter_uncharge(&memcg->kmem, nr_pages);
3115 
3116 	refill_stock(memcg, nr_pages);
3117 }
3118 
3119 /**
3120  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3121  * @page: page to charge
3122  * @gfp: reclaim mode
3123  * @order: allocation order
3124  *
3125  * Returns 0 on success, an error code on failure.
3126  */
3127 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3128 {
3129 	struct mem_cgroup *memcg;
3130 	int ret = 0;
3131 
3132 	memcg = get_mem_cgroup_from_current();
3133 	if (memcg && !mem_cgroup_is_root(memcg)) {
3134 		ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
3135 		if (!ret) {
3136 			page->mem_cgroup = memcg;
3137 			__SetPageKmemcg(page);
3138 			return 0;
3139 		}
3140 		css_put(&memcg->css);
3141 	}
3142 	return ret;
3143 }
3144 
3145 /**
3146  * __memcg_kmem_uncharge_page: uncharge a kmem page
3147  * @page: page to uncharge
3148  * @order: allocation order
3149  */
3150 void __memcg_kmem_uncharge_page(struct page *page, int order)
3151 {
3152 	struct mem_cgroup *memcg = page->mem_cgroup;
3153 	unsigned int nr_pages = 1 << order;
3154 
3155 	if (!memcg)
3156 		return;
3157 
3158 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3159 	__memcg_kmem_uncharge(memcg, nr_pages);
3160 	page->mem_cgroup = NULL;
3161 	css_put(&memcg->css);
3162 
3163 	/* slab pages do not have PageKmemcg flag set */
3164 	if (PageKmemcg(page))
3165 		__ClearPageKmemcg(page);
3166 }
3167 
3168 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3169 {
3170 	struct memcg_stock_pcp *stock;
3171 	unsigned long flags;
3172 	bool ret = false;
3173 
3174 	local_irq_save(flags);
3175 
3176 	stock = this_cpu_ptr(&memcg_stock);
3177 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3178 		stock->nr_bytes -= nr_bytes;
3179 		ret = true;
3180 	}
3181 
3182 	local_irq_restore(flags);
3183 
3184 	return ret;
3185 }
3186 
3187 static void drain_obj_stock(struct memcg_stock_pcp *stock)
3188 {
3189 	struct obj_cgroup *old = stock->cached_objcg;
3190 
3191 	if (!old)
3192 		return;
3193 
3194 	if (stock->nr_bytes) {
3195 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3196 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3197 
3198 		if (nr_pages) {
3199 			struct mem_cgroup *memcg;
3200 
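
			/*
			 * The objcg's memcg may be released or reparented
			 * concurrently; retry until a live css reference is
			 * obtained.
			 */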
3201 			rcu_read_lock();
3202 retry:
3203 			memcg = obj_cgroup_memcg(old);
3204 			if (unlikely(!css_tryget(&memcg->css)))
3205 				goto retry;
3206 			rcu_read_unlock();
3207 
3208 			__memcg_kmem_uncharge(memcg, nr_pages);
3209 			css_put(&memcg->css);
3210 		}
3211 
3212 		/*
3213 		 * The leftover is flushed to the centralized per-memcg value.
3214 		 * On the next attempt to refill obj stock it will be moved
3215 		 * to a per-cpu stock (probably on another CPU), see
3216 		 * refill_obj_stock().
3217 		 *
3218 		 * How often it's flushed is a trade-off between the memory
3219 		 * limit enforcement accuracy and potential CPU contention,
3220 		 * so it might be changed in the future.
3221 		 */
3222 		atomic_add(nr_bytes, &old->nr_charged_bytes);
3223 		stock->nr_bytes = 0;
3224 	}
3225 
3226 	obj_cgroup_put(old);
3227 	stock->cached_objcg = NULL;
3228 }
3229 
3230 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3231 				     struct mem_cgroup *root_memcg)
3232 {
3233 	struct mem_cgroup *memcg;
3234 
3235 	if (stock->cached_objcg) {
3236 		memcg = obj_cgroup_memcg(stock->cached_objcg);
3237 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3238 			return true;
3239 	}
3240 
3241 	return false;
3242 }
3243 
3244 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3245 {
3246 	struct memcg_stock_pcp *stock;
3247 	unsigned long flags;
3248 
3249 	local_irq_save(flags);
3250 
3251 	stock = this_cpu_ptr(&memcg_stock);
3252 	if (stock->cached_objcg != objcg) { /* reset if necessary */
3253 		drain_obj_stock(stock);
3254 		obj_cgroup_get(objcg);
3255 		stock->cached_objcg = objcg;
3256 		stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
3257 	}
3258 	stock->nr_bytes += nr_bytes;
3259 
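	/* Keep at most one page worth of bytes cached per CPU. */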
3260 	if (stock->nr_bytes > PAGE_SIZE)
3261 		drain_obj_stock(stock);
3262 
3263 	local_irq_restore(flags);
3264 }
3265 
3266 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3267 {
3268 	struct mem_cgroup *memcg;
3269 	unsigned int nr_pages, nr_bytes;
3270 	int ret;
3271 
3272 	if (consume_obj_stock(objcg, size))
3273 		return 0;
3274 
3275 	/*
3276 	 * In theory, memcg->nr_charged_bytes can have enough
3277 	 * pre-charged bytes to satisfy the allocation. However,
3278 	 * flushing memcg->nr_charged_bytes requires two atomic
3279 	 * operations, and memcg->nr_charged_bytes can't be big,
3280 	 * so it's better to ignore it and try to grab some new pages.
3281 	 * memcg->nr_charged_bytes will be flushed in
3282 	 * refill_obj_stock(), called from this function or
3283 	 * independently later.
3284 	 */
3285 	rcu_read_lock();
3286 retry:
3287 	memcg = obj_cgroup_memcg(objcg);
3288 	if (unlikely(!css_tryget(&memcg->css)))
3289 		goto retry;
3290 	rcu_read_unlock();
3291 
3292 	nr_pages = size >> PAGE_SHIFT;
3293 	nr_bytes = size & (PAGE_SIZE - 1);
3294 
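	/*
	 * Round the charge up to whole pages; the unused remainder of the
	 * last page is given back to the per-cpu stock below.
	 */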
3295 	if (nr_bytes)
3296 		nr_pages += 1;
3297 
3298 	ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
3299 	if (!ret && nr_bytes)
3300 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
3301 
3302 	css_put(&memcg->css);
3303 	return ret;
3304 }
3305 
3306 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3307 {
3308 	refill_obj_stock(objcg, size);
3309 }
3310 
3311 #endif /* CONFIG_MEMCG_KMEM */
3312 
3313 /*
3314  * Because head->mem_cgroup is not set on tails, set it now.
3315  */
3316 void split_page_memcg(struct page *head, unsigned int nr)
3317 {
3318 	struct mem_cgroup *memcg = head->mem_cgroup;
3319 	int kmemcg = PageKmemcg(head);
3320 	int i;
3321 
3322 	if (mem_cgroup_disabled() || !memcg)
3323 		return;
3324 
3325 	for (i = 1; i < nr; i++) {
3326 		head[i].mem_cgroup = memcg;
3327 		if (kmemcg)
3328 			__SetPageKmemcg(head + i);
3329 	}
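	/* Each new tail page pins the memcg with its own css reference. */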
3330 	css_get_many(&memcg->css, nr - 1);
3331 }
3332 
3333 #ifdef CONFIG_MEMCG_SWAP
3334 /**
3335  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3336  * @entry: swap entry to be moved
3337  * @from:  mem_cgroup which the entry is moved from
3338  * @to:  mem_cgroup which the entry is moved to
3339  *
3340  * It succeeds only when the swap_cgroup's record for this entry is the same
3341  * as the mem_cgroup's id of @from.
3342  *
3343  * Returns 0 on success, -EINVAL on failure.
3344  *
3345  * The caller must have charged to @to, IOW, called page_counter_charge() for
3346  * both res and memsw, and called css_get().
3347  */
3348 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3349 				struct mem_cgroup *from, struct mem_cgroup *to)
3350 {
3351 	unsigned short old_id, new_id;
3352 
3353 	old_id = mem_cgroup_id(from);
3354 	new_id = mem_cgroup_id(to);
3355 
3356 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3357 		mod_memcg_state(from, MEMCG_SWAP, -1);
3358 		mod_memcg_state(to, MEMCG_SWAP, 1);
3359 		return 0;
3360 	}
3361 	return -EINVAL;
3362 }
3363 #else
3364 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3365 				struct mem_cgroup *from, struct mem_cgroup *to)
3366 {
3367 	return -EINVAL;
3368 }
3369 #endif
3370 
3371 static DEFINE_MUTEX(memcg_max_mutex);
3372 
3373 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3374 				 unsigned long max, bool memsw)
3375 {
3376 	bool enlarge = false;
3377 	bool drained = false;
3378 	int ret;
3379 	bool limits_invariant;
3380 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3381 
3382 	do {
3383 		if (signal_pending(current)) {
3384 			ret = -EINTR;
3385 			break;
3386 		}
3387 
3388 		mutex_lock(&memcg_max_mutex);
3389 		/*
3390 		 * Make sure that the new limit (memsw or memory limit) doesn't
3391 		 * break our basic invariant rule memory.max <= memsw.max.
3392 		 */
3393 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3394 					   max <= memcg->memsw.max;
3395 		if (!limits_invariant) {
3396 			mutex_unlock(&memcg_max_mutex);
3397 			ret = -EINVAL;
3398 			break;
3399 		}
3400 		if (max > counter->max)
3401 			enlarge = true;
3402 		ret = page_counter_set_max(counter, max);
3403 		mutex_unlock(&memcg_max_mutex);
3404 
3405 		if (!ret)
3406 			break;
3407 
3408 		if (!drained) {
3409 			drain_all_stock(memcg);
3410 			drained = true;
3411 			continue;
3412 		}
3413 
3414 		if (!try_to_free_mem_cgroup_pages(memcg, 1,
3415 					GFP_KERNEL, !memsw)) {
3416 			ret = -EBUSY;
3417 			break;
3418 		}
3419 	} while (true);
3420 
3421 	if (!ret && enlarge)
3422 		memcg_oom_recover(memcg);
3423 
3424 	return ret;
3425 }
3426 
3427 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3428 					    gfp_t gfp_mask,
3429 					    unsigned long *total_scanned)
3430 {
3431 	unsigned long nr_reclaimed = 0;
3432 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3433 	unsigned long reclaimed;
3434 	int loop = 0;
3435 	struct mem_cgroup_tree_per_node *mctz;
3436 	unsigned long excess;
3437 	unsigned long nr_scanned;
3438 
3439 	if (order > 0)
3440 		return 0;
3441 
3442 	mctz = soft_limit_tree_node(pgdat->node_id);
3443 
3444 	/*
3445 	 * Do not even bother to check the largest node if the root
3446 	 * is empty. Do it lockless to prevent lock bouncing. Races
3447 	 * are acceptable as soft limit is best effort anyway.
3448 	 */
3449 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3450 		return 0;
3451 
3452 	/*
3453 	 * This loop can run a while, especially if mem_cgroups continuously
3454 	 * keep exceeding their soft limit and putting the system under
3455 	 * pressure
3456 	 */
3457 	do {
3458 		if (next_mz)
3459 			mz = next_mz;
3460 		else
3461 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3462 		if (!mz)
3463 			break;
3464 
3465 		nr_scanned = 0;
3466 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3467 						    gfp_mask, &nr_scanned);
3468 		nr_reclaimed += reclaimed;
3469 		*total_scanned += nr_scanned;
3470 		spin_lock_irq(&mctz->lock);
3471 		__mem_cgroup_remove_exceeded(mz, mctz);
3472 
3473 		/*
3474 		 * If we failed to reclaim anything from this memory cgroup
3475 		 * it is time to move on to the next cgroup
3476 		 */
3477 		next_mz = NULL;
3478 		if (!reclaimed)
3479 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3480 
3481 		excess = soft_limit_excess(mz->memcg);
3482 		/*
3483 		 * One school of thought says that we should not add
3484 		 * back the node to the tree if reclaim returns 0.
3485 		 * But our reclaim could return 0 simply because, due
3486 		 * to priority, we are exposing a smaller subset of
3487 		 * memory to reclaim from. Consider this a longer-term
3488 		 * TODO.
3489 		 */
3490 		/* If excess == 0, no tree ops */
3491 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3492 		spin_unlock_irq(&mctz->lock);
3493 		css_put(&mz->memcg->css);
3494 		loop++;
3495 		/*
3496 		 * Could not reclaim anything and there are no more
3497 		 * mem cgroups to try or we seem to be looping without
3498 		 * reclaiming anything.
3499 		 */
3500 		if (!nr_reclaimed &&
3501 			(next_mz == NULL ||
3502 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3503 			break;
3504 	} while (!nr_reclaimed);
3505 	if (next_mz)
3506 		css_put(&next_mz->memcg->css);
3507 	return nr_reclaimed;
3508 }
3509 
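/*
 * Summary (illustrative): the per-node tree walked above is populated as
 * groups exceed their memory.soft_limit_in_bytes setting, and this entry
 * point is meant to be called from global reclaim with order == 0 (higher
 * orders bail out immediately). Soft limits are best effort: the worst
 * offender on the node is reclaimed from first, and a group is reinserted
 * only while soft_limit_excess() is still positive.
 */
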
3510 /*
3511  * Test whether @memcg has children, dead or alive.  Note that this
3512  * function doesn't care whether @memcg has use_hierarchy enabled and
3513  * returns %true if there are child csses according to the cgroup
3514  * hierarchy.  Testing use_hierarchy is the caller's responsibility.
3515  */
3516 static inline bool memcg_has_children(struct mem_cgroup *memcg)
3517 {
3518 	bool ret;
3519 
3520 	rcu_read_lock();
3521 	ret = css_next_child(NULL, &memcg->css);
3522 	rcu_read_unlock();
3523 	return ret;
3524 }
3525 
3526 /*
3527  * Reclaims as many pages from the given memcg as possible.
3528  *
3529  * Caller is responsible for holding css reference for memcg.
3530  */
3531 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3532 {
3533 	int nr_retries = MAX_RECLAIM_RETRIES;
3534 
3535 	/* we call try-to-free pages to make this cgroup empty */
3536 	lru_add_drain_all();
3537 
3538 	drain_all_stock(memcg);
3539 
3540 	/* try to free all pages in this cgroup */
3541 	while (nr_retries && page_counter_read(&memcg->memory)) {
3542 		int progress;
3543 
3544 		if (signal_pending(current))
3545 			return -EINTR;
3546 
3547 		progress = try_to_free_mem_cgroup_pages(memcg, 1,
3548 							GFP_KERNEL, true);
3549 		if (!progress) {
3550 			nr_retries--;
3551 			/* maybe some writeback is necessary */
3552 			congestion_wait(BLK_RW_ASYNC, HZ/10);
3553 		}
3554 
3555 	}
3556 
3557 	return 0;
3558 }
3559 
3560 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3561 					    char *buf, size_t nbytes,
3562 					    loff_t off)
3563 {
3564 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3565 
3566 	if (mem_cgroup_is_root(memcg))
3567 		return -EINVAL;
3568 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3569 }
3570 
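/*
 * Example (cgroup v1, illustrative path): before removing a group an admin
 * can run
 *
 *	echo 0 > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * Any write lands in mem_cgroup_force_empty_write() above; the written
 * value is ignored, and the only failures are the root group (-EINVAL) and
 * a pending signal (-EINTR).
 */
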
3571 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3572 				     struct cftype *cft)
3573 {
3574 	return mem_cgroup_from_css(css)->use_hierarchy;
3575 }
3576 
3577 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3578 				      struct cftype *cft, u64 val)
3579 {
3580 	int retval = 0;
3581 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3582 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3583 
3584 	if (memcg->use_hierarchy == val)
3585 		return 0;
3586 
3587 	/*
3588 	 * If parent's use_hierarchy is set, we can't make any modifications
3589 	 * in the child subtrees. If it is unset, then the change can
3590 	 * occur, provided the current cgroup has no children.
3591 	 *
3592 	 * For the root cgroup, parent_memcg is NULL; we allow the value to
3593 	 * be set if there are no children.
3594 	 */
3595 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3596 				(val == 1 || val == 0)) {
3597 		if (!memcg_has_children(memcg))
3598 			memcg->use_hierarchy = val;
3599 		else
3600 			retval = -EBUSY;
3601 	} else
3602 		retval = -EINVAL;
3603 
3604 	return retval;
3605 }
3606 
3607 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3608 {
3609 	unsigned long val;
3610 
3611 	if (mem_cgroup_is_root(memcg)) {
3612 		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3613 			memcg_page_state(memcg, NR_ANON_MAPPED);
3614 		if (swap)
3615 			val += memcg_page_state(memcg, MEMCG_SWAP);
3616 	} else {
3617 		if (!swap)
3618 			val = page_counter_read(&memcg->memory);
3619 		else
3620 			val = page_counter_read(&memcg->memsw);
3621 	}
3622 	return val;
3623 }
3624 
3625 enum {
3626 	RES_USAGE,
3627 	RES_LIMIT,
3628 	RES_MAX_USAGE,
3629 	RES_FAILCNT,
3630 	RES_SOFT_LIMIT,
3631 };
3632 
3633 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3634 			       struct cftype *cft)
3635 {
3636 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3637 	struct page_counter *counter;
3638 
3639 	switch (MEMFILE_TYPE(cft->private)) {
3640 	case _MEM:
3641 		counter = &memcg->memory;
3642 		break;
3643 	case _MEMSWAP:
3644 		counter = &memcg->memsw;
3645 		break;
3646 	case _KMEM:
3647 		counter = &memcg->kmem;
3648 		break;
3649 	case _TCP:
3650 		counter = &memcg->tcpmem;
3651 		break;
3652 	default:
3653 		BUG();
3654 	}
3655 
3656 	switch (MEMFILE_ATTR(cft->private)) {
3657 	case RES_USAGE:
3658 		if (counter == &memcg->memory)
3659 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3660 		if (counter == &memcg->memsw)
3661 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3662 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3663 	case RES_LIMIT:
3664 		return (u64)counter->max * PAGE_SIZE;
3665 	case RES_MAX_USAGE:
3666 		return (u64)counter->watermark * PAGE_SIZE;
3667 	case RES_FAILCNT:
3668 		return counter->failcnt;
3669 	case RES_SOFT_LIMIT:
3670 		return (u64)memcg->soft_limit * PAGE_SIZE;
3671 	default:
3672 		BUG();
3673 	}
3674 }
3675 
3676 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
3677 {
3678 	unsigned long stat[MEMCG_NR_STAT] = {0};
3679 	struct mem_cgroup *mi;
3680 	int node, cpu, i;
3681 
3682 	for_each_online_cpu(cpu)
3683 		for (i = 0; i < MEMCG_NR_STAT; i++)
3684 			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3685 
3686 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3687 		for (i = 0; i < MEMCG_NR_STAT; i++)
3688 			atomic_long_add(stat[i], &mi->vmstats[i]);
3689 
3690 	for_each_node(node) {
3691 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3692 		struct mem_cgroup_per_node *pi;
3693 
3694 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3695 			stat[i] = 0;
3696 
3697 		for_each_online_cpu(cpu)
3698 			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3699 				stat[i] += per_cpu(
3700 					pn->lruvec_stat_cpu->count[i], cpu);
3701 
3702 		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3703 			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3704 				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3705 	}
3706 }
3707 
3708 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3709 {
3710 	unsigned long events[NR_VM_EVENT_ITEMS];
3711 	struct mem_cgroup *mi;
3712 	int cpu, i;
3713 
3714 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3715 		events[i] = 0;
3716 
3717 	for_each_online_cpu(cpu)
3718 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3719 			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3720 					     cpu);
3721 
3722 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3723 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3724 			atomic_long_add(events[i], &mi->vmevents[i]);
3725 }
3726 
3727 #ifdef CONFIG_MEMCG_KMEM
3728 static int memcg_online_kmem(struct mem_cgroup *memcg)
3729 {
3730 	struct obj_cgroup *objcg;
3731 	int memcg_id;
3732 
3733 	if (cgroup_memory_nokmem)
3734 		return 0;
3735 
3736 	BUG_ON(memcg->kmemcg_id >= 0);
3737 	BUG_ON(memcg->kmem_state);
3738 
3739 	memcg_id = memcg_alloc_cache_id();
3740 	if (memcg_id < 0)
3741 		return memcg_id;
3742 
3743 	objcg = obj_cgroup_alloc();
3744 	if (!objcg) {
3745 		memcg_free_cache_id(memcg_id);
3746 		return -ENOMEM;
3747 	}
3748 	objcg->memcg = memcg;
3749 	rcu_assign_pointer(memcg->objcg, objcg);
3750 
3751 	static_branch_enable(&memcg_kmem_enabled_key);
3752 
3753 	/*
3754 	 * A memory cgroup is considered kmem-online as soon as it gets
3755 	 * kmemcg_id. Setting the id after enabling static branching will
3756 	 * guarantee no one starts accounting before all call sites are
3757 	 * patched.
3758 	 */
3759 	memcg->kmemcg_id = memcg_id;
3760 	memcg->kmem_state = KMEM_ONLINE;
3761 
3762 	return 0;
3763 }
3764 
3765 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3766 {
3767 	struct cgroup_subsys_state *css;
3768 	struct mem_cgroup *parent, *child;
3769 	int kmemcg_id;
3770 
3771 	if (memcg->kmem_state != KMEM_ONLINE)
3772 		return;
3773 
3774 	memcg->kmem_state = KMEM_ALLOCATED;
3775 
3776 	parent = parent_mem_cgroup(memcg);
3777 	if (!parent)
3778 		parent = root_mem_cgroup;
3779 
3780 	memcg_reparent_objcgs(memcg, parent);
3781 
3782 	kmemcg_id = memcg->kmemcg_id;
3783 	BUG_ON(kmemcg_id < 0);
3784 
3785 	/*
3786 	 * Change kmemcg_id of this cgroup and all its descendants to the
3787 	 * parent's id, and then move all entries from this cgroup's list_lrus
3788 	 * to ones of the parent. After we have finished, all list_lrus
3789 	 * corresponding to this cgroup are guaranteed to remain empty. The
3790 	 * ordering is imposed by list_lru_node->lock taken by
3791 	 * memcg_drain_all_list_lrus().
3792 	 */
3793 	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3794 	css_for_each_descendant_pre(css, &memcg->css) {
3795 		child = mem_cgroup_from_css(css);
3796 		BUG_ON(child->kmemcg_id != kmemcg_id);
3797 		child->kmemcg_id = parent->kmemcg_id;
3798 		if (!memcg->use_hierarchy)
3799 			break;
3800 	}
3801 	rcu_read_unlock();
3802 
3803 	memcg_drain_all_list_lrus(kmemcg_id, parent);
3804 
3805 	memcg_free_cache_id(kmemcg_id);
3806 }
3807 
3808 static void memcg_free_kmem(struct mem_cgroup *memcg)
3809 {
3810 	/* css_alloc() failed, offlining didn't happen */
3811 	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3812 		memcg_offline_kmem(memcg);
3813 }
3814 #else
3815 static int memcg_online_kmem(struct mem_cgroup *memcg)
3816 {
3817 	return 0;
3818 }
3819 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3820 {
3821 }
3822 static void memcg_free_kmem(struct mem_cgroup *memcg)
3823 {
3824 }
3825 #endif /* CONFIG_MEMCG_KMEM */
3826 
3827 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3828 				 unsigned long max)
3829 {
3830 	int ret;
3831 
3832 	mutex_lock(&memcg_max_mutex);
3833 	ret = page_counter_set_max(&memcg->kmem, max);
3834 	mutex_unlock(&memcg_max_mutex);
3835 	return ret;
3836 }
3837 
3838 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3839 {
3840 	int ret;
3841 
3842 	mutex_lock(&memcg_max_mutex);
3843 
3844 	ret = page_counter_set_max(&memcg->tcpmem, max);
3845 	if (ret)
3846 		goto out;
3847 
3848 	if (!memcg->tcpmem_active) {
3849 		/*
3850 		 * The active flag needs to be written after the static_key
3851 		 * update. This is what guarantees that the socket activation
3852 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3853 		 * for details, and note that we don't mark any socket as
3854 		 * belonging to this memcg until that flag is up.
3855 		 *
3856 		 * We need to do this, because static_keys will span multiple
3857 		 * sites, but we can't control their order. If we mark a socket
3858 		 * as accounted, but the accounting functions are not patched in
3859 		 * yet, we'll lose accounting.
3860 		 *
3861 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3862 		 * because when this value changes, the code to process it is not
3863 		 * patched in yet.
3864 		 */
3865 		static_branch_inc(&memcg_sockets_enabled_key);
3866 		memcg->tcpmem_active = true;
3867 	}
3868 out:
3869 	mutex_unlock(&memcg_max_mutex);
3870 	return ret;
3871 }
3872 
3873 /*
3874  * The user of this function is...
3875  * RES_LIMIT.
3876  */
3877 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3878 				char *buf, size_t nbytes, loff_t off)
3879 {
3880 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3881 	unsigned long nr_pages;
3882 	int ret;
3883 
3884 	buf = strstrip(buf);
3885 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3886 	if (ret)
3887 		return ret;
3888 
3889 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3890 	case RES_LIMIT:
3891 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3892 			ret = -EINVAL;
3893 			break;
3894 		}
3895 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3896 		case _MEM:
3897 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3898 			break;
3899 		case _MEMSWAP:
3900 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3901 			break;
3902 		case _KMEM:
3903 			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3904 				     "Please report your usecase to linux-mm@kvack.org if you "
3905 				     "depend on this functionality.\n");
3906 			ret = memcg_update_kmem_max(memcg, nr_pages);
3907 			break;
3908 		case _TCP:
3909 			ret = memcg_update_tcp_max(memcg, nr_pages);
3910 			break;
3911 		}
3912 		break;
3913 	case RES_SOFT_LIMIT:
3914 		memcg->soft_limit = nr_pages;
3915 		ret = 0;
3916 		break;
3917 	}
3918 	return ret ?: nbytes;
3919 }
3920 
3921 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3922 				size_t nbytes, loff_t off)
3923 {
3924 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3925 	struct page_counter *counter;
3926 
3927 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3928 	case _MEM:
3929 		counter = &memcg->memory;
3930 		break;
3931 	case _MEMSWAP:
3932 		counter = &memcg->memsw;
3933 		break;
3934 	case _KMEM:
3935 		counter = &memcg->kmem;
3936 		break;
3937 	case _TCP:
3938 		counter = &memcg->tcpmem;
3939 		break;
3940 	default:
3941 		BUG();
3942 	}
3943 
3944 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3945 	case RES_MAX_USAGE:
3946 		page_counter_reset_watermark(counter);
3947 		break;
3948 	case RES_FAILCNT:
3949 		counter->failcnt = 0;
3950 		break;
3951 	default:
3952 		BUG();
3953 	}
3954 
3955 	return nbytes;
3956 }
3957 
3958 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3959 					struct cftype *cft)
3960 {
3961 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3962 }
3963 
3964 #ifdef CONFIG_MMU
3965 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3966 					struct cftype *cft, u64 val)
3967 {
3968 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3969 
3970 	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
3971 		     "Please report your usecase to linux-mm@kvack.org if you "
3972 		     "depend on this functionality.\n");
3973 
3974 	if (val & ~MOVE_MASK)
3975 		return -EINVAL;
3976 
3977 	/*
3978 	 * No kind of locking is needed in here, because ->can_attach() will
3979 	 * check this value once in the beginning of the process, and then carry
3980 	 * on with stale data. This means that changes to this value will only
3981 	 * affect task migrations starting after the change.
3982 	 */
3983 	memcg->move_charge_at_immigrate = val;
3984 	return 0;
3985 }
3986 #else
3987 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3988 					struct cftype *cft, u64 val)
3989 {
3990 	return -ENOSYS;
3991 }
3992 #endif
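
/*
 * Usage note (illustrative, based on the cgroup v1 memory documentation):
 * the value written to memory.move_charge_at_immigrate is a bitmask where
 * bit 0 selects anonymous pages and bit 1 selects file pages, so e.g.
 * "echo 3 > memory.move_charge_at_immigrate" requests both to be recharged
 * when a task migrates into the group.
 */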
3993 
3994 #ifdef CONFIG_NUMA
3995 
3996 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3997 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3998 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3999 
4000 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4001 				int nid, unsigned int lru_mask, bool tree)
4002 {
4003 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4004 	unsigned long nr = 0;
4005 	enum lru_list lru;
4006 
4007 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
4008 
4009 	for_each_lru(lru) {
4010 		if (!(BIT(lru) & lru_mask))
4011 			continue;
4012 		if (tree)
4013 			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4014 		else
4015 			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
4016 	}
4017 	return nr;
4018 }
4019 
4020 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
4021 					     unsigned int lru_mask,
4022 					     bool tree)
4023 {
4024 	unsigned long nr = 0;
4025 	enum lru_list lru;
4026 
4027 	for_each_lru(lru) {
4028 		if (!(BIT(lru) & lru_mask))
4029 			continue;
4030 		if (tree)
4031 			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4032 		else
4033 			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
4034 	}
4035 	return nr;
4036 }
4037 
4038 static int memcg_numa_stat_show(struct seq_file *m, void *v)
4039 {
4040 	struct numa_stat {
4041 		const char *name;
4042 		unsigned int lru_mask;
4043 	};
4044 
4045 	static const struct numa_stat stats[] = {
4046 		{ "total", LRU_ALL },
4047 		{ "file", LRU_ALL_FILE },
4048 		{ "anon", LRU_ALL_ANON },
4049 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
4050 	};
4051 	const struct numa_stat *stat;
4052 	int nid;
4053 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4054 
4055 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4056 		seq_printf(m, "%s=%lu", stat->name,
4057 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4058 						   false));
4059 		for_each_node_state(nid, N_MEMORY)
4060 			seq_printf(m, " N%d=%lu", nid,
4061 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4062 							stat->lru_mask, false));
4063 		seq_putc(m, '\n');
4064 	}
4065 
4066 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4067 
4068 		seq_printf(m, "hierarchical_%s=%lu", stat->name,
4069 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4070 						   true));
4071 		for_each_node_state(nid, N_MEMORY)
4072 			seq_printf(m, " N%d=%lu", nid,
4073 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4074 							stat->lru_mask, true));
4075 		seq_putc(m, '\n');
4076 	}
4077 
4078 	return 0;
4079 }
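
/*
 * Illustrative memory.numa_stat output produced by the function above
 * (values are page counts, invented for the example; one "N<nid>=" column
 * per node with memory):
 *
 *	total=2048 N0=1280 N1=768
 *	file=1536 N0=1024 N1=512
 *	anon=512 N0=256 N1=256
 *	unevictable=0 N0=0 N1=0
 *	hierarchical_total=4096 N0=2560 N1=1536
 *	...
 */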
4080 #endif /* CONFIG_NUMA */
4081 
4082 static const unsigned int memcg1_stats[] = {
4083 	NR_FILE_PAGES,
4084 	NR_ANON_MAPPED,
4085 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4086 	NR_ANON_THPS,
4087 #endif
4088 	NR_SHMEM,
4089 	NR_FILE_MAPPED,
4090 	NR_FILE_DIRTY,
4091 	NR_WRITEBACK,
4092 	MEMCG_SWAP,
4093 };
4094 
4095 static const char *const memcg1_stat_names[] = {
4096 	"cache",
4097 	"rss",
4098 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4099 	"rss_huge",
4100 #endif
4101 	"shmem",
4102 	"mapped_file",
4103 	"dirty",
4104 	"writeback",
4105 	"swap",
4106 };
4107 
4108 /* Universal VM events cgroup1 shows, original sort order */
4109 static const unsigned int memcg1_events[] = {
4110 	PGPGIN,
4111 	PGPGOUT,
4112 	PGFAULT,
4113 	PGMAJFAULT,
4114 };
4115 
4116 static int memcg_stat_show(struct seq_file *m, void *v)
4117 {
4118 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4119 	unsigned long memory, memsw;
4120 	struct mem_cgroup *mi;
4121 	unsigned int i;
4122 
4123 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4124 
4125 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4126 		unsigned long nr;
4127 
4128 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4129 			continue;
4130 		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4131 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4132 		if (memcg1_stats[i] == NR_ANON_THPS)
4133 			nr *= HPAGE_PMD_NR;
4134 #endif
4135 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
4136 	}
4137 
4138 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4139 		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4140 			   memcg_events_local(memcg, memcg1_events[i]));
4141 
4142 	for (i = 0; i < NR_LRU_LISTS; i++)
4143 		seq_printf(m, "%s %lu\n", lru_list_name(i),
4144 			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4145 			   PAGE_SIZE);
4146 
4147 	/* Hierarchical information */
4148 	memory = memsw = PAGE_COUNTER_MAX;
4149 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4150 		memory = min(memory, READ_ONCE(mi->memory.max));
4151 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4152 	}
4153 	seq_printf(m, "hierarchical_memory_limit %llu\n",
4154 		   (u64)memory * PAGE_SIZE);
4155 	if (do_memsw_account())
4156 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
4157 			   (u64)memsw * PAGE_SIZE);
4158 
4159 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4160 		unsigned long nr;
4161 
4162 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4163 			continue;
4164 		nr = memcg_page_state(memcg, memcg1_stats[i]);
4165 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4166 		if (memcg1_stats[i] == NR_ANON_THPS)
4167 			nr *= HPAGE_PMD_NR;
4168 #endif
4169 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4170 						(u64)nr * PAGE_SIZE);
4171 	}
4172 
4173 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4174 		seq_printf(m, "total_%s %llu\n",
4175 			   vm_event_name(memcg1_events[i]),
4176 			   (u64)memcg_events(memcg, memcg1_events[i]));
4177 
4178 	for (i = 0; i < NR_LRU_LISTS; i++)
4179 		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4180 			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4181 			   PAGE_SIZE);
4182 
4183 #ifdef CONFIG_DEBUG_VM
4184 	{
4185 		pg_data_t *pgdat;
4186 		struct mem_cgroup_per_node *mz;
4187 		unsigned long anon_cost = 0;
4188 		unsigned long file_cost = 0;
4189 
4190 		for_each_online_pgdat(pgdat) {
4191 			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
4192 
4193 			anon_cost += mz->lruvec.anon_cost;
4194 			file_cost += mz->lruvec.file_cost;
4195 		}
4196 		seq_printf(m, "anon_cost %lu\n", anon_cost);
4197 		seq_printf(m, "file_cost %lu\n", file_cost);
4198 	}
4199 #endif
4200 
4201 	return 0;
4202 }
4203 
4204 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4205 				      struct cftype *cft)
4206 {
4207 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4208 
4209 	return mem_cgroup_swappiness(memcg);
4210 }
4211 
4212 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4213 				       struct cftype *cft, u64 val)
4214 {
4215 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4216 
4217 	if (val > 200)
4218 		return -EINVAL;
4219 
4220 	if (css->parent)
4221 		memcg->swappiness = val;
4222 	else
4223 		vm_swappiness = val;
4224 
4225 	return 0;
4226 }
4227 
4228 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4229 {
4230 	struct mem_cgroup_threshold_ary *t;
4231 	unsigned long usage;
4232 	int i;
4233 
4234 	rcu_read_lock();
4235 	if (!swap)
4236 		t = rcu_dereference(memcg->thresholds.primary);
4237 	else
4238 		t = rcu_dereference(memcg->memsw_thresholds.primary);
4239 
4240 	if (!t)
4241 		goto unlock;
4242 
4243 	usage = mem_cgroup_usage(memcg, swap);
4244 
4245 	/*
4246 	 * current_threshold points to threshold just below or equal to usage.
4247 	 * If that is not the case, a threshold was crossed after the last
4248 	 * call of __mem_cgroup_threshold().
4249 	 */
4250 	i = t->current_threshold;
4251 
4252 	/*
4253 	 * Iterate backward over array of thresholds starting from
4254 	 * current_threshold and check if a threshold is crossed.
4255 	 * If none of thresholds below usage is crossed, we read
4256 	 * only one element of the array here.
4257 	 */
4258 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4259 		eventfd_signal(t->entries[i].eventfd, 1);
4260 
4261 	/* i = current_threshold + 1 */
4262 	i++;
4263 
4264 	/*
4265 	 * Iterate forward over array of thresholds starting from
4266 	 * current_threshold+1 and check if a threshold is crossed.
4267 	 * If none of thresholds above usage is crossed, we read
4268 	 * only one element of the array here.
4269 	 */
4270 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4271 		eventfd_signal(t->entries[i].eventfd, 1);
4272 
4273 	/* Update current_threshold */
4274 	t->current_threshold = i - 1;
4275 unlock:
4276 	rcu_read_unlock();
4277 }
4278 
4279 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4280 {
4281 	while (memcg) {
4282 		__mem_cgroup_threshold(memcg, false);
4283 		if (do_memsw_account())
4284 			__mem_cgroup_threshold(memcg, true);
4285 
4286 		memcg = parent_mem_cgroup(memcg);
4287 	}
4288 }
4289 
4290 static int compare_thresholds(const void *a, const void *b)
4291 {
4292 	const struct mem_cgroup_threshold *_a = a;
4293 	const struct mem_cgroup_threshold *_b = b;
4294 
4295 	if (_a->threshold > _b->threshold)
4296 		return 1;
4297 
4298 	if (_a->threshold < _b->threshold)
4299 		return -1;
4300 
4301 	return 0;
4302 }
4303 
4304 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4305 {
4306 	struct mem_cgroup_eventfd_list *ev;
4307 
4308 	spin_lock(&memcg_oom_lock);
4309 
4310 	list_for_each_entry(ev, &memcg->oom_notify, list)
4311 		eventfd_signal(ev->eventfd, 1);
4312 
4313 	spin_unlock(&memcg_oom_lock);
4314 	return 0;
4315 }
4316 
4317 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4318 {
4319 	struct mem_cgroup *iter;
4320 
4321 	for_each_mem_cgroup_tree(iter, memcg)
4322 		mem_cgroup_oom_notify_cb(iter);
4323 }
4324 
4325 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4326 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4327 {
4328 	struct mem_cgroup_thresholds *thresholds;
4329 	struct mem_cgroup_threshold_ary *new;
4330 	unsigned long threshold;
4331 	unsigned long usage;
4332 	int i, size, ret;
4333 
4334 	ret = page_counter_memparse(args, "-1", &threshold);
4335 	if (ret)
4336 		return ret;
4337 
4338 	mutex_lock(&memcg->thresholds_lock);
4339 
4340 	if (type == _MEM) {
4341 		thresholds = &memcg->thresholds;
4342 		usage = mem_cgroup_usage(memcg, false);
4343 	} else if (type == _MEMSWAP) {
4344 		thresholds = &memcg->memsw_thresholds;
4345 		usage = mem_cgroup_usage(memcg, true);
4346 	} else
4347 		BUG();
4348 
4349 	/* Check if a threshold crossed before adding a new one */
4350 	if (thresholds->primary)
4351 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4352 
4353 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4354 
4355 	/* Allocate memory for new array of thresholds */
4356 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4357 	if (!new) {
4358 		ret = -ENOMEM;
4359 		goto unlock;
4360 	}
4361 	new->size = size;
4362 
4363 	/* Copy thresholds (if any) to new array */
4364 	if (thresholds->primary)
4365 		memcpy(new->entries, thresholds->primary->entries,
4366 		       flex_array_size(new, entries, size - 1));
4367 
4368 	/* Add new threshold */
4369 	new->entries[size - 1].eventfd = eventfd;
4370 	new->entries[size - 1].threshold = threshold;
4371 
4372 	/* Sort thresholds. Registering of new threshold isn't time-critical */
4373 	sort(new->entries, size, sizeof(*new->entries),
4374 			compare_thresholds, NULL);
4375 
4376 	/* Find current threshold */
4377 	new->current_threshold = -1;
4378 	for (i = 0; i < size; i++) {
4379 		if (new->entries[i].threshold <= usage) {
4380 			/*
4381 			 * new->current_threshold will not be used until
4382 			 * rcu_assign_pointer(), so it's safe to increment
4383 			 * it here.
4384 			 */
4385 			++new->current_threshold;
4386 		} else
4387 			break;
4388 	}
4389 
4390 	/* Free old spare buffer and save old primary buffer as spare */
4391 	kfree(thresholds->spare);
4392 	thresholds->spare = thresholds->primary;
4393 
4394 	rcu_assign_pointer(thresholds->primary, new);
4395 
4396 	/* To be sure that nobody uses thresholds */
4397 	synchronize_rcu();
4398 
4399 unlock:
4400 	mutex_unlock(&memcg->thresholds_lock);
4401 
4402 	return ret;
4403 }
4404 
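/*
 * Illustrative registration from userspace (cgroup v1 memory thresholds;
 * file names per the cgroup-v1 memory documentation, paths abbreviated):
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("memory.usage_in_bytes", O_RDONLY);
 *	int cfd = open("cgroup.event_control", O_WRONLY);
 *	dprintf(cfd, "%d %d %s", efd, ufd, "50M");
 *
 * memcg_write_event_control() parses that line and calls
 * __mem_cgroup_usage_register_event() above with args == "50M"; a read()
 * on efd then returns whenever usage crosses the threshold in either
 * direction.
 */
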
4405 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4406 	struct eventfd_ctx *eventfd, const char *args)
4407 {
4408 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4409 }
4410 
4411 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4412 	struct eventfd_ctx *eventfd, const char *args)
4413 {
4414 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4415 }
4416 
4417 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4418 	struct eventfd_ctx *eventfd, enum res_type type)
4419 {
4420 	struct mem_cgroup_thresholds *thresholds;
4421 	struct mem_cgroup_threshold_ary *new;
4422 	unsigned long usage;
4423 	int i, j, size, entries;
4424 
4425 	mutex_lock(&memcg->thresholds_lock);
4426 
4427 	if (type == _MEM) {
4428 		thresholds = &memcg->thresholds;
4429 		usage = mem_cgroup_usage(memcg, false);
4430 	} else if (type == _MEMSWAP) {
4431 		thresholds = &memcg->memsw_thresholds;
4432 		usage = mem_cgroup_usage(memcg, true);
4433 	} else
4434 		BUG();
4435 
4436 	if (!thresholds->primary)
4437 		goto unlock;
4438 
4439 	/* Check if a threshold crossed before removing */
4440 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4441 
4442 	/* Calculate new number of thresholds */
4443 	size = entries = 0;
4444 	for (i = 0; i < thresholds->primary->size; i++) {
4445 		if (thresholds->primary->entries[i].eventfd != eventfd)
4446 			size++;
4447 		else
4448 			entries++;
4449 	}
4450 
4451 	new = thresholds->spare;
4452 
4453 	/* If no items related to eventfd have been cleared, nothing to do */
4454 	if (!entries)
4455 		goto unlock;
4456 
4457 	/* Set thresholds array to NULL if we don't have thresholds */
4458 	if (!size) {
4459 		kfree(new);
4460 		new = NULL;
4461 		goto swap_buffers;
4462 	}
4463 
4464 	new->size = size;
4465 
4466 	/* Copy thresholds and find current threshold */
4467 	new->current_threshold = -1;
4468 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4469 		if (thresholds->primary->entries[i].eventfd == eventfd)
4470 			continue;
4471 
4472 		new->entries[j] = thresholds->primary->entries[i];
4473 		if (new->entries[j].threshold <= usage) {
4474 			/*
4475 			 * new->current_threshold will not be used
4476 			 * until rcu_assign_pointer(), so it's safe to increment
4477 			 * it here.
4478 			 */
4479 			++new->current_threshold;
4480 		}
4481 		j++;
4482 	}
4483 
4484 swap_buffers:
4485 	/* Swap primary and spare array */
4486 	thresholds->spare = thresholds->primary;
4487 
4488 	rcu_assign_pointer(thresholds->primary, new);
4489 
4490 	/* To be sure that nobody uses thresholds */
4491 	synchronize_rcu();
4492 
4493 	/* If all events are unregistered, free the spare array */
4494 	if (!new) {
4495 		kfree(thresholds->spare);
4496 		thresholds->spare = NULL;
4497 	}
4498 unlock:
4499 	mutex_unlock(&memcg->thresholds_lock);
4500 }
4501 
4502 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4503 	struct eventfd_ctx *eventfd)
4504 {
4505 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4506 }
4507 
4508 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4509 	struct eventfd_ctx *eventfd)
4510 {
4511 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4512 }
4513 
4514 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4515 	struct eventfd_ctx *eventfd, const char *args)
4516 {
4517 	struct mem_cgroup_eventfd_list *event;
4518 
4519 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4520 	if (!event)
4521 		return -ENOMEM;
4522 
4523 	spin_lock(&memcg_oom_lock);
4524 
4525 	event->eventfd = eventfd;
4526 	list_add(&event->list, &memcg->oom_notify);
4527 
4528 	/* already in OOM ? */
4529 	if (memcg->under_oom)
4530 		eventfd_signal(eventfd, 1);
4531 	spin_unlock(&memcg_oom_lock);
4532 
4533 	return 0;
4534 }
4535 
4536 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4537 	struct eventfd_ctx *eventfd)
4538 {
4539 	struct mem_cgroup_eventfd_list *ev, *tmp;
4540 
4541 	spin_lock(&memcg_oom_lock);
4542 
4543 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4544 		if (ev->eventfd == eventfd) {
4545 			list_del(&ev->list);
4546 			kfree(ev);
4547 		}
4548 	}
4549 
4550 	spin_unlock(&memcg_oom_lock);
4551 }
4552 
4553 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4554 {
4555 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4556 
4557 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4558 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4559 	seq_printf(sf, "oom_kill %lu\n",
4560 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4561 	return 0;
4562 }
4563 
4564 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4565 	struct cftype *cft, u64 val)
4566 {
4567 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4568 
4569 	/* cannot set to root cgroup and only 0 and 1 are allowed */
4570 	if (!css->parent || !((val == 0) || (val == 1)))
4571 		return -EINVAL;
4572 
4573 	memcg->oom_kill_disable = val;
4574 	if (!val)
4575 		memcg_oom_recover(memcg);
4576 
4577 	return 0;
4578 }
4579 
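/*
 * Example (cgroup v1, illustrative): "echo 1 > memory.oom_control" disables
 * the OOM killer for a non-root group, so tasks hitting the limit wait for
 * memory instead of being killed; reading the file shows the three fields
 * printed by mem_cgroup_oom_control_read() above (oom_kill_disable,
 * under_oom, oom_kill).
 */
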
4580 #ifdef CONFIG_CGROUP_WRITEBACK
4581 
4582 #include <trace/events/writeback.h>
4583 
4584 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4585 {
4586 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4587 }
4588 
4589 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4590 {
4591 	wb_domain_exit(&memcg->cgwb_domain);
4592 }
4593 
4594 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4595 {
4596 	wb_domain_size_changed(&memcg->cgwb_domain);
4597 }
4598 
4599 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4600 {
4601 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4602 
4603 	if (!memcg->css.parent)
4604 		return NULL;
4605 
4606 	return &memcg->cgwb_domain;
4607 }
4608 
4609 /*
4610  * idx can be of type enum memcg_stat_item or node_stat_item.
4611  * Keep in sync with memcg_exact_page().
4612  */
4613 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4614 {
4615 	long x = atomic_long_read(&memcg->vmstats[idx]);
4616 	int cpu;
4617 
4618 	for_each_online_cpu(cpu)
4619 		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4620 	if (x < 0)
4621 		x = 0;
4622 	return x;
4623 }
4624 
4625 /**
4626  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4627  * @wb: bdi_writeback in question
4628  * @pfilepages: out parameter for number of file pages
4629  * @pheadroom: out parameter for number of allocatable pages according to memcg
4630  * @pdirty: out parameter for number of dirty pages
4631  * @pwriteback: out parameter for number of pages under writeback
4632  *
4633  * Determine the numbers of file, headroom, dirty, and writeback pages in
4634  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4635  * is a bit more involved.
4636  *
4637  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4638  * headroom is calculated as the lowest headroom of itself and the
4639  * ancestors.  Note that this doesn't consider the actual amount of
4640  * available memory in the system.  The caller should further cap
4641  * *@pheadroom accordingly.
4642  */
4643 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4644 			 unsigned long *pheadroom, unsigned long *pdirty,
4645 			 unsigned long *pwriteback)
4646 {
4647 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4648 	struct mem_cgroup *parent;
4649 
4650 	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4651 
4652 	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4653 	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4654 			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4655 	*pheadroom = PAGE_COUNTER_MAX;
4656 
4657 	while ((parent = parent_mem_cgroup(memcg))) {
4658 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4659 					    READ_ONCE(memcg->memory.high));
4660 		unsigned long used = page_counter_read(&memcg->memory);
4661 
4662 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4663 		memcg = parent;
4664 	}
4665 }
4666 
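/*
 * Worked example for the headroom rule above (numbers are invented): if an
 * ancestor has min(max, high) == 1G with 768M used, its headroom is 256M;
 * even if this memcg has a 4G ceiling and only 100M used, the loop reports
 * *pheadroom == 256M, the smallest headroom along the ancestry.
 */
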
4667 /*
4668  * Foreign dirty flushing
4669  *
4670  * There's an inherent mismatch between memcg and writeback.  The former
4671  * tracks ownership per-page while the latter per-inode.  This was a
4672  * deliberate design decision because honoring per-page ownership in the
4673  * writeback path is complicated, may lead to higher CPU and IO overheads
4674  * and deemed unnecessary given that write-sharing an inode across
4675  * different cgroups isn't a common use-case.
4676  *
4677  * Combined with inode majority-writer ownership switching, this works well
4678  * enough in most cases but there are some pathological cases.  For
4679  * example, let's say there are two cgroups A and B which keep writing to
4680  * different but confined parts of the same inode.  B owns the inode and
4681  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4682  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4683  * triggering background writeback.  A will be slowed down without a way to
4684  * make writeback of the dirty pages happen.
4685  *
4686  * Conditions like the above can lead to a cgroup getting repeatedly and
4687  * severely throttled after making some progress after each
4688  * dirty_expire_interval while the underlying IO device is almost
4689  * completely idle.
4690  *
4691  * Solving this problem completely requires matching the ownership tracking
4692  * granularities between memcg and writeback in either direction.  However,
4693  * the more egregious behaviors can be avoided by simply remembering the
4694  * most recent foreign dirtying events and initiating remote flushes on
4695  * them when local writeback isn't enough to keep the memory clean enough.
4696  *
4697  * The following two functions implement such mechanism.  When a foreign
4698  * page - a page whose memcg and writeback ownerships don't match - is
4699  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4700  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4701  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4702  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4703  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4704  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4705  * limited to MEMCG_CGWB_FRN_CNT.
4706  *
4707  * The mechanism only remembers IDs and doesn't hold any object references.
4708  * As being wrong occasionally doesn't matter, updates and accesses to the
4709  * records are lockless and racy.
4710  */
4711 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4712 					     struct bdi_writeback *wb)
4713 {
4714 	struct mem_cgroup *memcg = page->mem_cgroup;
4715 	struct memcg_cgwb_frn *frn;
4716 	u64 now = get_jiffies_64();
4717 	u64 oldest_at = now;
4718 	int oldest = -1;
4719 	int i;
4720 
4721 	trace_track_foreign_dirty(page, wb);
4722 
4723 	/*
4724 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4725 	 * using it.  If not replace the oldest one which isn't being
4726 	 * written out.
4727 	 */
4728 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4729 		frn = &memcg->cgwb_frn[i];
4730 		if (frn->bdi_id == wb->bdi->id &&
4731 		    frn->memcg_id == wb->memcg_css->id)
4732 			break;
4733 		if (time_before64(frn->at, oldest_at) &&
4734 		    atomic_read(&frn->done.cnt) == 1) {
4735 			oldest = i;
4736 			oldest_at = frn->at;
4737 		}
4738 	}
4739 
4740 	if (i < MEMCG_CGWB_FRN_CNT) {
4741 		/*
4742 		 * Re-using an existing one.  Update timestamp lazily to
4743 		 * avoid making the cacheline hot.  We want them to be
4744 		 * reasonably up-to-date and significantly shorter than
4745 		 * dirty_expire_interval as that's what expires the record.
4746 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4747 		 */
4748 		unsigned long update_intv =
4749 			min_t(unsigned long, HZ,
4750 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4751 
4752 		if (time_before64(frn->at, now - update_intv))
4753 			frn->at = now;
4754 	} else if (oldest >= 0) {
4755 		/* replace the oldest free one */
4756 		frn = &memcg->cgwb_frn[oldest];
4757 		frn->bdi_id = wb->bdi->id;
4758 		frn->memcg_id = wb->memcg_css->id;
4759 		frn->at = now;
4760 	}
4761 }
4762 
4763 /* issue foreign writeback flushes for recorded foreign dirtying events */
4764 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4765 {
4766 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4767 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4768 	u64 now = jiffies_64;
4769 	int i;
4770 
4771 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4772 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4773 
4774 		/*
4775 		 * If the record is older than dirty_expire_interval,
4776 		 * writeback on it has already started.  No need to kick it
4777 		 * off again.  Also, don't start a new one if there's
4778 		 * already one in flight.
4779 		 */
4780 		if (time_after64(frn->at, now - intv) &&
4781 		    atomic_read(&frn->done.cnt) == 1) {
4782 			frn->at = 0;
4783 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4784 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4785 					       WB_REASON_FOREIGN_FLUSH,
4786 					       &frn->done);
4787 		}
4788 	}
4789 }
4790 
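/*
 * Illustrative flow tying the two functions above together: dirtying a page
 * whose memcg and writeback ownership differ records the foreign wb via
 * mem_cgroup_track_foreign_dirty_slowpath(); later, when
 * balance_dirty_pages() decides this memcg must throttle, it calls
 * mem_cgroup_flush_foreign(), which kicks cgroup_writeback_by_id() for each
 * recorded slot that has not yet expired and has no flush in flight
 * (frn->done.cnt == 1).
 */
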
4791 #else	/* CONFIG_CGROUP_WRITEBACK */
4792 
4793 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4794 {
4795 	return 0;
4796 }
4797 
4798 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4799 {
4800 }
4801 
4802 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4803 {
4804 }
4805 
4806 #endif	/* CONFIG_CGROUP_WRITEBACK */
4807 
4808 /*
4809  * DO NOT USE IN NEW FILES.
4810  *
4811  * "cgroup.event_control" implementation.
4812  *
4813  * This is way over-engineered.  It tries to support fully configurable
4814  * events for each user.  Such level of flexibility is completely
4815  * unnecessary especially in the light of the planned unified hierarchy.
4816  *
4817  * Please deprecate this and replace with something simpler if at all
4818  * possible.
4819  */
4820 
4821 /*
4822  * Unregister event and free resources.
4823  *
4824  * Gets called from workqueue.
4825  */
4826 static void memcg_event_remove(struct work_struct *work)
4827 {
4828 	struct mem_cgroup_event *event =
4829 		container_of(work, struct mem_cgroup_event, remove);
4830 	struct mem_cgroup *memcg = event->memcg;
4831 
4832 	remove_wait_queue(event->wqh, &event->wait);
4833 
4834 	event->unregister_event(memcg, event->eventfd);
4835 
4836 	/* Notify userspace the event is going away. */
4837 	eventfd_signal(event->eventfd, 1);
4838 
4839 	eventfd_ctx_put(event->eventfd);
4840 	kfree(event);
4841 	css_put(&memcg->css);
4842 }
4843 
4844 /*
4845  * Gets called on EPOLLHUP on eventfd when user closes it.
4846  *
4847  * Called with wqh->lock held and interrupts disabled.
4848  */
4849 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4850 			    int sync, void *key)
4851 {
4852 	struct mem_cgroup_event *event =
4853 		container_of(wait, struct mem_cgroup_event, wait);
4854 	struct mem_cgroup *memcg = event->memcg;
4855 	__poll_t flags = key_to_poll(key);
4856 
4857 	if (flags & EPOLLHUP) {
4858 		/*
4859 		 * If the event has been detached at cgroup removal, we
4860 		 * can simply return knowing the other side will cleanup
4861 		 * for us.
4862 		 *
4863 		 * We can't race against event freeing since the other
4864 		 * side will require wqh->lock via remove_wait_queue(),
4865 		 * which we hold.
4866 		 */
4867 		spin_lock(&memcg->event_list_lock);
4868 		if (!list_empty(&event->list)) {
4869 			list_del_init(&event->list);
4870 			/*
4871 			 * We are in atomic context, but cgroup_event_remove()
4872 			 * may sleep, so we have to call it in workqueue.
4873 			 */
4874 			schedule_work(&event->remove);
4875 		}
4876 		spin_unlock(&memcg->event_list_lock);
4877 	}
4878 
4879 	return 0;
4880 }
4881 
4882 static void memcg_event_ptable_queue_proc(struct file *file,
4883 		wait_queue_head_t *wqh, poll_table *pt)
4884 {
4885 	struct mem_cgroup_event *event =
4886 		container_of(pt, struct mem_cgroup_event, pt);
4887 
4888 	event->wqh = wqh;
4889 	add_wait_queue(wqh, &event->wait);
4890 }
4891 
4892 /*
4893  * DO NOT USE IN NEW FILES.
4894  *
4895  * Parse input and register new cgroup event handler.
4896  *
4897  * Input must be in format '<event_fd> <control_fd> <args>'.
4898  * Interpretation of args is defined by control file implementation.
4899  */
4900 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4901 					 char *buf, size_t nbytes, loff_t off)
4902 {
4903 	struct cgroup_subsys_state *css = of_css(of);
4904 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4905 	struct mem_cgroup_event *event;
4906 	struct cgroup_subsys_state *cfile_css;
4907 	unsigned int efd, cfd;
4908 	struct fd efile;
4909 	struct fd cfile;
4910 	struct dentry *cdentry;
4911 	const char *name;
4912 	char *endp;
4913 	int ret;
4914 
4915 	buf = strstrip(buf);
4916 
4917 	efd = simple_strtoul(buf, &endp, 10);
4918 	if (*endp != ' ')
4919 		return -EINVAL;
4920 	buf = endp + 1;
4921 
4922 	cfd = simple_strtoul(buf, &endp, 10);
4923 	if ((*endp != ' ') && (*endp != '\0'))
4924 		return -EINVAL;
4925 	buf = endp + 1;
4926 
4927 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4928 	if (!event)
4929 		return -ENOMEM;
4930 
4931 	event->memcg = memcg;
4932 	INIT_LIST_HEAD(&event->list);
4933 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4934 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4935 	INIT_WORK(&event->remove, memcg_event_remove);
4936 
4937 	efile = fdget(efd);
4938 	if (!efile.file) {
4939 		ret = -EBADF;
4940 		goto out_kfree;
4941 	}
4942 
4943 	event->eventfd = eventfd_ctx_fileget(efile.file);
4944 	if (IS_ERR(event->eventfd)) {
4945 		ret = PTR_ERR(event->eventfd);
4946 		goto out_put_efile;
4947 	}
4948 
4949 	cfile = fdget(cfd);
4950 	if (!cfile.file) {
4951 		ret = -EBADF;
4952 		goto out_put_eventfd;
4953 	}
4954 
4955 	/* the process needs read permission on the control file */
4956 	/* AV: shouldn't we check that it's been opened for read instead? */
4957 	ret = inode_permission(file_inode(cfile.file), MAY_READ);
4958 	if (ret < 0)
4959 		goto out_put_cfile;
4960 
4961 	/*
4962 	 * The control file must be a regular cgroup1 file. As a regular cgroup
4963 	 * file can't be renamed, it's safe to access its name afterwards.
4964 	 */
4965 	cdentry = cfile.file->f_path.dentry;
4966 	if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
4967 		ret = -EINVAL;
4968 		goto out_put_cfile;
4969 	}
4970 
4971 	/*
4972 	 * Determine the event callbacks and set them in @event.  This used
4973 	 * to be done via struct cftype but cgroup core no longer knows
4974 	 * about these events.  The following is crude but the whole thing
4975 	 * is for compatibility anyway.
4976 	 *
4977 	 * DO NOT ADD NEW FILES.
4978 	 */
4979 	name = cdentry->d_name.name;
4980 
4981 	if (!strcmp(name, "memory.usage_in_bytes")) {
4982 		event->register_event = mem_cgroup_usage_register_event;
4983 		event->unregister_event = mem_cgroup_usage_unregister_event;
4984 	} else if (!strcmp(name, "memory.oom_control")) {
4985 		event->register_event = mem_cgroup_oom_register_event;
4986 		event->unregister_event = mem_cgroup_oom_unregister_event;
4987 	} else if (!strcmp(name, "memory.pressure_level")) {
4988 		event->register_event = vmpressure_register_event;
4989 		event->unregister_event = vmpressure_unregister_event;
4990 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4991 		event->register_event = memsw_cgroup_usage_register_event;
4992 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4993 	} else {
4994 		ret = -EINVAL;
4995 		goto out_put_cfile;
4996 	}
4997 
4998 	/*
4999 	 * Verify @cfile should belong to @css.  Also, remaining events are
5000 	 * automatically removed on cgroup destruction but the removal is
5001 	 * asynchronous, so take an extra ref on @css.
5002 	 */
5003 	cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
5004 					       &memory_cgrp_subsys);
5005 	ret = -EINVAL;
5006 	if (IS_ERR(cfile_css))
5007 		goto out_put_cfile;
5008 	if (cfile_css != css) {
5009 		css_put(cfile_css);
5010 		goto out_put_cfile;
5011 	}
5012 
5013 	ret = event->register_event(memcg, event->eventfd, buf);
5014 	if (ret)
5015 		goto out_put_css;
5016 
5017 	vfs_poll(efile.file, &event->pt);
5018 
5019 	spin_lock(&memcg->event_list_lock);
5020 	list_add(&event->list, &memcg->event_list);
5021 	spin_unlock(&memcg->event_list_lock);
5022 
5023 	fdput(cfile);
5024 	fdput(efile);
5025 
5026 	return nbytes;
5027 
5028 out_put_css:
5029 	css_put(css);
5030 out_put_cfile:
5031 	fdput(cfile);
5032 out_put_eventfd:
5033 	eventfd_ctx_put(event->eventfd);
5034 out_put_efile:
5035 	fdput(efile);
5036 out_kfree:
5037 	kfree(event);
5038 
5039 	return ret;
5040 }
5041 
5042 static struct cftype mem_cgroup_legacy_files[] = {
5043 	{
5044 		.name = "usage_in_bytes",
5045 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5046 		.read_u64 = mem_cgroup_read_u64,
5047 	},
5048 	{
5049 		.name = "max_usage_in_bytes",
5050 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5051 		.write = mem_cgroup_reset,
5052 		.read_u64 = mem_cgroup_read_u64,
5053 	},
5054 	{
5055 		.name = "limit_in_bytes",
5056 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5057 		.write = mem_cgroup_write,
5058 		.read_u64 = mem_cgroup_read_u64,
5059 	},
5060 	{
5061 		.name = "soft_limit_in_bytes",
5062 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5063 		.write = mem_cgroup_write,
5064 		.read_u64 = mem_cgroup_read_u64,
5065 	},
5066 	{
5067 		.name = "failcnt",
5068 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5069 		.write = mem_cgroup_reset,
5070 		.read_u64 = mem_cgroup_read_u64,
5071 	},
5072 	{
5073 		.name = "stat",
5074 		.seq_show = memcg_stat_show,
5075 	},
5076 	{
5077 		.name = "force_empty",
5078 		.write = mem_cgroup_force_empty_write,
5079 	},
5080 	{
5081 		.name = "use_hierarchy",
5082 		.write_u64 = mem_cgroup_hierarchy_write,
5083 		.read_u64 = mem_cgroup_hierarchy_read,
5084 	},
5085 	{
5086 		.name = "cgroup.event_control",		/* XXX: for compat */
5087 		.write = memcg_write_event_control,
5088 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5089 	},
5090 	{
5091 		.name = "swappiness",
5092 		.read_u64 = mem_cgroup_swappiness_read,
5093 		.write_u64 = mem_cgroup_swappiness_write,
5094 	},
5095 	{
5096 		.name = "move_charge_at_immigrate",
5097 		.read_u64 = mem_cgroup_move_charge_read,
5098 		.write_u64 = mem_cgroup_move_charge_write,
5099 	},
5100 	{
5101 		.name = "oom_control",
5102 		.seq_show = mem_cgroup_oom_control_read,
5103 		.write_u64 = mem_cgroup_oom_control_write,
5104 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
5105 	},
5106 	{
5107 		.name = "pressure_level",
5108 	},
5109 #ifdef CONFIG_NUMA
5110 	{
5111 		.name = "numa_stat",
5112 		.seq_show = memcg_numa_stat_show,
5113 	},
5114 #endif
5115 	{
5116 		.name = "kmem.limit_in_bytes",
5117 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5118 		.write = mem_cgroup_write,
5119 		.read_u64 = mem_cgroup_read_u64,
5120 	},
5121 	{
5122 		.name = "kmem.usage_in_bytes",
5123 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5124 		.read_u64 = mem_cgroup_read_u64,
5125 	},
5126 	{
5127 		.name = "kmem.failcnt",
5128 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5129 		.write = mem_cgroup_reset,
5130 		.read_u64 = mem_cgroup_read_u64,
5131 	},
5132 	{
5133 		.name = "kmem.max_usage_in_bytes",
5134 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5135 		.write = mem_cgroup_reset,
5136 		.read_u64 = mem_cgroup_read_u64,
5137 	},
5138 #if defined(CONFIG_MEMCG_KMEM) && \
5139 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5140 	{
5141 		.name = "kmem.slabinfo",
5142 		.seq_show = memcg_slab_show,
5143 	},
5144 #endif
5145 	{
5146 		.name = "kmem.tcp.limit_in_bytes",
5147 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5148 		.write = mem_cgroup_write,
5149 		.read_u64 = mem_cgroup_read_u64,
5150 	},
5151 	{
5152 		.name = "kmem.tcp.usage_in_bytes",
5153 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5154 		.read_u64 = mem_cgroup_read_u64,
5155 	},
5156 	{
5157 		.name = "kmem.tcp.failcnt",
5158 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5159 		.write = mem_cgroup_reset,
5160 		.read_u64 = mem_cgroup_read_u64,
5161 	},
5162 	{
5163 		.name = "kmem.tcp.max_usage_in_bytes",
5164 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5165 		.write = mem_cgroup_reset,
5166 		.read_u64 = mem_cgroup_read_u64,
5167 	},
5168 	{ },	/* terminate */
5169 };
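
/*
 * For illustration: each entry in the table above surfaces in cgroup v1 as
 * a control file named "memory.<name>", e.g. memory.usage_in_bytes or
 * memory.kmem.tcp.failcnt, in every memcg directory of the legacy
 * hierarchy.
 */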
5170 
5171 /*
5172  * Private memory cgroup IDR
5173  *
5174  * Swap-out records and page cache shadow entries need to store memcg
5175  * references in constrained space, so we maintain an ID space that is
5176  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5177  * memory-controlled cgroups to 64k.
5178  *
5179  * However, there usually are many references to the offline CSS after
5180  * the cgroup has been destroyed, such as page cache or reclaimable
5181  * slab objects, that don't need to hang on to the ID. We want to keep
5182  * those dead CSS from occupying IDs, or we might quickly exhaust the
5183  * relatively small ID space and prevent the creation of new cgroups
5184  * even when there are much fewer than 64k cgroups - possibly none.
5185  *
5186  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5187  * be freed and recycled when it's no longer needed, which is usually
5188  * when the CSS is offlined.
5189  *
5190  * The only exception to that are records of swapped out tmpfs/shmem
5191  * pages that need to be attributed to live ancestors on swapin. But
5192  * those references are manageable from userspace.
5193  */
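
/*
 * In short, the lifecycle as implemented below: the ID is allocated
 * (pointing at NULL) in mem_cgroup_alloc(), published with idr_replace()
 * once the memcg is fully set up, pinned with a reference at css_online(),
 * and dropped at css_offline().  Swap-out records take extra references
 * via mem_cgroup_id_get_many() and drop them with mem_cgroup_id_put_many().
 */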
5194 
5195 static DEFINE_IDR(mem_cgroup_idr);
5196 
5197 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5198 {
5199 	if (memcg->id.id > 0) {
5200 		trace_android_vh_mem_cgroup_id_remove(memcg);
5201 		idr_remove(&mem_cgroup_idr, memcg->id.id);
5202 		memcg->id.id = 0;
5203 	}
5204 }
5205 
5206 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5207 						  unsigned int n)
5208 {
5209 	refcount_add(n, &memcg->id.ref);
5210 }
5211 
5212 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5213 {
5214 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5215 		mem_cgroup_id_remove(memcg);
5216 
5217 		/* Memcg ID pins CSS */
5218 		css_put(&memcg->css);
5219 	}
5220 }
5221 
5222 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5223 {
5224 	mem_cgroup_id_put_many(memcg, 1);
5225 }
5226 
5227 /**
5228  * mem_cgroup_from_id - look up a memcg from a memcg id
5229  * @id: the memcg id to look up
5230  *
5231  * Caller must hold rcu_read_lock().
5232  */
5233 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5234 {
5235 	WARN_ON_ONCE(!rcu_read_lock_held());
5236 	return idr_find(&mem_cgroup_idr, id);
5237 }
5238 EXPORT_SYMBOL_GPL(mem_cgroup_from_id);
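
/*
 * Typical lookup pattern (as used by __mem_cgroup_charge() below): resolve
 * the ID under RCU and only keep the memcg if its css can still be pinned:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */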
5239 
5240 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5241 {
5242 	struct mem_cgroup_per_node *pn;
5243 	int tmp = node;
5244 	/*
5245 	 * This routine is called against all possible nodes.
5246 	 * But it's a BUG to call kmalloc() against an offline node.
5247 	 *
5248 	 * TODO: this routine can waste much memory for nodes which will
5249 	 *       never be onlined. It's better to use memory hotplug callback
5250 	 *       function.
5251 	 */
5252 	if (!node_state(node, N_NORMAL_MEMORY))
5253 		tmp = -1;
5254 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5255 	if (!pn)
5256 		return 1;
5257 
5258 	pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
5259 						 GFP_KERNEL_ACCOUNT);
5260 	if (!pn->lruvec_stat_local) {
5261 		kfree(pn);
5262 		return 1;
5263 	}
5264 
5265 	pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
5266 					       GFP_KERNEL_ACCOUNT);
5267 	if (!pn->lruvec_stat_cpu) {
5268 		free_percpu(pn->lruvec_stat_local);
5269 		kfree(pn);
5270 		return 1;
5271 	}
5272 
5273 	lruvec_init(&pn->lruvec);
5274 	pn->usage_in_excess = 0;
5275 	pn->on_tree = false;
5276 	pn->memcg = memcg;
5277 
5278 	memcg->nodeinfo[node] = pn;
5279 	return 0;
5280 }
5281 
5282 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5283 {
5284 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5285 
5286 	if (!pn)
5287 		return;
5288 
5289 	free_percpu(pn->lruvec_stat_cpu);
5290 	free_percpu(pn->lruvec_stat_local);
5291 	kfree(pn);
5292 }
5293 
5294 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5295 {
5296 	int node;
5297 
5298 	trace_android_vh_mem_cgroup_free(memcg);
5299 	for_each_node(node)
5300 		free_mem_cgroup_per_node_info(memcg, node);
5301 	free_percpu(memcg->vmstats_percpu);
5302 	free_percpu(memcg->vmstats_local);
5303 	kfree(memcg);
5304 }
5305 
5306 static void mem_cgroup_free(struct mem_cgroup *memcg)
5307 {
5308 	memcg_wb_domain_exit(memcg);
5309 	/*
5310 	 * Flush percpu vmstats and vmevents to guarantee the value correctness
5311 	 * on parent's and all ancestor levels.
5312 	 */
5313 	memcg_flush_percpu_vmstats(memcg);
5314 	memcg_flush_percpu_vmevents(memcg);
5315 	__mem_cgroup_free(memcg);
5316 }
5317 
5318 static struct mem_cgroup *mem_cgroup_alloc(void)
5319 {
5320 	struct mem_cgroup *memcg;
5321 	unsigned int size;
5322 	int node;
5323 	int __maybe_unused i;
5324 	long error = -ENOMEM;
5325 
5326 	size = sizeof(struct mem_cgroup);
5327 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5328 
5329 	memcg = kzalloc(size, GFP_KERNEL);
5330 	if (!memcg)
5331 		return ERR_PTR(error);
5332 
5333 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5334 				 1, MEM_CGROUP_ID_MAX,
5335 				 GFP_KERNEL);
5336 	if (memcg->id.id < 0) {
5337 		error = memcg->id.id;
5338 		goto fail;
5339 	}
5340 
5341 	memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5342 						GFP_KERNEL_ACCOUNT);
5343 	if (!memcg->vmstats_local)
5344 		goto fail;
5345 
5346 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5347 						 GFP_KERNEL_ACCOUNT);
5348 	if (!memcg->vmstats_percpu)
5349 		goto fail;
5350 
5351 	for_each_node(node)
5352 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5353 			goto fail;
5354 
5355 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5356 		goto fail;
5357 
5358 	INIT_WORK(&memcg->high_work, high_work_func);
5359 	INIT_LIST_HEAD(&memcg->oom_notify);
5360 	mutex_init(&memcg->thresholds_lock);
5361 	spin_lock_init(&memcg->move_lock);
5362 	vmpressure_init(&memcg->vmpressure);
5363 	INIT_LIST_HEAD(&memcg->event_list);
5364 	spin_lock_init(&memcg->event_list_lock);
5365 	memcg->socket_pressure = jiffies;
5366 #ifdef CONFIG_MEMCG_KMEM
5367 	memcg->kmemcg_id = -1;
5368 	INIT_LIST_HEAD(&memcg->objcg_list);
5369 #endif
5370 #ifdef CONFIG_CGROUP_WRITEBACK
5371 	INIT_LIST_HEAD(&memcg->cgwb_list);
5372 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5373 		memcg->cgwb_frn[i].done =
5374 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5375 #endif
5376 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5377 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5378 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5379 	memcg->deferred_split_queue.split_queue_len = 0;
5380 #endif
5381 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5382 	trace_android_vh_mem_cgroup_alloc(memcg);
5383 	return memcg;
5384 fail:
5385 	mem_cgroup_id_remove(memcg);
5386 	__mem_cgroup_free(memcg);
5387 	return ERR_PTR(error);
5388 }
5389 
5390 static struct cgroup_subsys_state * __ref
5391 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5392 {
5393 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5394 	struct mem_cgroup *memcg, *old_memcg;
5395 	long error = -ENOMEM;
5396 
5397 	old_memcg = set_active_memcg(parent);
5398 	memcg = mem_cgroup_alloc();
5399 	set_active_memcg(old_memcg);
5400 	if (IS_ERR(memcg))
5401 		return ERR_CAST(memcg);
5402 
5403 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5404 	memcg->soft_limit = PAGE_COUNTER_MAX;
5405 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5406 	if (parent) {
5407 		memcg->swappiness = mem_cgroup_swappiness(parent);
5408 		memcg->oom_kill_disable = parent->oom_kill_disable;
5409 	}
5410 	if (!parent) {
5411 		page_counter_init(&memcg->memory, NULL);
5412 		page_counter_init(&memcg->swap, NULL);
5413 		page_counter_init(&memcg->kmem, NULL);
5414 		page_counter_init(&memcg->tcpmem, NULL);
5415 	} else if (parent->use_hierarchy) {
5416 		memcg->use_hierarchy = true;
5417 		page_counter_init(&memcg->memory, &parent->memory);
5418 		page_counter_init(&memcg->swap, &parent->swap);
5419 		page_counter_init(&memcg->kmem, &parent->kmem);
5420 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5421 	} else {
5422 		page_counter_init(&memcg->memory, &root_mem_cgroup->memory);
5423 		page_counter_init(&memcg->swap, &root_mem_cgroup->swap);
5424 		page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
5425 		page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem);
5426 		/*
5427 		 * Deeper hierarchy with use_hierarchy == false doesn't make
5428 		 * much sense so let cgroup subsystem know about this
5429 		 * unfortunate state in our controller.
5430 		 */
5431 		if (parent != root_mem_cgroup)
5432 			memory_cgrp_subsys.broken_hierarchy = true;
5433 	}
5434 
5435 	/* The following stuff does not apply to the root */
5436 	if (!parent) {
5437 		root_mem_cgroup = memcg;
5438 		return &memcg->css;
5439 	}
5440 
5441 	error = memcg_online_kmem(memcg);
5442 	if (error)
5443 		goto fail;
5444 
5445 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5446 		static_branch_inc(&memcg_sockets_enabled_key);
5447 
5448 	return &memcg->css;
5449 fail:
5450 	mem_cgroup_id_remove(memcg);
5451 	mem_cgroup_free(memcg);
5452 	return ERR_PTR(error);
5453 }
5454 
5455 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5456 {
5457 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5458 
5459 	/*
5460 	 * A memcg must be visible for memcg_expand_shrinker_maps()
5461 	 * by the time the maps are allocated. So, we allocate maps
5462 	 * here, when for_each_mem_cgroup() can't skip it.
5463 	 */
5464 	if (memcg_alloc_shrinker_maps(memcg)) {
5465 		mem_cgroup_id_remove(memcg);
5466 		return -ENOMEM;
5467 	}
5468 
5469 	/* Online state pins memcg ID, memcg ID pins CSS */
5470 	refcount_set(&memcg->id.ref, 1);
5471 	css_get(css);
5472 	trace_android_vh_mem_cgroup_css_online(css, memcg);
5473 	return 0;
5474 }
5475 
5476 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5477 {
5478 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5479 	struct mem_cgroup_event *event, *tmp;
5480 
5481 	trace_android_vh_mem_cgroup_css_offline(css, memcg);
5482 	/*
5483 	 * Unregister events and notify userspace.
5484 	 * Notify userspace about cgroup removal only after rmdir of the cgroup
5485 	 * directory, to avoid a race between userspace and kernelspace.
5486 	 */
5487 	spin_lock(&memcg->event_list_lock);
5488 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5489 		list_del_init(&event->list);
5490 		schedule_work(&event->remove);
5491 	}
5492 	spin_unlock(&memcg->event_list_lock);
5493 
5494 	page_counter_set_min(&memcg->memory, 0);
5495 	page_counter_set_low(&memcg->memory, 0);
5496 
5497 	memcg_offline_kmem(memcg);
5498 	wb_memcg_offline(memcg);
5499 
5500 	drain_all_stock(memcg);
5501 
5502 	mem_cgroup_id_put(memcg);
5503 }
5504 
5505 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5506 {
5507 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5508 
5509 	invalidate_reclaim_iterators(memcg);
5510 }
5511 
5512 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5513 {
5514 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5515 	int __maybe_unused i;
5516 
5517 #ifdef CONFIG_CGROUP_WRITEBACK
5518 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5519 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5520 #endif
5521 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5522 		static_branch_dec(&memcg_sockets_enabled_key);
5523 
5524 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5525 		static_branch_dec(&memcg_sockets_enabled_key);
5526 
5527 	vmpressure_cleanup(&memcg->vmpressure);
5528 	cancel_work_sync(&memcg->high_work);
5529 	mem_cgroup_remove_from_trees(memcg);
5530 	memcg_free_shrinker_maps(memcg);
5531 	memcg_free_kmem(memcg);
5532 	mem_cgroup_free(memcg);
5533 }
5534 
5535 /**
5536  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5537  * @css: the target css
5538  *
5539  * Reset the states of the mem_cgroup associated with @css.  This is
5540  * invoked when the userland requests disabling on the default hierarchy
5541  * but the memcg is pinned through dependency.  The memcg should stop
5542  * applying policies and should revert to the vanilla state as it may be
5543  * made visible again.
5544  *
5545  * The current implementation only resets the essential configurations.
5546  * This needs to be expanded to cover all the visible parts.
5547  */
5548 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5549 {
5550 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5551 
5552 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5553 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5554 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5555 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5556 	page_counter_set_min(&memcg->memory, 0);
5557 	page_counter_set_low(&memcg->memory, 0);
5558 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5559 	memcg->soft_limit = PAGE_COUNTER_MAX;
5560 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5561 	memcg_wb_domain_size_changed(memcg);
5562 }
5563 
5564 #ifdef CONFIG_MMU
5565 /* Handlers for move charge at task migration. */
5566 static int mem_cgroup_do_precharge(unsigned long count)
5567 {
5568 	int ret;
5569 
5570 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5571 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5572 	if (!ret) {
5573 		mc.precharge += count;
5574 		return ret;
5575 	}
5576 
5577 	/* Try charges one by one with reclaim, but do not retry */
5578 	while (count--) {
5579 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5580 		if (ret)
5581 			return ret;
5582 		mc.precharge++;
5583 		cond_resched();
5584 	}
5585 	return 0;
5586 }
5587 
5588 union mc_target {
5589 	struct page	*page;
5590 	swp_entry_t	ent;
5591 };
5592 
5593 enum mc_target_type {
5594 	MC_TARGET_NONE = 0,
5595 	MC_TARGET_PAGE,
5596 	MC_TARGET_SWAP,
5597 	MC_TARGET_DEVICE,
5598 };
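
/*
 * get_mctgt_type() and get_mctgt_type_thp() below classify a pte/pmd into
 * one of these targets; mem_cgroup_move_charge_pte_range() then moves the
 * page charge (MC_TARGET_PAGE / MC_TARGET_DEVICE) or the swap record
 * (MC_TARGET_SWAP) accordingly.
 */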
5599 
5600 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5601 						unsigned long addr, pte_t ptent)
5602 {
5603 	struct page *page = vm_normal_page(vma, addr, ptent);
5604 
5605 	if (!page || !page_mapped(page))
5606 		return NULL;
5607 	if (PageAnon(page)) {
5608 		if (!(mc.flags & MOVE_ANON))
5609 			return NULL;
5610 	} else {
5611 		if (!(mc.flags & MOVE_FILE))
5612 			return NULL;
5613 	}
5614 	if (!get_page_unless_zero(page))
5615 		return NULL;
5616 
5617 	return page;
5618 }
5619 
5620 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5621 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5622 			pte_t ptent, swp_entry_t *entry)
5623 {
5624 	struct page *page = NULL;
5625 	swp_entry_t ent = pte_to_swp_entry(ptent);
5626 
5627 	if (!(mc.flags & MOVE_ANON))
5628 		return NULL;
5629 
5630 	/*
5631 	 * Handle MEMORY_DEVICE_PRIVATE, which are ZONE_DEVICE pages belonging to
5632 	 * a device; because they are not accessible by the CPU, they are stored
5633 	 * as special swap entries in the CPU page table.
5634 	 */
5635 	if (is_device_private_entry(ent)) {
5636 		page = device_private_entry_to_page(ent);
5637 		/*
5638 		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5639 		 * a refcount of 1 when free (unlike a normal page).
5640 		 */
5641 		if (!page_ref_add_unless(page, 1, 1))
5642 			return NULL;
5643 		return page;
5644 	}
5645 
5646 	if (non_swap_entry(ent))
5647 		return NULL;
5648 
5649 	/*
5650 	 * Because lookup_swap_cache() updates some statistics counter,
5651 	 * we call find_get_page() with swapper_space directly.
5652 	 */
5653 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5654 	entry->val = ent.val;
5655 
5656 	return page;
5657 }
5658 #else
5659 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5660 			pte_t ptent, swp_entry_t *entry)
5661 {
5662 	return NULL;
5663 }
5664 #endif
5665 
5666 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5667 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5668 {
5669 	if (!vma->vm_file) /* anonymous vma */
5670 		return NULL;
5671 	if (!(mc.flags & MOVE_FILE))
5672 		return NULL;
5673 
5674 	/* The page is moved even if it's not RSS of this task (page-faulted). */
5675 	/* shmem/tmpfs may report page out on swap: account for that too. */
5676 	return find_get_incore_page(vma->vm_file->f_mapping,
5677 			linear_page_index(vma, addr));
5678 }
5679 
5680 /**
5681  * mem_cgroup_move_account - move account of the page
5682  * @page: the page
5683  * @compound: charge the page as compound or small page
5684  * @from: mem_cgroup which the page is moved from.
5685  * @to:	mem_cgroup which the page is moved to. @from != @to.
5686  *
5687  * The caller must make sure the page is not on LRU (isolate_page() is useful.)
5688  *
5689  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5690  * from old cgroup.
5691  */
5692 static int mem_cgroup_move_account(struct page *page,
5693 				   bool compound,
5694 				   struct mem_cgroup *from,
5695 				   struct mem_cgroup *to)
5696 {
5697 	struct lruvec *from_vec, *to_vec;
5698 	struct pglist_data *pgdat;
5699 	unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5700 	int ret;
5701 
5702 	VM_BUG_ON(from == to);
5703 	VM_BUG_ON_PAGE(PageLRU(page), page);
5704 	VM_BUG_ON(compound && !PageTransHuge(page));
5705 
5706 	/*
5707 	 * Prevent mem_cgroup_migrate() from looking at
5708 	 * page->mem_cgroup of its source page while we change it.
5709 	 */
5710 	ret = -EBUSY;
5711 	if (!trylock_page(page))
5712 		goto out;
5713 
5714 	ret = -EINVAL;
5715 	if (page->mem_cgroup != from)
5716 		goto out_unlock;
5717 
5718 	pgdat = page_pgdat(page);
5719 	from_vec = mem_cgroup_lruvec(from, pgdat);
5720 	to_vec = mem_cgroup_lruvec(to, pgdat);
5721 
5722 	lock_page_memcg(page);
5723 
5724 	if (PageAnon(page)) {
5725 		if (page_mapped(page)) {
5726 			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5727 			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5728 			if (PageTransHuge(page)) {
5729 				__dec_lruvec_state(from_vec, NR_ANON_THPS);
5730 				__inc_lruvec_state(to_vec, NR_ANON_THPS);
5731 			}
5732 
5733 		}
5734 	} else {
5735 		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5736 		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5737 
5738 		if (PageSwapBacked(page)) {
5739 			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5740 			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5741 		}
5742 
5743 		if (page_mapped(page)) {
5744 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5745 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5746 		}
5747 
5748 		if (PageDirty(page)) {
5749 			struct address_space *mapping = page_mapping(page);
5750 
5751 			if (mapping_can_writeback(mapping)) {
5752 				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5753 						   -nr_pages);
5754 				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5755 						   nr_pages);
5756 			}
5757 		}
5758 	}
5759 
5760 	if (PageWriteback(page)) {
5761 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5762 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5763 	}
5764 
5765 	/*
5766 	 * All state has been migrated, let's switch to the new memcg.
5767 	 *
5768 	 * It is safe to change page->mem_cgroup here because the page
5769 	 * is referenced, charged, isolated, and locked: we can't race
5770 	 * with (un)charging, migration, LRU putback, or anything else
5771 	 * that would rely on a stable page->mem_cgroup.
5772 	 *
5773 	 * Note that lock_page_memcg is a memcg lock, not a page lock,
5774 	 * to save space. As soon as we switch page->mem_cgroup to a
5775 	 * new memcg that isn't locked, the above state can change
5776 	 * concurrently again. Make sure we're truly done with it.
5777 	 */
5778 	smp_mb();
5779 
5780 	css_get(&to->css);
5781 	css_put(&from->css);
5782 
5783 	page->mem_cgroup = to;
5784 
5785 	__unlock_page_memcg(from);
5786 
5787 	ret = 0;
5788 
5789 	local_irq_disable();
5790 	mem_cgroup_charge_statistics(to, page, nr_pages);
5791 	memcg_check_events(to, page);
5792 	mem_cgroup_charge_statistics(from, page, -nr_pages);
5793 	memcg_check_events(from, page);
5794 	local_irq_enable();
5795 out_unlock:
5796 	unlock_page(page);
5797 out:
5798 	return ret;
5799 }
5800 
5801 /**
5802  * get_mctgt_type - get target type of moving charge
5803  * @vma: the vma the pte to be checked belongs
5804  * @addr: the address corresponding to the pte to be checked
5805  * @ptent: the pte to be checked
5806  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5807  *
5808  * Returns
5809  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5810  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5811  *     move charge. If @target is not NULL, the page is stored in target->page
5812  *     with an extra refcount taken (callers should handle it).
5813  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5814  *     target for charge migration. If @target is not NULL, the entry is stored
5815  *     in target->ent.
5816  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
5817  *     (so a ZONE_DEVICE page and thus not on the lru).
5818  *     For now such a page is charged like a regular page would be, as for all
5819  *     intents and purposes it is just special memory taking the place of a
5820  *     regular page.
5821  *
5822  *     See Documentation/vm/hmm.txt and include/linux/hmm.h
5823  *
5824  * Called with pte lock held.
5825  */
5826 
5827 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5828 		unsigned long addr, pte_t ptent, union mc_target *target)
5829 {
5830 	struct page *page = NULL;
5831 	enum mc_target_type ret = MC_TARGET_NONE;
5832 	swp_entry_t ent = { .val = 0 };
5833 
5834 	if (pte_present(ptent))
5835 		page = mc_handle_present_pte(vma, addr, ptent);
5836 	else if (is_swap_pte(ptent))
5837 		page = mc_handle_swap_pte(vma, ptent, &ent);
5838 	else if (pte_none(ptent))
5839 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5840 
5841 	if (!page && !ent.val)
5842 		return ret;
5843 	if (page) {
5844 		/*
5845 		 * Do only loose check w/o serialization.
5846 		 * mem_cgroup_move_account() checks the page is valid or
5847 		 * not under LRU exclusion.
5848 		 */
5849 		if (page->mem_cgroup == mc.from) {
5850 			ret = MC_TARGET_PAGE;
5851 			if (is_device_private_page(page))
5852 				ret = MC_TARGET_DEVICE;
5853 			if (target)
5854 				target->page = page;
5855 		}
5856 		if (!ret || !target)
5857 			put_page(page);
5858 	}
5859 	/*
5860 	 * There is a swap entry and a page doesn't exist or isn't charged.
5861 	 * But we cannot move a tail-page in a THP.
5862 	 */
5863 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5864 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5865 		ret = MC_TARGET_SWAP;
5866 		if (target)
5867 			target->ent = ent;
5868 	}
5869 	return ret;
5870 }
5871 
5872 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5873 /*
5874  * We don't consider PMD mapped swapping or file mapped pages because THP does
5875  * not support them for now.
5876  * Caller should make sure that pmd_trans_huge(pmd) is true.
5877  */
5878 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5879 		unsigned long addr, pmd_t pmd, union mc_target *target)
5880 {
5881 	struct page *page = NULL;
5882 	enum mc_target_type ret = MC_TARGET_NONE;
5883 
5884 	if (unlikely(is_swap_pmd(pmd))) {
5885 		VM_BUG_ON(thp_migration_supported() &&
5886 				  !is_pmd_migration_entry(pmd));
5887 		return ret;
5888 	}
5889 	page = pmd_page(pmd);
5890 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5891 	if (!(mc.flags & MOVE_ANON))
5892 		return ret;
5893 	if (page->mem_cgroup == mc.from) {
5894 		ret = MC_TARGET_PAGE;
5895 		if (target) {
5896 			get_page(page);
5897 			target->page = page;
5898 		}
5899 	}
5900 	return ret;
5901 }
5902 #else
5903 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5904 		unsigned long addr, pmd_t pmd, union mc_target *target)
5905 {
5906 	return MC_TARGET_NONE;
5907 }
5908 #endif
5909 
5910 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5911 					unsigned long addr, unsigned long end,
5912 					struct mm_walk *walk)
5913 {
5914 	struct vm_area_struct *vma = walk->vma;
5915 	pte_t *pte;
5916 	spinlock_t *ptl;
5917 
5918 	ptl = pmd_trans_huge_lock(pmd, vma);
5919 	if (ptl) {
5920 		/*
5921 		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
5922 		 * support transparent huge page with MEMORY_DEVICE_PRIVATE but
5923 		 * this might change.
5924 		 */
5925 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5926 			mc.precharge += HPAGE_PMD_NR;
5927 		spin_unlock(ptl);
5928 		return 0;
5929 	}
5930 
5931 	if (pmd_trans_unstable(pmd))
5932 		return 0;
5933 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5934 	for (; addr != end; pte++, addr += PAGE_SIZE)
5935 		if (get_mctgt_type(vma, addr, *pte, NULL))
5936 			mc.precharge++;	/* increment precharge temporarily */
5937 	pte_unmap_unlock(pte - 1, ptl);
5938 	cond_resched();
5939 
5940 	return 0;
5941 }
5942 
5943 static const struct mm_walk_ops precharge_walk_ops = {
5944 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5945 };
5946 
5947 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5948 {
5949 	unsigned long precharge;
5950 
5951 	mmap_read_lock(mm);
5952 	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5953 	mmap_read_unlock(mm);
5954 
5955 	precharge = mc.precharge;
5956 	mc.precharge = 0;
5957 
5958 	return precharge;
5959 }
5960 
5961 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5962 {
5963 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5964 
5965 	VM_BUG_ON(mc.moving_task);
5966 	mc.moving_task = current;
5967 	return mem_cgroup_do_precharge(precharge);
5968 }
5969 
5970 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5971 static void __mem_cgroup_clear_mc(void)
5972 {
5973 	struct mem_cgroup *from = mc.from;
5974 	struct mem_cgroup *to = mc.to;
5975 
5976 	/* we must uncharge all the leftover precharges from mc.to */
5977 	if (mc.precharge) {
5978 		cancel_charge(mc.to, mc.precharge);
5979 		mc.precharge = 0;
5980 	}
5981 	/*
5982 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5983 	 * we must uncharge here.
5984 	 */
5985 	if (mc.moved_charge) {
5986 		cancel_charge(mc.from, mc.moved_charge);
5987 		mc.moved_charge = 0;
5988 	}
5989 	/* we must fixup refcnts and charges */
5990 	if (mc.moved_swap) {
5991 		/* uncharge swap account from the old cgroup */
5992 		if (!mem_cgroup_is_root(mc.from))
5993 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5994 
5995 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5996 
5997 		/*
5998 		 * we charged both to->memory and to->memsw, so we
5999 		 * should uncharge to->memory.
6000 		 */
6001 		if (!mem_cgroup_is_root(mc.to))
6002 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6003 
6004 		mc.moved_swap = 0;
6005 	}
6006 	memcg_oom_recover(from);
6007 	memcg_oom_recover(to);
6008 	wake_up_all(&mc.waitq);
6009 }
6010 
6011 static void mem_cgroup_clear_mc(void)
6012 {
6013 	struct mm_struct *mm = mc.mm;
6014 
6015 	/*
6016 	 * we must clear moving_task before waking up waiters at the end of
6017 	 * task migration.
6018 	 */
6019 	mc.moving_task = NULL;
6020 	__mem_cgroup_clear_mc();
6021 	spin_lock(&mc.lock);
6022 	mc.from = NULL;
6023 	mc.to = NULL;
6024 	mc.mm = NULL;
6025 	spin_unlock(&mc.lock);
6026 
6027 	mmput(mm);
6028 }
6029 
6030 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6031 {
6032 	struct cgroup_subsys_state *css;
6033 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6034 	struct mem_cgroup *from;
6035 	struct task_struct *leader, *p;
6036 	struct mm_struct *mm;
6037 	unsigned long move_flags;
6038 	int ret = 0;
6039 
6040 	/* charge immigration isn't supported on the default hierarchy */
6041 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6042 		return 0;
6043 
6044 	/*
6045 	 * Multi-process migrations only happen on the default hierarchy
6046 	 * where charge immigration is not used.  Perform charge
6047 	 * immigration if @tset contains a leader and whine if there are
6048 	 * multiple.
6049 	 */
6050 	p = NULL;
6051 	cgroup_taskset_for_each_leader(leader, css, tset) {
6052 		WARN_ON_ONCE(p);
6053 		p = leader;
6054 		memcg = mem_cgroup_from_css(css);
6055 	}
6056 	if (!p)
6057 		return 0;
6058 
6059 	/*
6060 	 * We are now committed to this value whatever it is. Changes in this
6061 	 * tunable will only affect upcoming migrations, not the current one.
6062 	 * So we need to save it, and keep it going.
6063 	 */
6064 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6065 	if (!move_flags)
6066 		return 0;
6067 
6068 	from = mem_cgroup_from_task(p);
6069 
6070 	VM_BUG_ON(from == memcg);
6071 
6072 	mm = get_task_mm(p);
6073 	if (!mm)
6074 		return 0;
6075 	/* We move charges only when we move the owner of the mm */
6076 	if (mm->owner == p) {
6077 		VM_BUG_ON(mc.from);
6078 		VM_BUG_ON(mc.to);
6079 		VM_BUG_ON(mc.precharge);
6080 		VM_BUG_ON(mc.moved_charge);
6081 		VM_BUG_ON(mc.moved_swap);
6082 
6083 		spin_lock(&mc.lock);
6084 		mc.mm = mm;
6085 		mc.from = from;
6086 		mc.to = memcg;
6087 		mc.flags = move_flags;
6088 		spin_unlock(&mc.lock);
6089 		/* We set mc.moving_task later */
6090 
6091 		ret = mem_cgroup_precharge_mc(mm);
6092 		if (ret)
6093 			mem_cgroup_clear_mc();
6094 	} else {
6095 		mmput(mm);
6096 	}
6097 	return ret;
6098 }
6099 
6100 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6101 {
6102 	if (mc.to)
6103 		mem_cgroup_clear_mc();
6104 }
6105 
6106 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6107 				unsigned long addr, unsigned long end,
6108 				struct mm_walk *walk)
6109 {
6110 	int ret = 0;
6111 	struct vm_area_struct *vma = walk->vma;
6112 	pte_t *pte;
6113 	spinlock_t *ptl;
6114 	enum mc_target_type target_type;
6115 	union mc_target target;
6116 	struct page *page;
6117 
6118 	ptl = pmd_trans_huge_lock(pmd, vma);
6119 	if (ptl) {
6120 		if (mc.precharge < HPAGE_PMD_NR) {
6121 			spin_unlock(ptl);
6122 			return 0;
6123 		}
6124 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6125 		if (target_type == MC_TARGET_PAGE) {
6126 			page = target.page;
6127 			if (!isolate_lru_page(page)) {
6128 				if (!mem_cgroup_move_account(page, true,
6129 							     mc.from, mc.to)) {
6130 					mc.precharge -= HPAGE_PMD_NR;
6131 					mc.moved_charge += HPAGE_PMD_NR;
6132 				}
6133 				putback_lru_page(page);
6134 			}
6135 			put_page(page);
6136 		} else if (target_type == MC_TARGET_DEVICE) {
6137 			page = target.page;
6138 			if (!mem_cgroup_move_account(page, true,
6139 						     mc.from, mc.to)) {
6140 				mc.precharge -= HPAGE_PMD_NR;
6141 				mc.moved_charge += HPAGE_PMD_NR;
6142 			}
6143 			put_page(page);
6144 		}
6145 		spin_unlock(ptl);
6146 		return 0;
6147 	}
6148 
6149 	if (pmd_trans_unstable(pmd))
6150 		return 0;
6151 retry:
6152 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6153 	for (; addr != end; addr += PAGE_SIZE) {
6154 		pte_t ptent = *(pte++);
6155 		bool device = false;
6156 		swp_entry_t ent;
6157 
6158 		if (!mc.precharge)
6159 			break;
6160 
6161 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6162 		case MC_TARGET_DEVICE:
6163 			device = true;
6164 			fallthrough;
6165 		case MC_TARGET_PAGE:
6166 			page = target.page;
6167 			/*
6168 			 * We can have a part of the split pmd here. Moving it
6169 			 * can be done but it would be too convoluted so simply
6170 			 * ignore such a partial THP and keep it in original
6171 			 * memcg. There should be somebody mapping the head.
6172 			 */
6173 			if (PageTransCompound(page))
6174 				goto put;
6175 			if (!device && isolate_lru_page(page))
6176 				goto put;
6177 			if (!mem_cgroup_move_account(page, false,
6178 						mc.from, mc.to)) {
6179 				mc.precharge--;
6180 				/* we uncharge from mc.from later. */
6181 				mc.moved_charge++;
6182 			}
6183 			if (!device)
6184 				putback_lru_page(page);
6185 put:			/* get_mctgt_type() gets the page */
6186 			put_page(page);
6187 			break;
6188 		case MC_TARGET_SWAP:
6189 			ent = target.ent;
6190 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6191 				mc.precharge--;
6192 				mem_cgroup_id_get_many(mc.to, 1);
6193 				/* we fixup other refcnts and charges later. */
6194 				mc.moved_swap++;
6195 			}
6196 			break;
6197 		default:
6198 			break;
6199 		}
6200 	}
6201 	pte_unmap_unlock(pte - 1, ptl);
6202 	cond_resched();
6203 
6204 	if (addr != end) {
6205 		/*
6206 		 * We have consumed all precharges we got in can_attach().
6207 		 * We try charge one by one, but don't do any additional
6208 		 * charges to mc.to if we have failed in charge once in attach()
6209 		 * phase.
6210 		 */
6211 		ret = mem_cgroup_do_precharge(1);
6212 		if (!ret)
6213 			goto retry;
6214 	}
6215 
6216 	return ret;
6217 }
6218 
6219 static const struct mm_walk_ops charge_walk_ops = {
6220 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6221 };
6222 
6223 static void mem_cgroup_move_charge(void)
6224 {
6225 	lru_add_drain_all();
6226 	/*
6227 	 * Signal lock_page_memcg() to take the memcg's move_lock
6228 	 * while we're moving its pages to another memcg. Then wait
6229 	 * for already started RCU-only updates to finish.
6230 	 */
6231 	atomic_inc(&mc.from->moving_account);
6232 	synchronize_rcu();
6233 retry:
6234 	if (unlikely(!mmap_read_trylock(mc.mm))) {
6235 		/*
6236 		 * Someone who is holding the mmap_lock might be waiting in the
6237 		 * waitq. So we cancel all extra charges, wake up all waiters,
6238 		 * and retry. Because we cancel precharges, we might not be able
6239 		 * to move enough charges, but moving charge is a best-effort
6240 		 * feature anyway, so it wouldn't be a big problem.
6241 		 */
6242 		__mem_cgroup_clear_mc();
6243 		cond_resched();
6244 		goto retry;
6245 	}
6246 	/*
6247 	 * When we have consumed all precharges and failed in doing
6248 	 * additional charge, the page walk just aborts.
6249 	 */
6250 	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6251 			NULL);
6252 
6253 	mmap_read_unlock(mc.mm);
6254 	atomic_dec(&mc.from->moving_account);
6255 }
6256 
6257 static void mem_cgroup_move_task(void)
6258 {
6259 	if (mc.to) {
6260 		mem_cgroup_move_charge();
6261 		mem_cgroup_clear_mc();
6262 	}
6263 }
6264 #else	/* !CONFIG_MMU */
6265 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6266 {
6267 	return 0;
6268 }
6269 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6270 {
6271 }
6272 static void mem_cgroup_move_task(void)
6273 {
6274 }
6275 #endif
6276 
6277 /*
6278  * Cgroup retains root cgroups across [un]mount cycles making it necessary
6279  * to verify whether we're attached to the default hierarchy on each mount
6280  * attempt.
6281  */
6282 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
6283 {
6284 	/*
6285 	 * use_hierarchy is forced on the default hierarchy.  cgroup core
6286 	 * guarantees that @root doesn't have any children, so turning it
6287 	 * on for the root memcg is enough.
6288 	 */
6289 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6290 		root_mem_cgroup->use_hierarchy = true;
6291 	else
6292 		root_mem_cgroup->use_hierarchy = false;
6293 }
6294 
6295 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6296 {
6297 	if (value == PAGE_COUNTER_MAX)
6298 		seq_puts(m, "max\n");
6299 	else
6300 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6301 
6302 	return 0;
6303 }
6304 
6305 static u64 memory_current_read(struct cgroup_subsys_state *css,
6306 			       struct cftype *cft)
6307 {
6308 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6309 
6310 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6311 }
6312 
6313 static int memory_min_show(struct seq_file *m, void *v)
6314 {
6315 	return seq_puts_memcg_tunable(m,
6316 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6317 }
6318 
6319 static ssize_t memory_min_write(struct kernfs_open_file *of,
6320 				char *buf, size_t nbytes, loff_t off)
6321 {
6322 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6323 	unsigned long min;
6324 	int err;
6325 
6326 	buf = strstrip(buf);
6327 	err = page_counter_memparse(buf, "max", &min);
6328 	if (err)
6329 		return err;
6330 
6331 	page_counter_set_min(&memcg->memory, min);
6332 
6333 	return nbytes;
6334 }
6335 
6336 static int memory_low_show(struct seq_file *m, void *v)
6337 {
6338 	return seq_puts_memcg_tunable(m,
6339 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6340 }
6341 
6342 static ssize_t memory_low_write(struct kernfs_open_file *of,
6343 				char *buf, size_t nbytes, loff_t off)
6344 {
6345 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6346 	unsigned long low;
6347 	int err;
6348 
6349 	buf = strstrip(buf);
6350 	err = page_counter_memparse(buf, "max", &low);
6351 	if (err)
6352 		return err;
6353 
6354 	page_counter_set_low(&memcg->memory, low);
6355 
6356 	return nbytes;
6357 }
6358 
6359 static int memory_high_show(struct seq_file *m, void *v)
6360 {
6361 	return seq_puts_memcg_tunable(m,
6362 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6363 }
6364 
6365 static ssize_t memory_high_write(struct kernfs_open_file *of,
6366 				 char *buf, size_t nbytes, loff_t off)
6367 {
6368 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6369 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6370 	bool drained = false;
6371 	unsigned long high;
6372 	int err;
6373 
6374 	buf = strstrip(buf);
6375 	err = page_counter_memparse(buf, "max", &high);
6376 	if (err)
6377 		return err;
6378 
6379 	page_counter_set_high(&memcg->memory, high);
6380 
6381 	for (;;) {
6382 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6383 		unsigned long reclaimed;
6384 
6385 		if (nr_pages <= high)
6386 			break;
6387 
6388 		if (signal_pending(current))
6389 			break;
6390 
6391 		if (!drained) {
6392 			drain_all_stock(memcg);
6393 			drained = true;
6394 			continue;
6395 		}
6396 
6397 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6398 							 GFP_KERNEL, true);
6399 
6400 		if (!reclaimed && !nr_retries--)
6401 			break;
6402 	}
6403 
6404 	memcg_wb_domain_size_changed(memcg);
6405 	return nbytes;
6406 }
6407 
6408 static int memory_max_show(struct seq_file *m, void *v)
6409 {
6410 	return seq_puts_memcg_tunable(m,
6411 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6412 }
6413 
6414 static ssize_t memory_max_write(struct kernfs_open_file *of,
6415 				char *buf, size_t nbytes, loff_t off)
6416 {
6417 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6418 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6419 	bool drained = false;
6420 	unsigned long max;
6421 	int err;
6422 
6423 	buf = strstrip(buf);
6424 	err = page_counter_memparse(buf, "max", &max);
6425 	if (err)
6426 		return err;
6427 
6428 	xchg(&memcg->memory.max, max);
6429 
6430 	for (;;) {
6431 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6432 
6433 		if (nr_pages <= max)
6434 			break;
6435 
6436 		if (signal_pending(current))
6437 			break;
6438 
6439 		if (!drained) {
6440 			drain_all_stock(memcg);
6441 			drained = true;
6442 			continue;
6443 		}
6444 
6445 		if (nr_reclaims) {
6446 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6447 							  GFP_KERNEL, true))
6448 				nr_reclaims--;
6449 			continue;
6450 		}
6451 
6452 		memcg_memory_event(memcg, MEMCG_OOM);
6453 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6454 			break;
6455 	}
6456 
6457 	memcg_wb_domain_size_changed(memcg);
6458 	return nbytes;
6459 }
6460 
6461 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6462 {
6463 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6464 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6465 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6466 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6467 	seq_printf(m, "oom_kill %lu\n",
6468 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6469 }
6470 
6471 static int memory_events_show(struct seq_file *m, void *v)
6472 {
6473 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6474 
6475 	__memory_events_show(m, memcg->memory_events);
6476 	return 0;
6477 }
6478 
6479 static int memory_events_local_show(struct seq_file *m, void *v)
6480 {
6481 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6482 
6483 	__memory_events_show(m, memcg->memory_events_local);
6484 	return 0;
6485 }
6486 
6487 static int memory_stat_show(struct seq_file *m, void *v)
6488 {
6489 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6490 	char *buf;
6491 
6492 	buf = memory_stat_format(memcg);
6493 	if (!buf)
6494 		return -ENOMEM;
6495 	seq_puts(m, buf);
6496 	kfree(buf);
6497 	return 0;
6498 }
6499 
6500 #ifdef CONFIG_NUMA
6501 static int memory_numa_stat_show(struct seq_file *m, void *v)
6502 {
6503 	int i;
6504 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6505 
6506 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6507 		int nid;
6508 
6509 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6510 			continue;
6511 
6512 		seq_printf(m, "%s", memory_stats[i].name);
6513 		for_each_node_state(nid, N_MEMORY) {
6514 			u64 size;
6515 			struct lruvec *lruvec;
6516 
6517 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6518 			size = lruvec_page_state(lruvec, memory_stats[i].idx);
6519 			size *= memory_stats[i].ratio;
6520 			seq_printf(m, " N%d=%llu", nid, size);
6521 		}
6522 		seq_putc(m, '\n');
6523 	}
6524 
6525 	return 0;
6526 }
6527 #endif
6528 
6529 static int memory_oom_group_show(struct seq_file *m, void *v)
6530 {
6531 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6532 
6533 	seq_printf(m, "%d\n", memcg->oom_group);
6534 
6535 	return 0;
6536 }
6537 
6538 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6539 				      char *buf, size_t nbytes, loff_t off)
6540 {
6541 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6542 	int ret, oom_group;
6543 
6544 	buf = strstrip(buf);
6545 	if (!buf)
6546 		return -EINVAL;
6547 
6548 	ret = kstrtoint(buf, 0, &oom_group);
6549 	if (ret)
6550 		return ret;
6551 
6552 	if (oom_group != 0 && oom_group != 1)
6553 		return -EINVAL;
6554 
6555 	memcg->oom_group = oom_group;
6556 
6557 	return nbytes;
6558 }
6559 
6560 static struct cftype memory_files[] = {
6561 	{
6562 		.name = "current",
6563 		.flags = CFTYPE_NOT_ON_ROOT,
6564 		.read_u64 = memory_current_read,
6565 	},
6566 	{
6567 		.name = "min",
6568 		.flags = CFTYPE_NOT_ON_ROOT,
6569 		.seq_show = memory_min_show,
6570 		.write = memory_min_write,
6571 	},
6572 	{
6573 		.name = "low",
6574 		.flags = CFTYPE_NOT_ON_ROOT,
6575 		.seq_show = memory_low_show,
6576 		.write = memory_low_write,
6577 	},
6578 	{
6579 		.name = "high",
6580 		.flags = CFTYPE_NOT_ON_ROOT,
6581 		.seq_show = memory_high_show,
6582 		.write = memory_high_write,
6583 	},
6584 	{
6585 		.name = "max",
6586 		.flags = CFTYPE_NOT_ON_ROOT,
6587 		.seq_show = memory_max_show,
6588 		.write = memory_max_write,
6589 	},
6590 	{
6591 		.name = "events",
6592 		.flags = CFTYPE_NOT_ON_ROOT,
6593 		.file_offset = offsetof(struct mem_cgroup, events_file),
6594 		.seq_show = memory_events_show,
6595 	},
6596 	{
6597 		.name = "events.local",
6598 		.flags = CFTYPE_NOT_ON_ROOT,
6599 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6600 		.seq_show = memory_events_local_show,
6601 	},
6602 	{
6603 		.name = "stat",
6604 		.seq_show = memory_stat_show,
6605 	},
6606 #ifdef CONFIG_NUMA
6607 	{
6608 		.name = "numa_stat",
6609 		.seq_show = memory_numa_stat_show,
6610 	},
6611 #endif
6612 	{
6613 		.name = "oom.group",
6614 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6615 		.seq_show = memory_oom_group_show,
6616 		.write = memory_oom_group_write,
6617 	},
6618 	{ }	/* terminate */
6619 };
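
/*
 * For illustration: the table above is the cgroup v2 interface, surfaced
 * as memory.current, memory.min, memory.low, memory.high, memory.max,
 * memory.events, memory.stat and memory.oom.group.  The limit files accept
 * either a byte value or "max", parsed by page_counter_memparse() above;
 * e.g. writing "1G" to memory.high enables throttling at that size and
 * writing "max" disables it.
 */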
6620 
6621 struct cgroup_subsys memory_cgrp_subsys = {
6622 	.css_alloc = mem_cgroup_css_alloc,
6623 	.css_online = mem_cgroup_css_online,
6624 	.css_offline = mem_cgroup_css_offline,
6625 	.css_released = mem_cgroup_css_released,
6626 	.css_free = mem_cgroup_css_free,
6627 	.css_reset = mem_cgroup_css_reset,
6628 	.can_attach = mem_cgroup_can_attach,
6629 	.cancel_attach = mem_cgroup_cancel_attach,
6630 	.post_attach = mem_cgroup_move_task,
6631 	.bind = mem_cgroup_bind,
6632 	.dfl_cftypes = memory_files,
6633 	.legacy_cftypes = mem_cgroup_legacy_files,
6634 	.early_init = 0,
6635 };
6636 
6637 /*
6638  * This function calculates an individual cgroup's effective
6639  * protection which is derived from its own memory.min/low, its
6640  * parent's and siblings' settings, as well as the actual memory
6641  * distribution in the tree.
6642  *
6643  * The following rules apply to the effective protection values:
6644  *
6645  * 1. At the first level of reclaim, effective protection is equal to
6646  *    the declared protection in memory.min and memory.low.
6647  *
6648  * 2. To enable safe delegation of the protection configuration, at
6649  *    subsequent levels the effective protection is capped to the
6650  *    parent's effective protection.
6651  *
6652  * 3. To make complex and dynamic subtrees easier to configure, the
6653  *    user is allowed to overcommit the declared protection at a given
6654  *    level. If that is the case, the parent's effective protection is
6655  *    distributed to the children in proportion to how much protection
6656  *    they have declared and how much of it they are utilizing.
6657  *
6658  *    This makes distribution proportional, but also work-conserving:
6659  *    if one cgroup claims much more protection than it uses memory,
6660  *    the unused remainder is available to its siblings.
6661  *
6662  * 4. Conversely, when the declared protection is undercommitted at a
6663  *    given level, the distribution of the larger parental protection
6664  *    budget is NOT proportional. A cgroup's protection from a sibling
6665  *    is capped to its own memory.min/low setting.
6666  *
6667  * 5. However, to allow protecting recursive subtrees from each other
6668  *    without having to declare each individual cgroup's fixed share
6669  *    of the ancestor's claim to protection, any unutilized -
6670  *    "floating" - protection from up the tree is distributed in
6671  *    proportion to each cgroup's *usage*. This makes the protection
6672  *    neutral wrt sibling cgroups and lets them compete freely over
6673  *    the shared parental protection budget, but it protects the
6674  *    subtree as a whole from neighboring subtrees.
6675  *
6676  * Note that 4. and 5. are not in conflict: 4. is about protecting
6677  * against immediate siblings whereas 5. is about protecting against
6678  * neighboring subtrees.
6679  */
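
/*
 * Worked example for the overcommitted case (rule 3): if the parent's
 * effective protection is 10G and two children each declare and fully use
 * 6G of protection, then siblings_protected (12G) exceeds parent_effective
 * (10G), and each child's effective protection is scaled down to
 * 6G * 10G / 12G = 5G.
 */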
6680 static unsigned long effective_protection(unsigned long usage,
6681 					  unsigned long parent_usage,
6682 					  unsigned long setting,
6683 					  unsigned long parent_effective,
6684 					  unsigned long siblings_protected)
6685 {
6686 	unsigned long protected;
6687 	unsigned long ep;
6688 
6689 	protected = min(usage, setting);
6690 	/*
6691 	 * If all cgroups at this level combined claim and use more
6692 	 * protection than what the parent affords them, distribute
6693 	 * shares in proportion to utilization.
6694 	 *
6695 	 * We are using actual utilization rather than the statically
6696 	 * claimed protection in order to be work-conserving: claimed
6697 	 * but unused protection is available to siblings that would
6698 	 * otherwise get a smaller chunk than what they claimed.
6699 	 */
6700 	if (siblings_protected > parent_effective)
6701 		return protected * parent_effective / siblings_protected;
6702 
6703 	/*
6704 	 * Ok, utilized protection of all children is within what the
6705 	 * parent affords them, so we know whatever this child claims
6706 	 * and utilizes is effectively protected.
6707 	 *
6708 	 * If there is unprotected usage beyond this value, reclaim
6709 	 * will apply pressure in proportion to that amount.
6710 	 *
6711 	 * If there is unutilized protection, the cgroup will be fully
6712 	 * shielded from reclaim, but we do return a smaller value for
6713 	 * protection than what the group could enjoy in theory. This
6714 	 * is okay. With the overcommit distribution above, effective
6715 	 * protection is always dependent on how memory is actually
6716 	 * consumed among the siblings anyway.
6717 	 */
6718 	ep = protected;
6719 
6720 	/*
6721 	 * If the children aren't claiming (all of) the protection
6722 	 * afforded to them by the parent, distribute the remainder in
6723 	 * proportion to the (unprotected) memory of each cgroup. That
6724 	 * way, cgroups that aren't explicitly prioritized wrt each
6725 	 * other compete freely over the allowance, but they are
6726 	 * collectively protected from neighboring trees.
6727 	 *
6728 	 * We're using unprotected memory for the weight so that if
6729 	 * some cgroups DO claim explicit protection, we don't protect
6730 	 * the same bytes twice.
6731 	 *
6732 	 * Check both usage and parent_usage against the respective
6733 	 * protected values. One should imply the other, but they
6734 	 * aren't read atomically - make sure the division is sane.
6735 	 */
6736 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6737 		return ep;
6738 	if (parent_effective > siblings_protected &&
6739 	    parent_usage > siblings_protected &&
6740 	    usage > protected) {
6741 		unsigned long unclaimed;
6742 
6743 		unclaimed = parent_effective - siblings_protected;
6744 		unclaimed *= usage - protected;
6745 		unclaimed /= parent_usage - siblings_protected;
6746 
6747 		ep += unclaimed;
6748 	}
6749 
6750 	return ep;
6751 }
6752 
6753 /**
6754  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6755  * @root: the top ancestor of the sub-tree being checked
6756  * @memcg: the memory cgroup to check
6757  *
6758  * WARNING: This function is not stateless! It can only be used as part
6759  *          of a top-down tree iteration, not for isolated queries.
6760  */
6761 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6762 				     struct mem_cgroup *memcg)
6763 {
6764 	unsigned long usage, parent_usage;
6765 	struct mem_cgroup *parent;
6766 
6767 	if (mem_cgroup_disabled())
6768 		return;
6769 
6770 	if (!root)
6771 		root = root_mem_cgroup;
6772 
6773 	/*
6774 	 * Effective values of the reclaim targets are ignored so they
6775 	 * can be stale. Have a look at mem_cgroup_protection for more
6776 	 * details.
6777 	 * TODO: calculation should be more robust so that we do not need
6778 	 * that special casing.
6779 	 */
6780 	if (memcg == root)
6781 		return;
6782 
6783 	usage = page_counter_read(&memcg->memory);
6784 	if (!usage)
6785 		return;
6786 
6787 	parent = parent_mem_cgroup(memcg);
6788 	/* No parent means a non-hierarchical mode on v1 memcg */
6789 	if (!parent)
6790 		return;
6791 
6792 	if (parent == root) {
6793 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6794 		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6795 		return;
6796 	}
6797 
6798 	parent_usage = page_counter_read(&parent->memory);
6799 
6800 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6801 			READ_ONCE(memcg->memory.min),
6802 			READ_ONCE(parent->memory.emin),
6803 			atomic_long_read(&parent->memory.children_min_usage)));
6804 
6805 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6806 			READ_ONCE(memcg->memory.low),
6807 			READ_ONCE(parent->memory.elow),
6808 			atomic_long_read(&parent->memory.children_low_usage)));
6809 }
6810 
6811 /**
6812  * __mem_cgroup_charge - charge a newly allocated page to a cgroup
6813  * @page: page to charge
6814  * @mm: mm context of the victim
6815  * @gfp_mask: reclaim mode
6816  *
6817  * Try to charge @page to the memcg that @mm belongs to, reclaiming
6818  * pages according to @gfp_mask if necessary.
6819  *
6820  * Returns 0 on success. Otherwise, an error code is returned.
6821  */
6822 int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
6823 			gfp_t gfp_mask)
6824 {
6825 	unsigned int nr_pages = thp_nr_pages(page);
6826 	struct mem_cgroup *memcg = NULL;
6827 	int ret = 0;
6828 
6829 	if (PageSwapCache(page)) {
6830 		swp_entry_t ent = { .val = page_private(page), };
6831 		unsigned short id;
6832 
6833 		/*
6834 		 * Every swap fault against a single page tries to charge the
6835 		 * page; bail out as early as possible.  shmem_unuse() encounters
6836 		 * already charged pages, too.  page->mem_cgroup is protected
6837 		 * by the page lock, which serializes swap cache removal, which
6838 		 * in turn serializes uncharging.
6839 		 */
6840 		VM_BUG_ON_PAGE(!PageLocked(page), page);
6841 		if (compound_head(page)->mem_cgroup)
6842 			goto out;
6843 
6844 		id = lookup_swap_cgroup_id(ent);
6845 		rcu_read_lock();
6846 		memcg = mem_cgroup_from_id(id);
6847 		if (memcg && !css_tryget_online(&memcg->css))
6848 			memcg = NULL;
6849 		rcu_read_unlock();
6850 	}
6851 
6852 	if (!memcg)
6853 		memcg = get_mem_cgroup_from_mm(mm);
6854 
6855 	ret = try_charge(memcg, gfp_mask, nr_pages);
6856 	if (ret)
6857 		goto out_put;
6858 
6859 	css_get(&memcg->css);
6860 	commit_charge(page, memcg);
6861 
6862 	local_irq_disable();
6863 	mem_cgroup_charge_statistics(memcg, page, nr_pages);
6864 	memcg_check_events(memcg, page);
6865 	local_irq_enable();
6866 
6867 	/*
6868 	 * Cgroup1's unified memory+swap counter has been charged with the
6869 	 * new swapcache page, finish the transfer by uncharging the swap
6870 	 * slot. The swap slot would also get uncharged when it dies, but
6871 	 * it can stick around indefinitely and we'd count the page twice
6872 	 * the entire time.
6873 	 *
6874 	 * Cgroup2 has separate resource counters for memory and swap,
6875 	 * so this is a non-issue here. Memory and swap charge lifetimes
6876 	 * correspond 1:1 to page and swap slot lifetimes: we charge the
6877 	 * page to memory here, and uncharge swap when the slot is freed.
6878 	 */
6879 	if (do_memsw_account() && PageSwapCache(page)) {
6880 		swp_entry_t entry = { .val = page_private(page) };
6881 		/*
6882 		 * The swap entry might not get freed for a long time,
6883 		 * let's not wait for it.  The page already received a
6884 		 * memory+swap charge, drop the swap entry duplicate.
6885 		 */
6886 		mem_cgroup_uncharge_swap(entry, nr_pages);
6887 	}
6888 
6889 out_put:
6890 	css_put(&memcg->css);
6891 out:
6892 	return ret;
6893 }
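
/*
 * Editor's sketch (not part of this file): a typical charge site, assuming
 * the mem_cgroup_charge() wrapper in <linux/memcontrol.h> forwards here
 * when the controller is enabled:
 */
static int __maybe_unused charge_new_page_sketch(struct mm_struct *mm)
{
	struct page *page = alloc_page(GFP_HIGHUSER_MOVABLE);

	if (!page)
		return -ENOMEM;
	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
		put_page(page);		/* charge failed, give the page back */
		return -ENOMEM;
	}
	/* ... insert the page into the page cache or map it ... */
	return 0;
}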
6894 
6895 struct uncharge_gather {
6896 	struct mem_cgroup *memcg;
6897 	unsigned long nr_pages;
6898 	unsigned long pgpgout;
6899 	unsigned long nr_kmem;
6900 	struct page *dummy_page;
6901 };
6902 
6903 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6904 {
6905 	memset(ug, 0, sizeof(*ug));
6906 }
6907 
6908 static void uncharge_batch(const struct uncharge_gather *ug)
6909 {
6910 	unsigned long flags;
6911 
6912 	if (!mem_cgroup_is_root(ug->memcg)) {
6913 		page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
6914 		if (do_memsw_account())
6915 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
6916 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6917 			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6918 		memcg_oom_recover(ug->memcg);
6919 	}
6920 
6921 	local_irq_save(flags);
6922 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6923 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
6924 	memcg_check_events(ug->memcg, ug->dummy_page);
6925 	local_irq_restore(flags);
6926 
6927 	/* drop reference from uncharge_page */
6928 	css_put(&ug->memcg->css);
6929 }
6930 
6931 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6932 {
6933 	unsigned long nr_pages;
6934 
6935 	VM_BUG_ON_PAGE(PageLRU(page), page);
6936 
6937 	if (!page->mem_cgroup)
6938 		return;
6939 
6940 	/*
6941 	 * Nobody should be changing or seriously looking at
6942 	 * page->mem_cgroup at this point, we have fully
6943 	 * exclusive access to the page.
6944 	 */
6945 
6946 	if (ug->memcg != page->mem_cgroup) {
6947 		if (ug->memcg) {
6948 			uncharge_batch(ug);
6949 			uncharge_gather_clear(ug);
6950 		}
6951 		ug->memcg = page->mem_cgroup;
6952 
6953 		/* pairs with css_put in uncharge_batch */
6954 		css_get(&ug->memcg->css);
6955 	}
6956 
6957 	nr_pages = compound_nr(page);
6958 	ug->nr_pages += nr_pages;
6959 
6960 	if (!PageKmemcg(page)) {
6961 		ug->pgpgout++;
6962 	} else {
6963 		ug->nr_kmem += nr_pages;
6964 		__ClearPageKmemcg(page);
6965 	}
6966 
6967 	ug->dummy_page = page;
6968 	page->mem_cgroup = NULL;
6969 	css_put(&ug->memcg->css);
6970 }
6971 
6972 static void uncharge_list(struct list_head *page_list)
6973 {
6974 	struct uncharge_gather ug;
6975 	struct list_head *next;
6976 
6977 	uncharge_gather_clear(&ug);
6978 
6979 	/*
6980 	 * Note that the list can be a single page->lru; hence the
6981 	 * do-while loop instead of a simple list_for_each_entry().
6982 	 */
6983 	next = page_list->next;
6984 	do {
6985 		struct page *page;
6986 
6987 		page = list_entry(next, struct page, lru);
6988 		next = page->lru.next;
6989 
6990 		uncharge_page(page, &ug);
6991 	} while (next != page_list);
6992 
6993 	if (ug.memcg)
6994 		uncharge_batch(&ug);
6995 }
6996 
6997 /**
6998  * __mem_cgroup_uncharge - uncharge a page
6999  * @page: page to uncharge
7000  *
7001  * Uncharge a page previously charged with __mem_cgroup_charge().
7002  */
7003 void __mem_cgroup_uncharge(struct page *page)
7004 {
7005 	struct uncharge_gather ug;
7006 
7007 	/* Don't touch page->lru of any random page, pre-check: */
7008 	if (!page->mem_cgroup)
7009 		return;
7010 
7011 	uncharge_gather_clear(&ug);
7012 	uncharge_page(page, &ug);
7013 	uncharge_batch(&ug);
7014 }
7015 
7016 /**
7017  * __mem_cgroup_uncharge_list - uncharge a list of pages
7018  * @page_list: list of pages to uncharge
7019  *
7020  * Uncharge a list of pages previously charged with
7021  * __mem_cgroup_charge().
7022  */
7023 void __mem_cgroup_uncharge_list(struct list_head *page_list)
7024 {
7025 	if (!list_empty(page_list))
7026 		uncharge_list(page_list);
7027 }
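
/*
 * Editor's sketch (not part of this file): batched freeing paths such as
 * release_pages() gather pages on a private list and uncharge them in one
 * pass before handing them back to the allocator (simplified, hedged):
 */
static void __maybe_unused uncharge_list_sketch(struct list_head *pages_to_free)
{
	/* every page on the list has already dropped its last reference */
	mem_cgroup_uncharge_list(pages_to_free);
	free_unref_page_list(pages_to_free);
}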
7028 
7029 /**
7030  * mem_cgroup_migrate - charge a page's replacement
7031  * @oldpage: currently circulating page
7032  * @newpage: replacement page
7033  *
7034  * Charge @newpage as a replacement page for @oldpage. @oldpage will
7035  * be uncharged upon free.
7036  *
7037  * Both pages must be locked, @newpage->mapping must be set up.
7038  */
7039 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
7040 {
7041 	struct mem_cgroup *memcg;
7042 	unsigned int nr_pages;
7043 	unsigned long flags;
7044 
7045 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
7046 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
7047 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
7048 	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
7049 		       newpage);
7050 
7051 	if (mem_cgroup_disabled())
7052 		return;
7053 
7054 	/* Page cache replacement: new page already charged? */
7055 	if (newpage->mem_cgroup)
7056 		return;
7057 
7058 	/* Swapcache readahead pages can get replaced before being charged */
7059 	memcg = oldpage->mem_cgroup;
7060 	if (!memcg)
7061 		return;
7062 
7063 	/* Force-charge the new page. The old one will be freed soon */
7064 	nr_pages = thp_nr_pages(newpage);
7065 
7066 	page_counter_charge(&memcg->memory, nr_pages);
7067 	if (do_memsw_account())
7068 		page_counter_charge(&memcg->memsw, nr_pages);
7069 
7070 	css_get(&memcg->css);
7071 	commit_charge(newpage, memcg);
7072 
7073 	local_irq_save(flags);
7074 	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
7075 	memcg_check_events(memcg, newpage);
7076 	local_irq_restore(flags);
7077 }
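
/*
 * Editor's sketch (not part of this file): a page-cache replacement path
 * hands the charge over before the old page is freed, roughly like
 * replace_page_cache_page() (simplified):
 */
static void __maybe_unused replace_charge_sketch(struct page *old, struct page *new)
{
	/* both pages are locked and new->mapping is set up by the caller */
	mem_cgroup_migrate(old, new);
	/* the old page is uncharged once its last reference is dropped */
}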
7078 
7079 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7080 EXPORT_SYMBOL(memcg_sockets_enabled_key);
7081 
7082 void mem_cgroup_sk_alloc(struct sock *sk)
7083 {
7084 	struct mem_cgroup *memcg;
7085 
7086 	if (!mem_cgroup_sockets_enabled)
7087 		return;
7088 
7089 	/* Do not associate the sock with an unrelated interrupted task's memcg. */
7090 	if (in_interrupt())
7091 		return;
7092 
7093 	rcu_read_lock();
7094 	memcg = mem_cgroup_from_task(current);
7095 	if (memcg == root_mem_cgroup)
7096 		goto out;
7097 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7098 		goto out;
7099 	if (css_tryget(&memcg->css))
7100 		sk->sk_memcg = memcg;
7101 out:
7102 	rcu_read_unlock();
7103 }
7104 
7105 void mem_cgroup_sk_free(struct sock *sk)
7106 {
7107 	if (sk->sk_memcg)
7108 		css_put(&sk->sk_memcg->css);
7109 }
7110 
7111 /**
7112  * mem_cgroup_charge_skmem - charge socket memory
7113  * @memcg: memcg to charge
7114  * @nr_pages: number of pages to charge
7115  *
7116  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7117  * @memcg's configured limit, %false if the charge had to be forced.
7118  */
7119 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7120 {
7121 	gfp_t gfp_mask = GFP_KERNEL;
7122 
7123 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7124 		struct page_counter *fail;
7125 
7126 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7127 			memcg->tcpmem_pressure = 0;
7128 			return true;
7129 		}
7130 		page_counter_charge(&memcg->tcpmem, nr_pages);
7131 		memcg->tcpmem_pressure = 1;
7132 		return false;
7133 	}
7134 
7135 	/* Don't block in the packet receive path */
7136 	if (in_softirq())
7137 		gfp_mask = GFP_NOWAIT;
7138 
7139 	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7140 
7141 	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
7142 		return true;
7143 
7144 	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
7145 	return false;
7146 }
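
/*
 * Editor's sketch (not part of this file): the network stack charges
 * socket buffers roughly like __sk_mem_raise_allocated() in
 * net/core/sock.c (simplified, hedged):
 */
static bool __maybe_unused sk_charge_sketch(struct sock *sk, unsigned int nr_pages)
{
	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
		return true;	/* no memcg attached, nothing to account */

	/* false means the charge had to be forced: the caller should back off */
	return mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages);
}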
7147 
7148 /**
7149  * mem_cgroup_uncharge_skmem - uncharge socket memory
7150  * @memcg: memcg to uncharge
7151  * @nr_pages: number of pages to uncharge
7152  */
7153 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7154 {
7155 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7156 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7157 		return;
7158 	}
7159 
7160 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7161 
7162 	refill_stock(memcg, nr_pages);
7163 }
7164 
7165 static int __init cgroup_memory(char *s)
7166 {
7167 	char *token;
7168 
7169 	while ((token = strsep(&s, ",")) != NULL) {
7170 		if (!*token)
7171 			continue;
7172 		if (!strcmp(token, "nosocket"))
7173 			cgroup_memory_nosocket = true;
7174 		if (!strcmp(token, "nokmem"))
7175 			cgroup_memory_nokmem = true;
7176 	}
7177 	return 1;
7178 }
7179 __setup("cgroup.memory=", cgroup_memory);
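
/*
 * Example (editor's note): booting with
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * disables both socket memory accounting and kernel memory accounting;
 * tokens the parser above does not recognize are silently ignored.
 */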
7180 
7181 /*
7182  * subsys_initcall() for memory controller.
7183  *
7184  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7185  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7186  * basically everything that doesn't depend on a specific mem_cgroup structure
7187  * should be initialized from here.
7188  */
7189 static int __init mem_cgroup_init(void)
7190 {
7191 	int cpu, node;
7192 
7193 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7194 				  memcg_hotplug_cpu_dead);
7195 
7196 	for_each_possible_cpu(cpu)
7197 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7198 			  drain_local_stock);
7199 
7200 	for_each_node(node) {
7201 		struct mem_cgroup_tree_per_node *rtpn;
7202 
7203 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7204 				    node_online(node) ? node : NUMA_NO_NODE);
7205 
7206 		rtpn->rb_root = RB_ROOT;
7207 		rtpn->rb_rightmost = NULL;
7208 		spin_lock_init(&rtpn->lock);
7209 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7210 	}
7211 
7212 	return 0;
7213 }
7214 subsys_initcall(mem_cgroup_init);
7215 
7216 #ifdef CONFIG_MEMCG_SWAP
7217 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7218 {
7219 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7220 		/*
7221 		 * The root cgroup cannot be destroyed, so its refcount must
7222 		 * always be >= 1.
7223 		 */
7224 		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7225 			VM_BUG_ON(1);
7226 			break;
7227 		}
7228 		memcg = parent_mem_cgroup(memcg);
7229 		if (!memcg)
7230 			memcg = root_mem_cgroup;
7231 	}
7232 	return memcg;
7233 }
7234 
7235 /**
7236  * mem_cgroup_swapout - transfer a memsw charge to swap
7237  * @page: page whose memsw charge to transfer
7238  * @entry: swap entry to move the charge to
7239  *
7240  * Transfer the memsw charge of @page to @entry.
7241  */
7242 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7243 {
7244 	struct mem_cgroup *memcg, *swap_memcg;
7245 	unsigned int nr_entries;
7246 	unsigned short oldid;
7247 
7248 	VM_BUG_ON_PAGE(PageLRU(page), page);
7249 	VM_BUG_ON_PAGE(page_count(page), page);
7250 
7251 	if (mem_cgroup_disabled())
7252 		return;
7253 
7254 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7255 		return;
7256 
7257 	memcg = page->mem_cgroup;
7258 
7259 	/* Readahead page, never charged */
7260 	if (!memcg)
7261 		return;
7262 
7263 	/*
7264 	 * In case the memcg owning these pages has been offlined and doesn't
7265 	 * have an ID allocated to it anymore, charge the closest online
7266 	 * ancestor for the swap instead and transfer the memory+swap charge.
7267 	 */
7268 	swap_memcg = mem_cgroup_id_get_online(memcg);
7269 	nr_entries = thp_nr_pages(page);
7270 	/* Get references for the tail pages, too */
7271 	if (nr_entries > 1)
7272 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7273 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7274 				   nr_entries);
7275 	VM_BUG_ON_PAGE(oldid, page);
7276 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7277 
7278 	page->mem_cgroup = NULL;
7279 
7280 	if (!mem_cgroup_is_root(memcg))
7281 		page_counter_uncharge(&memcg->memory, nr_entries);
7282 
7283 	if (!cgroup_memory_noswap && memcg != swap_memcg) {
7284 		if (!mem_cgroup_is_root(swap_memcg))
7285 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7286 		page_counter_uncharge(&memcg->memsw, nr_entries);
7287 	}
7288 
7289 	/*
7290 	 * Interrupts should be disabled here because the caller holds the
7291 	 * i_pages lock which is taken with interrupts-off. It is
7292 	 * important here to have the interrupts disabled because it is the
7293 	 * only synchronisation we have for updating the per-CPU variables.
7294 	 */
7295 	VM_BUG_ON(!irqs_disabled());
7296 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
7297 	memcg_check_events(memcg, page);
7298 
7299 	css_put(&memcg->css);
7300 }
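
/*
 * Editor's sketch (not part of this file): the expected caller context is
 * reclaim removing a page from the swap cache with the i_pages lock held,
 * which is why interrupts are guaranteed to be off above.  Modelled on
 * __remove_mapping() in mm/vmscan.c (simplified, hedged):
 */
static void __maybe_unused swapout_caller_sketch(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	swp_entry_t swap = { .val = page_private(page) };
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);	/* interrupts off */
	mem_cgroup_swapout(page, swap);
	__delete_from_swap_cache(page, swap);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	put_swap_page(page, swap);	/* after dropping the xarray lock */
}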
7301 
7302 /**
7303  * __mem_cgroup_try_charge_swap - try charging swap space for a page
7304  * @page: page being added to swap
7305  * @entry: swap entry to charge
7306  *
7307  * Try to charge @page's memcg for the swap space at @entry.
7308  *
7309  * Returns 0 on success, -ENOMEM on failure.
7310  */
7311 int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7312 {
7313 	unsigned int nr_pages = thp_nr_pages(page);
7314 	struct page_counter *counter;
7315 	struct mem_cgroup *memcg;
7316 	unsigned short oldid;
7317 
7318 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7319 		return 0;
7320 
7321 	memcg = page->mem_cgroup;
7322 
7323 	/* Readahead page, never charged */
7324 	if (!memcg)
7325 		return 0;
7326 
7327 	if (!entry.val) {
7328 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7329 		return 0;
7330 	}
7331 
7332 	memcg = mem_cgroup_id_get_online(memcg);
7333 
7334 	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7335 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7336 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7337 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7338 		mem_cgroup_id_put(memcg);
7339 		return -ENOMEM;
7340 	}
7341 
7342 	/* Get references for the tail pages, too */
7343 	if (nr_pages > 1)
7344 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7345 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7346 	VM_BUG_ON_PAGE(oldid, page);
7347 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7348 
7349 	return 0;
7350 }
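
/*
 * Editor's sketch (not part of this file): roughly how the swap slot
 * allocator pairs slot allocation with the swap charge, assuming the
 * mem_cgroup_try_charge_swap() wrapper reaches the function above;
 * alloc_swap_slot() is a hypothetical stand-in for the slot-cache
 * internals of get_swap_page():
 */
static swp_entry_t __maybe_unused swap_charge_sketch(struct page *page)
{
	swp_entry_t entry = alloc_swap_slot(page);	/* hypothetical helper */

	if (entry.val && mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);	/* charge failed, free the slot */
		entry.val = 0;
	}
	return entry;
}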
7351 
7352 /**
7353  * __mem_cgroup_uncharge_swap - uncharge swap space
7354  * @entry: swap entry to uncharge
7355  * @nr_pages: the amount of swap space to uncharge
7356  */
7357 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7358 {
7359 	struct mem_cgroup *memcg;
7360 	unsigned short id;
7361 
7362 	id = swap_cgroup_record(entry, 0, nr_pages);
7363 	rcu_read_lock();
7364 	memcg = mem_cgroup_from_id(id);
7365 	if (memcg) {
7366 		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7367 			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7368 				page_counter_uncharge(&memcg->swap, nr_pages);
7369 			else
7370 				page_counter_uncharge(&memcg->memsw, nr_pages);
7371 		}
7372 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7373 		mem_cgroup_id_put_many(memcg, nr_pages);
7374 	}
7375 	rcu_read_unlock();
7376 }
7377 
7378 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7379 {
7380 	long nr_swap_pages = get_nr_swap_pages();
7381 
7382 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7383 		return nr_swap_pages;
7384 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7385 		nr_swap_pages = min_t(long, nr_swap_pages,
7386 				      READ_ONCE(memcg->swap.max) -
7387 				      page_counter_read(&memcg->swap));
7388 	return nr_swap_pages;
7389 }
7390 
7391 bool mem_cgroup_swap_full(struct page *page)
7392 {
7393 	struct mem_cgroup *memcg;
7394 
7395 	VM_BUG_ON_PAGE(!PageLocked(page), page);
7396 
7397 	if (vm_swap_full())
7398 		return true;
7399 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7400 		return false;
7401 
7402 	memcg = page->mem_cgroup;
7403 	if (!memcg)
7404 		return false;
7405 
7406 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7407 		unsigned long usage = page_counter_read(&memcg->swap);
7408 
7409 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7410 		    usage * 2 >= READ_ONCE(memcg->swap.max))
7411 			return true;
7412 	}
7413 
7414 	return false;
7415 }
7416 
7417 static int __init setup_swap_account(char *s)
7418 {
7419 	if (!strcmp(s, "1"))
7420 		cgroup_memory_noswap = 0;
7421 	else if (!strcmp(s, "0"))
7422 		cgroup_memory_noswap = 1;
7423 	return 1;
7424 }
7425 __setup("swapaccount=", setup_swap_account);
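
/*
 * Example (editor's note): "swapaccount=0" on the kernel command line sets
 * cgroup_memory_noswap and turns swap accounting off; "swapaccount=1"
 * explicitly re-enables it (the built-in default here, since
 * cgroup_memory_noswap starts out false).
 */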
7426 
7427 static u64 swap_current_read(struct cgroup_subsys_state *css,
7428 			     struct cftype *cft)
7429 {
7430 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7431 
7432 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7433 }
7434 
7435 static int swap_high_show(struct seq_file *m, void *v)
7436 {
7437 	return seq_puts_memcg_tunable(m,
7438 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7439 }
7440 
7441 static ssize_t swap_high_write(struct kernfs_open_file *of,
7442 			       char *buf, size_t nbytes, loff_t off)
7443 {
7444 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7445 	unsigned long high;
7446 	int err;
7447 
7448 	buf = strstrip(buf);
7449 	err = page_counter_memparse(buf, "max", &high);
7450 	if (err)
7451 		return err;
7452 
7453 	page_counter_set_high(&memcg->swap, high);
7454 
7455 	return nbytes;
7456 }
7457 
7458 static int swap_max_show(struct seq_file *m, void *v)
7459 {
7460 	return seq_puts_memcg_tunable(m,
7461 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7462 }
7463 
7464 static ssize_t swap_max_write(struct kernfs_open_file *of,
7465 			      char *buf, size_t nbytes, loff_t off)
7466 {
7467 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7468 	unsigned long max;
7469 	int err;
7470 
7471 	buf = strstrip(buf);
7472 	err = page_counter_memparse(buf, "max", &max);
7473 	if (err)
7474 		return err;
7475 
7476 	xchg(&memcg->swap.max, max);
7477 
7478 	return nbytes;
7479 }
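
/*
 * Example (editor's note): the handlers above back the cgroup v2 files
 * memory.swap.high and memory.swap.max.  From userspace (path hypothetical):
 *
 *	echo 512M > /sys/fs/cgroup/mygroup/memory.swap.max
 *	echo max  > /sys/fs/cgroup/mygroup/memory.swap.high
 *
 * page_counter_memparse() accepts a byte value with an optional K/M/G
 * suffix, or the literal "max" for "no limit".
 */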
7480 
7481 static int swap_events_show(struct seq_file *m, void *v)
7482 {
7483 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7484 
7485 	seq_printf(m, "high %lu\n",
7486 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7487 	seq_printf(m, "max %lu\n",
7488 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7489 	seq_printf(m, "fail %lu\n",
7490 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7491 
7492 	return 0;
7493 }
7494 
7495 static struct cftype swap_files[] = {
7496 	{
7497 		.name = "swap.current",
7498 		.flags = CFTYPE_NOT_ON_ROOT,
7499 		.read_u64 = swap_current_read,
7500 	},
7501 	{
7502 		.name = "swap.high",
7503 		.flags = CFTYPE_NOT_ON_ROOT,
7504 		.seq_show = swap_high_show,
7505 		.write = swap_high_write,
7506 	},
7507 	{
7508 		.name = "swap.max",
7509 		.flags = CFTYPE_NOT_ON_ROOT,
7510 		.seq_show = swap_max_show,
7511 		.write = swap_max_write,
7512 	},
7513 	{
7514 		.name = "swap.events",
7515 		.flags = CFTYPE_NOT_ON_ROOT,
7516 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7517 		.seq_show = swap_events_show,
7518 	},
7519 	{ }	/* terminate */
7520 };
7521 
7522 static struct cftype memsw_files[] = {
7523 	{
7524 		.name = "memsw.usage_in_bytes",
7525 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7526 		.read_u64 = mem_cgroup_read_u64,
7527 	},
7528 	{
7529 		.name = "memsw.max_usage_in_bytes",
7530 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7531 		.write = mem_cgroup_reset,
7532 		.read_u64 = mem_cgroup_read_u64,
7533 	},
7534 	{
7535 		.name = "memsw.limit_in_bytes",
7536 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7537 		.write = mem_cgroup_write,
7538 		.read_u64 = mem_cgroup_read_u64,
7539 	},
7540 	{
7541 		.name = "memsw.failcnt",
7542 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7543 		.write = mem_cgroup_reset,
7544 		.read_u64 = mem_cgroup_read_u64,
7545 	},
7546 	{ },	/* terminate */
7547 };
7548 
7549 /*
7550  * mem_cgroup_swap_init() must be a core_initcall() rather than a
7551  * subsys_initcall(): otherwise cgroup_memory_noswap could remain false
7552  * even when memcg is disabled via the "cgroup_disable=memory" boot
7553  * parameter, which may lead to a premature oops in
7554  * mem_cgroup_get_nr_swap_pages() in corner cases.
7555  */
7556 static int __init mem_cgroup_swap_init(void)
7557 {
7558 	/* No memory control -> no swap control */
7559 	if (mem_cgroup_disabled())
7560 		cgroup_memory_noswap = true;
7561 
7562 	if (cgroup_memory_noswap)
7563 		return 0;
7564 
7565 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7566 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7567 
7568 	return 0;
7569 }
7570 core_initcall(mem_cgroup_swap_init);
7571 
7572 #endif /* CONFIG_MEMCG_SWAP */
7573