1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 *
10 * Memory thresholds
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
13 *
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
17 *
18 * Native page reclaim
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 *
24 * Per memcg lru locking
25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26 */
27
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/mm_inline.h>
57 #include <linux/swap_cgroup.h>
58 #include <linux/cpu.h>
59 #include <linux/oom.h>
60 #include <linux/lockdep.h>
61 #include <linux/file.h>
62 #include <linux/tracehook.h>
63 #include <linux/psi.h>
64 #include <linux/seq_buf.h>
65 #include "internal.h"
66 #include <net/sock.h>
67 #include <net/ip.h>
68 #include "slab.h"
69
70 #include <linux/uaccess.h>
71
72 #include <trace/events/vmscan.h>
73 #include <trace/hooks/mm.h>
74
75 #include <trace/hooks/cgroup.h>
76
77 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
78 EXPORT_SYMBOL(memory_cgrp_subsys);
79
80 struct mem_cgroup *root_mem_cgroup __read_mostly;
81
82 /* Active memory cgroup to use from an interrupt context */
83 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
84 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
85
86 /* Socket memory accounting disabled? */
87 static bool cgroup_memory_nosocket __ro_after_init;
88
89 /* Kernel memory accounting disabled? */
90 bool cgroup_memory_nokmem __ro_after_init;
91
92 /* Whether the swap controller is active */
93 #ifdef CONFIG_MEMCG_SWAP
94 bool cgroup_memory_noswap __ro_after_init;
95 #else
96 #define cgroup_memory_noswap 1
97 #endif
98
99 #ifdef CONFIG_CGROUP_WRITEBACK
100 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
101 #endif
102
103 /* Whether legacy memory+swap accounting is active */
104 static bool do_memsw_account(void)
105 {
106 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
107 }
108
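/*
 * Number of page charge/uncharge events between successive checks of the
 * memory thresholds and of the soft limit (see mem_cgroup_event_ratelimit()).
 */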
109 #define THRESHOLDS_EVENTS_TARGET 128
110 #define SOFTLIMIT_EVENTS_TARGET 1024
111
112 /*
113 * Cgroups above their limits are maintained in an RB-tree, independent of
114 * their hierarchy representation
115 */
116
117 struct mem_cgroup_tree_per_node {
118 struct rb_root rb_root;
119 struct rb_node *rb_rightmost;
120 spinlock_t lock;
121 };
122
123 struct mem_cgroup_tree {
124 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
125 };
126
127 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
128
129 /* for OOM */
130 struct mem_cgroup_eventfd_list {
131 struct list_head list;
132 struct eventfd_ctx *eventfd;
133 };
134
135 /*
136 * cgroup_event represents events which userspace wants to receive.
137 */
138 struct mem_cgroup_event {
139 /*
140 * memcg which the event belongs to.
141 */
142 struct mem_cgroup *memcg;
143 /*
144 * eventfd to signal userspace about the event.
145 */
146 struct eventfd_ctx *eventfd;
147 /*
148 * Each of these is stored in a list by the cgroup.
149 */
150 struct list_head list;
151 /*
152 * register_event() callback will be used to add new userspace
153 * waiter for changes related to this event. Use eventfd_signal()
154 * on eventfd to send notification to userspace.
155 */
156 int (*register_event)(struct mem_cgroup *memcg,
157 struct eventfd_ctx *eventfd, const char *args);
158 /*
159 * unregister_event() callback will be called when userspace closes
160 * the eventfd or on cgroup removal. This callback must be set
161 * if you want to provide notification functionality.
162 */
163 void (*unregister_event)(struct mem_cgroup *memcg,
164 struct eventfd_ctx *eventfd);
165 /*
166 * All fields below are needed to unregister the event when
167 * userspace closes the eventfd.
168 */
169 poll_table pt;
170 wait_queue_head_t *wqh;
171 wait_queue_entry_t wait;
172 struct work_struct remove;
173 };
174
175 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
176 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
177
178 /* Stuff for moving charges at task migration. */
179 /*
180 * Types of charges to be moved.
181 */
182 #define MOVE_ANON 0x1U
183 #define MOVE_FILE 0x2U
184 #define MOVE_MASK (MOVE_ANON | MOVE_FILE)
185
186 /* "mc" and its members are protected by cgroup_mutex */
187 static struct move_charge_struct {
188 spinlock_t lock; /* for from, to */
189 struct mm_struct *mm;
190 struct mem_cgroup *from;
191 struct mem_cgroup *to;
192 unsigned long flags;
193 unsigned long precharge;
194 unsigned long moved_charge;
195 unsigned long moved_swap;
196 struct task_struct *moving_task; /* a task moving charges */
197 wait_queue_head_t waitq; /* a waitq for other context */
198 } mc = {
199 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
200 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
201 };
202
203 /*
204 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
205 * limit reclaim to prevent infinite loops, if they ever occur.
206 */
207 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
208 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
209
210 /* for encoding cft->private value on file */
211 enum res_type {
212 _MEM,
213 _MEMSWAP,
214 _OOM_TYPE,
215 _KMEM,
216 _TCP,
217 };
218
219 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
220 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
221 #define MEMFILE_ATTR(val) ((val) & 0xffff)
222 /* Used for OOM notifier */
223 #define OOM_CONTROL (0)
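/*
 * Illustrative example of the encoding above: a cft->private of
 * MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL) stores the resource type in the
 * high 16 bits and the attribute in the low 16 bits, so MEMFILE_TYPE()
 * recovers _OOM_TYPE and MEMFILE_ATTR() recovers OOM_CONTROL.
 */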
224
225 /*
226 * Iteration constructs for visiting all cgroups (under a tree). If
227 * loops are exited prematurely (break), mem_cgroup_iter_break() must
228 * be used for reference counting.
229 */
230 #define for_each_mem_cgroup_tree(iter, root) \
231 for (iter = mem_cgroup_iter(root, NULL, NULL); \
232 iter != NULL; \
233 iter = mem_cgroup_iter(root, iter, NULL))
234
235 #define for_each_mem_cgroup(iter) \
236 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
237 iter != NULL; \
238 iter = mem_cgroup_iter(NULL, iter, NULL))
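/*
 * Usage sketch (the predicate is hypothetical): walk a subtree and bail
 * out early, dropping the reference held on the current position:
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */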
239
240 static inline bool task_is_dying(void)
241 {
242 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
243 (current->flags & PF_EXITING);
244 }
245
246 /* Some nice accessors for the vmpressure. */
247 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
248 {
249 if (!memcg)
250 memcg = root_mem_cgroup;
251 return &memcg->vmpressure;
252 }
253
254 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
255 {
256 return container_of(vmpr, struct mem_cgroup, vmpressure);
257 }
258
259 #ifdef CONFIG_MEMCG_KMEM
260 static DEFINE_SPINLOCK(objcg_lock);
261
262 bool mem_cgroup_kmem_disabled(void)
263 {
264 return cgroup_memory_nokmem;
265 }
266
267 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
268 unsigned int nr_pages);
269
270 static void obj_cgroup_release(struct percpu_ref *ref)
271 {
272 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
273 unsigned int nr_bytes;
274 unsigned int nr_pages;
275 unsigned long flags;
276
277 /*
278 * At this point all allocated objects are freed, and
279 * objcg->nr_charged_bytes can't have an arbitrary byte value.
280 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
281 *
282 * The following sequence can lead to it:
283 * 1) CPU0: objcg == stock->cached_objcg
284 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
285 * PAGE_SIZE bytes are charged
286 * 3) CPU1: a process from another memcg is allocating something,
287 * the stock is flushed,
288 * objcg->nr_charged_bytes = PAGE_SIZE - 92
289 * 4) CPU0: we release this object,
290 * 92 bytes are added to stock->nr_bytes
291 * 5) CPU0: stock is flushed,
292 * 92 bytes are added to objcg->nr_charged_bytes
293 *
294 * As a result, nr_charged_bytes == PAGE_SIZE.
295 * This page will be uncharged in obj_cgroup_release().
296 */
297 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
298 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
299 nr_pages = nr_bytes >> PAGE_SHIFT;
300
301 if (nr_pages)
302 obj_cgroup_uncharge_pages(objcg, nr_pages);
303
304 spin_lock_irqsave(&objcg_lock, flags);
305 list_del(&objcg->list);
306 spin_unlock_irqrestore(&objcg_lock, flags);
307
308 percpu_ref_exit(ref);
309 kfree_rcu(objcg, rcu);
310 }
311
312 static struct obj_cgroup *obj_cgroup_alloc(void)
313 {
314 struct obj_cgroup *objcg;
315 int ret;
316
317 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
318 if (!objcg)
319 return NULL;
320
321 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
322 GFP_KERNEL);
323 if (ret) {
324 kfree(objcg);
325 return NULL;
326 }
327 INIT_LIST_HEAD(&objcg->list);
328 return objcg;
329 }
330
331 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
332 struct mem_cgroup *parent)
333 {
334 struct obj_cgroup *objcg, *iter;
335
336 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
337
338 spin_lock_irq(&objcg_lock);
339
340 /* 1) Ready to reparent active objcg. */
341 list_add(&objcg->list, &memcg->objcg_list);
342 /* 2) Reparent active objcg and already reparented objcgs to parent. */
343 list_for_each_entry(iter, &memcg->objcg_list, list)
344 WRITE_ONCE(iter->memcg, parent);
345 /* 3) Move already reparented objcgs to the parent's list */
346 list_splice(&memcg->objcg_list, &parent->objcg_list);
347
348 spin_unlock_irq(&objcg_lock);
349
350 percpu_ref_kill(&objcg->refcnt);
351 }
352
353 /*
354 * This will be used as a shrinker list's index.
355 * The main reason for not using the cgroup id for this is that a separate
356 * id works better in sparse environments, where we have a lot of memcgs
357 * but only a few of them are kmem-limited. If we had, for instance, 200
358 * memcgs and none but the 200th were kmem-limited, we would need a
359 * 200-entry array for that.
360 *
361 * The current size of the caches array is stored in memcg_nr_cache_ids. It
362 * will double each time we have to increase it.
363 */
364 static DEFINE_IDA(memcg_cache_ida);
365 int memcg_nr_cache_ids;
366
367 /* Protects memcg_nr_cache_ids */
368 static DECLARE_RWSEM(memcg_cache_ids_sem);
369
370 void memcg_get_cache_ids(void)
371 {
372 down_read(&memcg_cache_ids_sem);
373 }
374
375 void memcg_put_cache_ids(void)
376 {
377 up_read(&memcg_cache_ids_sem);
378 }
379
380 /*
381 * MIN_SIZE is different from 1 because we would like to avoid going through
382 * the alloc/free process all the time. In a small machine, 4 kmem-limited
383 * cgroups is a reasonable guess. In the future, it could be a parameter or
384 * tunable, but that is not strictly necessary.
385 *
386 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
387 * this constant directly from cgroup, but it is understandable that this is
388 * better kept as an internal representation in cgroup.c. In any case, the
389 * cgrp_id space is not getting any smaller, and we don't have to necessarily
390 * increase ours as well if it increases.
391 */
392 #define MEMCG_CACHES_MIN_SIZE 4
393 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
394
395 /*
396 * A lot of the calls to the cache allocation functions are expected to be
397 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
398 * conditional on this static branch, we have to allow modules that do
399 * kmem_cache_alloc and the like to see this symbol as well.
400 */
401 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
402 EXPORT_SYMBOL(memcg_kmem_enabled_key);
403 #endif
404
405 /**
406 * mem_cgroup_css_from_page - css of the memcg associated with a page
407 * @page: page of interest
408 *
409 * If memcg is bound to the default hierarchy, css of the memcg associated
410 * with @page is returned. The returned css remains associated with @page
411 * until it is released.
412 *
413 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
414 * is returned.
415 */
416 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
417 {
418 struct mem_cgroup *memcg;
419
420 memcg = page_memcg(page);
421
422 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
423 memcg = root_mem_cgroup;
424
425 return &memcg->css;
426 }
427
428 /**
429 * page_cgroup_ino - return inode number of the memcg a page is charged to
430 * @page: the page
431 *
432 * Look up the closest online ancestor of the memory cgroup @page is charged to
433 * and return its inode number or 0 if @page is not charged to any cgroup. It
434 * is safe to call this function without holding a reference to @page.
435 *
436 * Note, this function is inherently racy, because there is nothing to prevent
437 * the cgroup inode from getting torn down and potentially reallocated a moment
438 * after page_cgroup_ino() returns, so it should only be used by callers that
439 * do not care (such as procfs interfaces).
440 */
441 ino_t page_cgroup_ino(struct page *page)
442 {
443 struct mem_cgroup *memcg;
444 unsigned long ino = 0;
445
446 rcu_read_lock();
447 memcg = page_memcg_check(page);
448
449 while (memcg && !(memcg->css.flags & CSS_ONLINE))
450 memcg = parent_mem_cgroup(memcg);
451 if (memcg)
452 ino = cgroup_ino(memcg->css.cgroup);
453 rcu_read_unlock();
454 return ino;
455 }
456
457 static struct mem_cgroup_per_node *
458 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
459 {
460 int nid = page_to_nid(page);
461
462 return memcg->nodeinfo[nid];
463 }
464
465 static struct mem_cgroup_tree_per_node *
466 soft_limit_tree_node(int nid)
467 {
468 return soft_limit_tree.rb_tree_per_node[nid];
469 }
470
471 static struct mem_cgroup_tree_per_node *
472 soft_limit_tree_from_page(struct page *page)
473 {
474 int nid = page_to_nid(page);
475
476 return soft_limit_tree.rb_tree_per_node[nid];
477 }
478
479 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
480 struct mem_cgroup_tree_per_node *mctz,
481 unsigned long new_usage_in_excess)
482 {
483 struct rb_node **p = &mctz->rb_root.rb_node;
484 struct rb_node *parent = NULL;
485 struct mem_cgroup_per_node *mz_node;
486 bool rightmost = true;
487
488 if (mz->on_tree)
489 return;
490
491 mz->usage_in_excess = new_usage_in_excess;
492 if (!mz->usage_in_excess)
493 return;
494 while (*p) {
495 parent = *p;
496 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
497 tree_node);
498 if (mz->usage_in_excess < mz_node->usage_in_excess) {
499 p = &(*p)->rb_left;
500 rightmost = false;
501 } else {
502 p = &(*p)->rb_right;
503 }
504 }
505
506 if (rightmost)
507 mctz->rb_rightmost = &mz->tree_node;
508
509 rb_link_node(&mz->tree_node, parent, p);
510 rb_insert_color(&mz->tree_node, &mctz->rb_root);
511 mz->on_tree = true;
512 }
513
514 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
515 struct mem_cgroup_tree_per_node *mctz)
516 {
517 if (!mz->on_tree)
518 return;
519
520 if (&mz->tree_node == mctz->rb_rightmost)
521 mctz->rb_rightmost = rb_prev(&mz->tree_node);
522
523 rb_erase(&mz->tree_node, &mctz->rb_root);
524 mz->on_tree = false;
525 }
526
527 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
528 struct mem_cgroup_tree_per_node *mctz)
529 {
530 unsigned long flags;
531
532 spin_lock_irqsave(&mctz->lock, flags);
533 __mem_cgroup_remove_exceeded(mz, mctz);
534 spin_unlock_irqrestore(&mctz->lock, flags);
535 }
536
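/* Number of pages by which usage currently exceeds the soft limit, or 0. */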
537 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
538 {
539 unsigned long nr_pages = page_counter_read(&memcg->memory);
540 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
541 unsigned long excess = 0;
542
543 if (nr_pages > soft_limit)
544 excess = nr_pages - soft_limit;
545
546 return excess;
547 }
548
549 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
550 {
551 unsigned long excess;
552 struct mem_cgroup_per_node *mz;
553 struct mem_cgroup_tree_per_node *mctz;
554
555 if (lru_gen_enabled()) {
556 struct lruvec *lruvec = &mem_cgroup_page_nodeinfo(memcg, page)->lruvec;
557
558 /* see the comment on MEMCG_NR_GENS */
559 if (soft_limit_excess(memcg) && lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
560 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
561
562 return;
563 }
564
565 mctz = soft_limit_tree_from_page(page);
566 if (!mctz)
567 return;
568 /*
569 * Necessary to update all ancestors when hierarchy is used,
570 * because their event counters are not touched.
571 */
572 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
573 mz = mem_cgroup_page_nodeinfo(memcg, page);
574 excess = soft_limit_excess(memcg);
575 /*
576 * We have to update the tree if mz is on RB-tree or
577 * mem is over its softlimit.
578 */
579 if (excess || mz->on_tree) {
580 unsigned long flags;
581
582 spin_lock_irqsave(&mctz->lock, flags);
583 /* if on-tree, remove it */
584 if (mz->on_tree)
585 __mem_cgroup_remove_exceeded(mz, mctz);
586 /*
587 * Insert again. mz->usage_in_excess will be updated.
588 * If excess is 0, no tree ops.
589 */
590 __mem_cgroup_insert_exceeded(mz, mctz, excess);
591 spin_unlock_irqrestore(&mctz->lock, flags);
592 }
593 }
594 }
595
596 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
597 {
598 struct mem_cgroup_tree_per_node *mctz;
599 struct mem_cgroup_per_node *mz;
600 int nid;
601
602 for_each_node(nid) {
603 mz = memcg->nodeinfo[nid];
604 mctz = soft_limit_tree_node(nid);
605 if (mctz)
606 mem_cgroup_remove_exceeded(mz, mctz);
607 }
608 }
609
610 static struct mem_cgroup_per_node *
611 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
612 {
613 struct mem_cgroup_per_node *mz;
614
615 retry:
616 mz = NULL;
617 if (!mctz->rb_rightmost)
618 goto done; /* Nothing to reclaim from */
619
620 mz = rb_entry(mctz->rb_rightmost,
621 struct mem_cgroup_per_node, tree_node);
622 /*
623 * Remove the node now but someone else can add it back;
624 * we will add it back at the end of reclaim to its correct
625 * position in the tree.
626 */
627 __mem_cgroup_remove_exceeded(mz, mctz);
628 if (!soft_limit_excess(mz->memcg) ||
629 !css_tryget(&mz->memcg->css))
630 goto retry;
631 done:
632 return mz;
633 }
634
635 static struct mem_cgroup_per_node *
636 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
637 {
638 struct mem_cgroup_per_node *mz;
639
640 spin_lock_irq(&mctz->lock);
641 mz = __mem_cgroup_largest_soft_limit_node(mctz);
642 spin_unlock_irq(&mctz->lock);
643 return mz;
644 }
645
646 /*
647 * memcg and lruvec stats flushing
648 *
649 * Many codepaths leading to stats update or read are performance sensitive,
650 * and adding stats flushing in such codepaths is not desirable. So, to
651 * optimize the flushing, the kernel does:
652 *
653 * 1) Periodically and asynchronously flush the stats every 2 seconds so that
654 * the rstat update tree does not grow unbounded.
655 *
656 * 2) Flush the stats synchronously on the reader side only when there are
657 * more than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This lets the
658 * stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) events,
659 * but only for 2 seconds due to (1).
660 */
661 static void flush_memcg_stats_dwork(struct work_struct *w);
662 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
663 static DEFINE_SPINLOCK(stats_flush_lock);
664 static DEFINE_PER_CPU(unsigned int, stats_updates);
665 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
666 static u64 flush_next_time;
667
668 #define FLUSH_TIME (2UL*HZ)
669
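/*
 * Note that @memcg's stats changed by @val units: mark the memcg in the
 * rstat update tree and, once this CPU has accumulated more than
 * MEMCG_CHARGE_BATCH pending updates, bump the global flush threshold.
 */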
670 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
671 {
672 unsigned int x;
673
674 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
675
676 x = __this_cpu_add_return(stats_updates, abs(val));
677 if (x > MEMCG_CHARGE_BATCH) {
678 atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
679 __this_cpu_write(stats_updates, 0);
680 }
681 }
682
683 static void __mem_cgroup_flush_stats(void)
684 {
685 unsigned long flag;
686
687 if (!spin_trylock_irqsave(&stats_flush_lock, flag))
688 return;
689
690 flush_next_time = jiffies_64 + 2*FLUSH_TIME;
691 cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
692 atomic_set(&stats_flush_threshold, 0);
693 spin_unlock_irqrestore(&stats_flush_lock, flag);
694 }
695
696 void mem_cgroup_flush_stats(void)
697 {
698 if (atomic_read(&stats_flush_threshold) > num_online_cpus())
699 __mem_cgroup_flush_stats();
700 }
701
702 void mem_cgroup_flush_stats_delayed(void)
703 {
704 if (time_after64(jiffies_64, flush_next_time))
705 mem_cgroup_flush_stats();
706 }
707
708 static void flush_memcg_stats_dwork(struct work_struct *w)
709 {
710 __mem_cgroup_flush_stats();
711 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
712 }
713
714 /**
715 * __mod_memcg_state - update cgroup memory statistics
716 * @memcg: the memory cgroup
717 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
718 * @val: delta to add to the counter, can be negative
719 */
720 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
721 {
722 if (mem_cgroup_disabled())
723 return;
724
725 __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
726 memcg_rstat_updated(memcg, val);
727 }
728
729 /* idx can be of type enum memcg_stat_item or node_stat_item. */
730 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
731 {
732 long x = 0;
733 int cpu;
734
735 for_each_possible_cpu(cpu)
736 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
737 #ifdef CONFIG_SMP
738 if (x < 0)
739 x = 0;
740 #endif
741 return x;
742 }
743
744 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
745 int val)
746 {
747 struct mem_cgroup_per_node *pn;
748 struct mem_cgroup *memcg;
749
750 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
751 memcg = pn->memcg;
752
753 /* Update memcg */
754 __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
755
756 /* Update lruvec */
757 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
758
759 memcg_rstat_updated(memcg, val);
760 }
761
762 /**
763 * __mod_lruvec_state - update lruvec memory statistics
764 * @lruvec: the lruvec
765 * @idx: the stat item
766 * @val: delta to add to the counter, can be negative
767 *
768 * The lruvec is the intersection of the NUMA node and a cgroup. This
769 * function updates all three counters that are affected by a
770 * change of state at this level: per-node, per-cgroup, per-lruvec.
771 */
772 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
773 int val)
774 {
775 /* Update node */
776 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
777
778 /* Update memcg and lruvec */
779 if (!mem_cgroup_disabled())
780 __mod_memcg_lruvec_state(lruvec, idx, val);
781 }
782
783 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
784 int val)
785 {
786 struct page *head = compound_head(page); /* rmap on tail pages */
787 struct mem_cgroup *memcg;
788 pg_data_t *pgdat = page_pgdat(page);
789 struct lruvec *lruvec;
790
791 rcu_read_lock();
792 memcg = page_memcg(head);
793 /* Untracked pages have no memcg, no lruvec. Update only the node */
794 if (!memcg) {
795 rcu_read_unlock();
796 __mod_node_page_state(pgdat, idx, val);
797 return;
798 }
799
800 lruvec = mem_cgroup_lruvec(memcg, pgdat);
801 __mod_lruvec_state(lruvec, idx, val);
802 rcu_read_unlock();
803 }
804 EXPORT_SYMBOL(__mod_lruvec_page_state);
805
806 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
807 {
808 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
809 struct mem_cgroup *memcg;
810 struct lruvec *lruvec;
811
812 rcu_read_lock();
813 memcg = mem_cgroup_from_obj(p);
814
815 /*
816 * Untracked pages have no memcg, no lruvec. Update only the
817 * node. If we reparent the slab objects to the root memcg,
818 * when we free the slab object, we need to update the per-memcg
819 * vmstats to keep it correct for the root memcg.
820 */
821 if (!memcg) {
822 __mod_node_page_state(pgdat, idx, val);
823 } else {
824 lruvec = mem_cgroup_lruvec(memcg, pgdat);
825 __mod_lruvec_state(lruvec, idx, val);
826 }
827 rcu_read_unlock();
828 }
829
830 /*
831 * mod_objcg_mlstate() may be called with irqs enabled, so
832 * the irq-safe mod_memcg_lruvec_state() should be used.
833 */
834 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
835 struct pglist_data *pgdat,
836 enum node_stat_item idx, int nr)
837 {
838 struct mem_cgroup *memcg;
839 struct lruvec *lruvec;
840
841 rcu_read_lock();
842 memcg = obj_cgroup_memcg(objcg);
843 lruvec = mem_cgroup_lruvec(memcg, pgdat);
844 mod_memcg_lruvec_state(lruvec, idx, nr);
845 rcu_read_unlock();
846 }
847
848 /**
849 * __count_memcg_events - account VM events in a cgroup
850 * @memcg: the memory cgroup
851 * @idx: the event item
852 * @count: the number of events that occurred
853 */
854 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
855 unsigned long count)
856 {
857 if (mem_cgroup_disabled())
858 return;
859
860 __this_cpu_add(memcg->vmstats_percpu->events[idx], count);
861 memcg_rstat_updated(memcg, count);
862 }
863
864 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
865 {
866 return READ_ONCE(memcg->vmstats.events[event]);
867 }
868
869 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
870 {
871 long x = 0;
872 int cpu;
873
874 for_each_possible_cpu(cpu)
875 x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
876 return x;
877 }
878
879 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
880 struct page *page,
881 int nr_pages)
882 {
883 /* A pagein/pageout of a large page is a single event, so ignore the page size */
884 if (nr_pages > 0)
885 __count_memcg_events(memcg, PGPGIN, 1);
886 else {
887 __count_memcg_events(memcg, PGPGOUT, 1);
888 nr_pages = -nr_pages; /* for event */
889 }
890
891 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
892 }
893
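/*
 * Returns true when enough page events have elapsed since the last check
 * for @target (thresholds or soft limit) and advances the next target;
 * used to rate-limit the work done in memcg_check_events().
 */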
894 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
895 enum mem_cgroup_events_target target)
896 {
897 unsigned long val, next;
898
899 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
900 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
901 /* from time_after() in jiffies.h */
902 if ((long)(next - val) < 0) {
903 switch (target) {
904 case MEM_CGROUP_TARGET_THRESH:
905 next = val + THRESHOLDS_EVENTS_TARGET;
906 break;
907 case MEM_CGROUP_TARGET_SOFTLIMIT:
908 next = val + SOFTLIMIT_EVENTS_TARGET;
909 break;
910 default:
911 break;
912 }
913 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
914 return true;
915 }
916 return false;
917 }
918
919 /*
920 * Check events in order: threshold events are checked at a finer
921 * granularity than the soft limit.
922 */
923 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
924 {
925 /* threshold event is triggered in finer grain than soft limit */
926 if (unlikely(mem_cgroup_event_ratelimit(memcg,
927 MEM_CGROUP_TARGET_THRESH))) {
928 bool do_softlimit;
929
930 do_softlimit = mem_cgroup_event_ratelimit(memcg,
931 MEM_CGROUP_TARGET_SOFTLIMIT);
932 mem_cgroup_threshold(memcg);
933 if (unlikely(do_softlimit))
934 mem_cgroup_update_tree(memcg, page);
935 }
936 }
937
938 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
939 {
940 /*
941 * mm_update_next_owner() may clear mm->owner to NULL
942 * if it races with swapoff, page migration, etc.
943 * So this can be called with p == NULL.
944 */
945 if (unlikely(!p))
946 return NULL;
947
948 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
949 }
950 EXPORT_SYMBOL(mem_cgroup_from_task);
951
952 static __always_inline struct mem_cgroup *active_memcg(void)
953 {
954 if (!in_task())
955 return this_cpu_read(int_active_memcg);
956 else
957 return current->active_memcg;
958 }
959
960 /**
961 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
962 * @mm: mm from which memcg should be extracted. It can be NULL.
963 *
964 * Obtain a reference on mm->memcg and return it if successful. If mm
965 * is NULL, then the memcg is chosen as follows:
966 * 1) The active memcg, if set.
967 * 2) current->mm->memcg, if available
968 * 3) root memcg
969 * If mem_cgroup is disabled, NULL is returned.
970 */
971 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
972 {
973 struct mem_cgroup *memcg;
974
975 if (mem_cgroup_disabled())
976 return NULL;
977
978 /*
979 * Page cache insertions can happen without an
980 * actual mm context, e.g. during disk probing
981 * on boot, loopback IO, acct() writes etc.
982 *
983 * No need to css_get on root memcg as the reference
984 * counting is disabled on the root level in the
985 * cgroup core. See CSS_NO_REF.
986 */
987 if (unlikely(!mm)) {
988 memcg = active_memcg();
989 if (unlikely(memcg)) {
990 /* remote memcg must hold a ref */
991 css_get(&memcg->css);
992 return memcg;
993 }
994 mm = current->mm;
995 if (unlikely(!mm))
996 return root_mem_cgroup;
997 }
998
999 rcu_read_lock();
1000 do {
1001 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1002 if (unlikely(!memcg))
1003 memcg = root_mem_cgroup;
1004 } while (!css_tryget(&memcg->css));
1005 rcu_read_unlock();
1006 return memcg;
1007 }
1008 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1009
1010 static __always_inline bool memcg_kmem_bypass(void)
1011 {
1012 /* Allow remote memcg charging from any context. */
1013 if (unlikely(active_memcg()))
1014 return false;
1015
1016 /* Memcg to charge can't be determined. */
1017 if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
1018 return true;
1019
1020 return false;
1021 }
1022
1023 /**
1024 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1025 * @root: hierarchy root
1026 * @prev: previously returned memcg, NULL on first invocation
1027 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1028 *
1029 * Returns references to children of the hierarchy below @root, or
1030 * @root itself, or %NULL after a full round-trip.
1031 *
1032 * Caller must pass the return value in @prev on subsequent
1033 * invocations for reference counting, or use mem_cgroup_iter_break()
1034 * to cancel a hierarchy walk before the round-trip is complete.
1035 *
1036 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1037 * in the hierarchy among all concurrent reclaimers operating on the
1038 * same node.
1039 */
1040 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1041 struct mem_cgroup *prev,
1042 struct mem_cgroup_reclaim_cookie *reclaim)
1043 {
1044 struct mem_cgroup_reclaim_iter *iter;
1045 struct cgroup_subsys_state *css = NULL;
1046 struct mem_cgroup *memcg = NULL;
1047 struct mem_cgroup *pos = NULL;
1048
1049 if (mem_cgroup_disabled())
1050 return NULL;
1051
1052 if (!root)
1053 root = root_mem_cgroup;
1054
1055 if (prev && !reclaim)
1056 pos = prev;
1057
1058 rcu_read_lock();
1059
1060 if (reclaim) {
1061 struct mem_cgroup_per_node *mz;
1062
1063 mz = root->nodeinfo[reclaim->pgdat->node_id];
1064 iter = &mz->iter;
1065
1066 if (prev && reclaim->generation != iter->generation)
1067 goto out_unlock;
1068
1069 while (1) {
1070 pos = READ_ONCE(iter->position);
1071 if (!pos || css_tryget(&pos->css))
1072 break;
1073 /*
1074 * css reference reached zero, so iter->position will
1075 * be cleared by ->css_released. However, we should not
1076 * rely on this happening soon, because ->css_released
1077 * is called from a work queue, and by busy-waiting we
1078 * might block it. So we clear iter->position right
1079 * away.
1080 */
1081 (void)cmpxchg(&iter->position, pos, NULL);
1082 }
1083 }
1084
1085 if (pos)
1086 css = &pos->css;
1087
1088 for (;;) {
1089 css = css_next_descendant_pre(css, &root->css);
1090 if (!css) {
1091 /*
1092 * Reclaimers share the hierarchy walk, and a
1093 * new one might jump in right at the end of
1094 * the hierarchy - make sure they see at least
1095 * one group and restart from the beginning.
1096 */
1097 if (!prev)
1098 continue;
1099 break;
1100 }
1101
1102 /*
1103 * Verify the css and acquire a reference. The root
1104 * is provided by the caller, so we know it's alive
1105 * and kicking, and don't take an extra reference.
1106 */
1107 memcg = mem_cgroup_from_css(css);
1108
1109 if (css == &root->css)
1110 break;
1111
1112 if (css_tryget(css))
1113 break;
1114
1115 memcg = NULL;
1116 }
1117
1118 if (reclaim) {
1119 /*
1120 * The position could have already been updated by a competing
1121 * thread, so check that the value hasn't changed since we read
1122 * it to avoid reclaiming from the same cgroup twice.
1123 */
1124 (void)cmpxchg(&iter->position, pos, memcg);
1125
1126 if (pos)
1127 css_put(&pos->css);
1128
1129 if (!memcg)
1130 iter->generation++;
1131 else if (!prev)
1132 reclaim->generation = iter->generation;
1133 }
1134
1135 out_unlock:
1136 rcu_read_unlock();
1137 if (prev && prev != root)
1138 css_put(&prev->css);
1139
1140 return memcg;
1141 }
1142
1143 /**
1144 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1145 * @root: hierarchy root
1146 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1147 */
1148 void mem_cgroup_iter_break(struct mem_cgroup *root,
1149 struct mem_cgroup *prev)
1150 {
1151 if (!root)
1152 root = root_mem_cgroup;
1153 if (prev && prev != root)
1154 css_put(&prev->css);
1155 }
1156
1157 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1158 struct mem_cgroup *dead_memcg)
1159 {
1160 struct mem_cgroup_reclaim_iter *iter;
1161 struct mem_cgroup_per_node *mz;
1162 int nid;
1163
1164 for_each_node(nid) {
1165 mz = from->nodeinfo[nid];
1166 iter = &mz->iter;
1167 cmpxchg(&iter->position, dead_memcg, NULL);
1168 }
1169 }
1170
1171 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1172 {
1173 struct mem_cgroup *memcg = dead_memcg;
1174 struct mem_cgroup *last;
1175
1176 do {
1177 __invalidate_reclaim_iterators(memcg, dead_memcg);
1178 last = memcg;
1179 } while ((memcg = parent_mem_cgroup(memcg)));
1180
1181 /*
1182 * When cgroup1 non-hierarchical mode is used,
1183 * parent_mem_cgroup() does not walk all the way up to the
1184 * cgroup root (root_mem_cgroup). So we have to handle
1185 * dead_memcg from cgroup root separately.
1186 */
1187 if (last != root_mem_cgroup)
1188 __invalidate_reclaim_iterators(root_mem_cgroup,
1189 dead_memcg);
1190 }
1191
1192 /**
1193 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1194 * @memcg: hierarchy root
1195 * @fn: function to call for each task
1196 * @arg: argument passed to @fn
1197 *
1198 * This function iterates over tasks attached to @memcg or to any of its
1199 * descendants and calls @fn for each task. If @fn returns a non-zero
1200 * value, the function breaks the iteration loop and returns the value.
1201 * Otherwise, it will iterate over all tasks and return 0.
1202 *
1203 * This function must not be called for the root memory cgroup.
1204 */
1205 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1206 int (*fn)(struct task_struct *, void *), void *arg)
1207 {
1208 struct mem_cgroup *iter;
1209 int ret = 0;
1210
1211 BUG_ON(memcg == root_mem_cgroup);
1212
1213 for_each_mem_cgroup_tree(iter, memcg) {
1214 struct css_task_iter it;
1215 struct task_struct *task;
1216
1217 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1218 while (!ret && (task = css_task_iter_next(&it)))
1219 ret = fn(task, arg);
1220 css_task_iter_end(&it);
1221 if (ret) {
1222 mem_cgroup_iter_break(memcg, iter);
1223 break;
1224 }
1225 }
1226 return ret;
1227 }
1228
1229 #ifdef CONFIG_DEBUG_VM
1230 void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
1231 {
1232 struct mem_cgroup *memcg;
1233
1234 if (mem_cgroup_disabled())
1235 return;
1236
1237 memcg = page_memcg(page);
1238
1239 if (!memcg)
1240 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
1241 else
1242 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
1243 }
1244 #endif
1245
1246 /**
1247 * lock_page_lruvec - lock and return lruvec for a given page.
1248 * @page: the page
1249 *
1250 * These functions are safe to use under any of the following conditions:
1251 * - page locked
1252 * - PageLRU cleared
1253 * - lock_page_memcg()
1254 * - page->_refcount is zero
1255 */
1256 struct lruvec *lock_page_lruvec(struct page *page)
1257 {
1258 struct lruvec *lruvec;
1259
1260 lruvec = mem_cgroup_page_lruvec(page);
1261 spin_lock(&lruvec->lru_lock);
1262
1263 lruvec_memcg_debug(lruvec, page);
1264
1265 return lruvec;
1266 }
1267
1268 struct lruvec *lock_page_lruvec_irq(struct page *page)
1269 {
1270 struct lruvec *lruvec;
1271
1272 lruvec = mem_cgroup_page_lruvec(page);
1273 spin_lock_irq(&lruvec->lru_lock);
1274
1275 lruvec_memcg_debug(lruvec, page);
1276
1277 return lruvec;
1278 }
1279
1280 struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
1281 {
1282 struct lruvec *lruvec;
1283
1284 lruvec = mem_cgroup_page_lruvec(page);
1285 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1286
1287 lruvec_memcg_debug(lruvec, page);
1288
1289 return lruvec;
1290 }
1291
1292 /**
1293 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1294 * @lruvec: mem_cgroup per zone lru vector
1295 * @lru: index of lru list the page is sitting on
1296 * @zid: zone id of the accounted pages
1297 * @nr_pages: positive when adding or negative when removing
1298 *
1299 * This function must be called under lru_lock, just before a page is added
1300 * to or just after a page is removed from an lru list (that ordering being
1301 * so as to allow it to check that lru_size 0 is consistent with list_empty).
1302 */
1303 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1304 int zid, int nr_pages)
1305 {
1306 struct mem_cgroup_per_node *mz;
1307 unsigned long *lru_size;
1308 long size;
1309
1310 if (mem_cgroup_disabled())
1311 return;
1312
1313 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1314 lru_size = &mz->lru_zone_size[zid][lru];
1315
1316 if (nr_pages < 0)
1317 *lru_size += nr_pages;
1318
1319 size = *lru_size;
1320 if (WARN_ONCE(size < 0,
1321 "%s(%p, %d, %d): lru_size %ld\n",
1322 __func__, lruvec, lru, nr_pages, size)) {
1323 VM_BUG_ON(1);
1324 *lru_size = 0;
1325 }
1326
1327 if (nr_pages > 0)
1328 *lru_size += nr_pages;
1329 }
1330
1331 /**
1332 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1333 * @memcg: the memory cgroup
1334 *
1335 * Returns the maximum amount of memory @memcg can be charged with, in
1336 * pages.
1337 */
1338 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1339 {
1340 unsigned long margin = 0;
1341 unsigned long count;
1342 unsigned long limit;
1343
1344 count = page_counter_read(&memcg->memory);
1345 limit = READ_ONCE(memcg->memory.max);
1346 if (count < limit)
1347 margin = limit - count;
1348
1349 if (do_memsw_account()) {
1350 count = page_counter_read(&memcg->memsw);
1351 limit = READ_ONCE(memcg->memsw.max);
1352 if (count < limit)
1353 margin = min(margin, limit - count);
1354 else
1355 margin = 0;
1356 }
1357
1358 return margin;
1359 }
1360
1361 /*
1362 * A routine for checking whether "mem" is under move_account() or not.
1363 *
1364 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
1365 * moving cgroups. This is used for waiting under the high memory pressure
1366 * caused by a "move".
1367 */
1368 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1369 {
1370 struct mem_cgroup *from;
1371 struct mem_cgroup *to;
1372 bool ret = false;
1373 /*
1374 * Unlike task_move routines, we access mc.to, mc.from not under
1375 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1376 */
1377 spin_lock(&mc.lock);
1378 from = mc.from;
1379 to = mc.to;
1380 if (!from)
1381 goto unlock;
1382
1383 ret = mem_cgroup_is_descendant(from, memcg) ||
1384 mem_cgroup_is_descendant(to, memcg);
1385 unlock:
1386 spin_unlock(&mc.lock);
1387 return ret;
1388 }
1389
1390 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1391 {
1392 if (mc.moving_task && current != mc.moving_task) {
1393 if (mem_cgroup_under_move(memcg)) {
1394 DEFINE_WAIT(wait);
1395 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1396 /* moving charge context might have finished. */
1397 if (mc.moving_task)
1398 schedule();
1399 finish_wait(&mc.waitq, &wait);
1400 return true;
1401 }
1402 }
1403 return false;
1404 }
1405
1406 struct memory_stat {
1407 const char *name;
1408 unsigned int idx;
1409 };
1410
1411 static const struct memory_stat memory_stats[] = {
1412 { "anon", NR_ANON_MAPPED },
1413 { "file", NR_FILE_PAGES },
1414 { "kernel_stack", NR_KERNEL_STACK_KB },
1415 { "pagetables", NR_PAGETABLE },
1416 { "sec_pagetables", NR_SECONDARY_PAGETABLE },
1417 { "percpu", MEMCG_PERCPU_B },
1418 { "sock", MEMCG_SOCK },
1419 { "shmem", NR_SHMEM },
1420 { "file_mapped", NR_FILE_MAPPED },
1421 { "file_dirty", NR_FILE_DIRTY },
1422 { "file_writeback", NR_WRITEBACK },
1423 #ifdef CONFIG_SWAP
1424 { "swapcached", NR_SWAPCACHE },
1425 #endif
1426 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1427 { "anon_thp", NR_ANON_THPS },
1428 { "file_thp", NR_FILE_THPS },
1429 { "shmem_thp", NR_SHMEM_THPS },
1430 #endif
1431 { "inactive_anon", NR_INACTIVE_ANON },
1432 { "active_anon", NR_ACTIVE_ANON },
1433 { "inactive_file", NR_INACTIVE_FILE },
1434 { "active_file", NR_ACTIVE_FILE },
1435 { "unevictable", NR_UNEVICTABLE },
1436 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
1437 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
1438
1439 /* The memory events */
1440 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
1441 { "workingset_refault_file", WORKINGSET_REFAULT_FILE },
1442 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
1443 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
1444 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
1445 { "workingset_restore_file", WORKINGSET_RESTORE_FILE },
1446 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
1447 };
1448
1449 /* Translate stat items to the correct unit for memory.stat output */
1450 static int memcg_page_state_unit(int item)
1451 {
1452 switch (item) {
1453 case MEMCG_PERCPU_B:
1454 case NR_SLAB_RECLAIMABLE_B:
1455 case NR_SLAB_UNRECLAIMABLE_B:
1456 case WORKINGSET_REFAULT_ANON:
1457 case WORKINGSET_REFAULT_FILE:
1458 case WORKINGSET_ACTIVATE_ANON:
1459 case WORKINGSET_ACTIVATE_FILE:
1460 case WORKINGSET_RESTORE_ANON:
1461 case WORKINGSET_RESTORE_FILE:
1462 case WORKINGSET_NODERECLAIM:
1463 return 1;
1464 case NR_KERNEL_STACK_KB:
1465 return SZ_1K;
1466 default:
1467 return PAGE_SIZE;
1468 }
1469 }
1470
1471 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1472 int item)
1473 {
1474 return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1475 }
1476
1477 static char *memory_stat_format(struct mem_cgroup *memcg)
1478 {
1479 struct seq_buf s;
1480 int i;
1481
1482 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1483 if (!s.buffer)
1484 return NULL;
1485
1486 /*
1487 * Provide statistics on the state of the memory subsystem as
1488 * well as cumulative event counters that show past behavior.
1489 *
1490 * This list is ordered following a combination of these gradients:
1491 * 1) generic big picture -> specifics and details
1492 * 2) reflecting userspace activity -> reflecting kernel heuristics
1493 *
1494 * Current memory state:
1495 */
1496 mem_cgroup_flush_stats();
1497
1498 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1499 u64 size;
1500
1501 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1502 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1503
1504 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1505 size += memcg_page_state_output(memcg,
1506 NR_SLAB_RECLAIMABLE_B);
1507 seq_buf_printf(&s, "slab %llu\n", size);
1508 }
1509 }
1510
1511 /* Accumulated memory events */
1512
1513 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1514 memcg_events(memcg, PGFAULT));
1515 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1516 memcg_events(memcg, PGMAJFAULT));
1517 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
1518 memcg_events(memcg, PGREFILL));
1519 seq_buf_printf(&s, "pgscan %lu\n",
1520 memcg_events(memcg, PGSCAN_KSWAPD) +
1521 memcg_events(memcg, PGSCAN_DIRECT));
1522 seq_buf_printf(&s, "pgsteal %lu\n",
1523 memcg_events(memcg, PGSTEAL_KSWAPD) +
1524 memcg_events(memcg, PGSTEAL_DIRECT));
1525 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1526 memcg_events(memcg, PGACTIVATE));
1527 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1528 memcg_events(memcg, PGDEACTIVATE));
1529 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1530 memcg_events(memcg, PGLAZYFREE));
1531 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1532 memcg_events(memcg, PGLAZYFREED));
1533
1534 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1535 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1536 memcg_events(memcg, THP_FAULT_ALLOC));
1537 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1538 memcg_events(memcg, THP_COLLAPSE_ALLOC));
1539 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1540
1541 /* The above should easily fit into one page */
1542 WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1543
1544 return s.buffer;
1545 }
1546
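/* Convert a page count to kilobytes for printing. */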
1547 #define K(x) ((x) << (PAGE_SHIFT-10))
1548 /**
1549 * mem_cgroup_print_oom_context: Print OOM information relevant to
1550 * memory controller.
1551 * @memcg: The memory cgroup that went over limit
1552 * @p: Task that is going to be killed
1553 *
1554 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1555 * enabled
1556 */
1557 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1558 {
1559 rcu_read_lock();
1560
1561 if (memcg) {
1562 pr_cont(",oom_memcg=");
1563 pr_cont_cgroup_path(memcg->css.cgroup);
1564 } else
1565 pr_cont(",global_oom");
1566 if (p) {
1567 pr_cont(",task_memcg=");
1568 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1569 }
1570 rcu_read_unlock();
1571 }
1572
1573 /**
1574 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1575 * memory controller.
1576 * @memcg: The memory cgroup that went over limit
1577 */
1578 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1579 {
1580 char *buf;
1581
1582 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1583 K((u64)page_counter_read(&memcg->memory)),
1584 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1585 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1586 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1587 K((u64)page_counter_read(&memcg->swap)),
1588 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1589 else {
1590 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1591 K((u64)page_counter_read(&memcg->memsw)),
1592 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1593 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1594 K((u64)page_counter_read(&memcg->kmem)),
1595 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1596 }
1597
1598 pr_info("Memory cgroup stats for ");
1599 pr_cont_cgroup_path(memcg->css.cgroup);
1600 pr_cont(":");
1601 buf = memory_stat_format(memcg);
1602 if (!buf)
1603 return;
1604 pr_info("%s", buf);
1605 kfree(buf);
1606 }
1607
1608 /*
1609 * Return the memory (and swap, if configured) limit for a memcg.
1610 */
1611 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1612 {
1613 unsigned long max = READ_ONCE(memcg->memory.max);
1614
1615 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1616 if (mem_cgroup_swappiness(memcg))
1617 max += min(READ_ONCE(memcg->swap.max),
1618 (unsigned long)total_swap_pages);
1619 } else { /* v1 */
1620 if (mem_cgroup_swappiness(memcg)) {
1621 /* Calculate swap excess capacity from memsw limit */
1622 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1623
1624 max += min(swap, (unsigned long)total_swap_pages);
1625 }
1626 }
1627 return max;
1628 }
1629
1630 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1631 {
1632 return page_counter_read(&memcg->memory);
1633 }
1634
1635 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1636 int order)
1637 {
1638 struct oom_control oc = {
1639 .zonelist = NULL,
1640 .nodemask = NULL,
1641 .memcg = memcg,
1642 .gfp_mask = gfp_mask,
1643 .order = order,
1644 };
1645 bool ret = true;
1646
1647 if (mutex_lock_killable(&oom_lock))
1648 return true;
1649
1650 if (mem_cgroup_margin(memcg) >= (1 << order))
1651 goto unlock;
1652
1653 /*
1654 * A few threads which were not waiting at mutex_lock_killable() can
1655 * fail to bail out. Therefore, check again after holding oom_lock.
1656 */
1657 ret = task_is_dying() || out_of_memory(&oc);
1658
1659 unlock:
1660 mutex_unlock(&oom_lock);
1661 return ret;
1662 }
1663
1664 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1665 pg_data_t *pgdat,
1666 gfp_t gfp_mask,
1667 unsigned long *total_scanned)
1668 {
1669 struct mem_cgroup *victim = NULL;
1670 int total = 0;
1671 int loop = 0;
1672 unsigned long excess;
1673 unsigned long nr_scanned;
1674 struct mem_cgroup_reclaim_cookie reclaim = {
1675 .pgdat = pgdat,
1676 };
1677
1678 excess = soft_limit_excess(root_memcg);
1679
1680 while (1) {
1681 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1682 if (!victim) {
1683 loop++;
1684 if (loop >= 2) {
1685 /*
1686 * If we have not been able to reclaim
1687 * anything, it might be because there are
1688 * no reclaimable pages under this hierarchy
1689 */
1690 if (!total)
1691 break;
1692 /*
1693 * We want to do more targeted reclaim.
1694 * excess >> 2 is not so excessive that we
1695 * reclaim too much, nor so little that we keep
1696 * coming back to reclaim from this cgroup
1697 */
1698 if (total >= (excess >> 2) ||
1699 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1700 break;
1701 }
1702 continue;
1703 }
1704 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1705 pgdat, &nr_scanned);
1706 *total_scanned += nr_scanned;
1707 if (!soft_limit_excess(root_memcg))
1708 break;
1709 }
1710 mem_cgroup_iter_break(root_memcg, victim);
1711 return total;
1712 }
1713
1714 #ifdef CONFIG_LOCKDEP
1715 static struct lockdep_map memcg_oom_lock_dep_map = {
1716 .name = "memcg_oom_lock",
1717 };
1718 #endif
1719
1720 static DEFINE_SPINLOCK(memcg_oom_lock);
1721
1722 /*
1723 * Check whether the OOM killer is already running under our hierarchy.
1724 * If someone else is already running it, return false.
1725 */
1726 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1727 {
1728 struct mem_cgroup *iter, *failed = NULL;
1729
1730 spin_lock(&memcg_oom_lock);
1731
1732 for_each_mem_cgroup_tree(iter, memcg) {
1733 if (iter->oom_lock) {
1734 /*
1735 * This subtree of our hierarchy is already locked,
1736 * so we cannot grant the lock.
1737 */
1738 failed = iter;
1739 mem_cgroup_iter_break(memcg, iter);
1740 break;
1741 } else
1742 iter->oom_lock = true;
1743 }
1744
1745 if (failed) {
1746 /*
1747 * OK, we failed to lock the whole subtree, so we have
1748 * to clean up what we already set up, up to the failing subtree
1749 */
1750 for_each_mem_cgroup_tree(iter, memcg) {
1751 if (iter == failed) {
1752 mem_cgroup_iter_break(memcg, iter);
1753 break;
1754 }
1755 iter->oom_lock = false;
1756 }
1757 } else
1758 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1759
1760 spin_unlock(&memcg_oom_lock);
1761
1762 return !failed;
1763 }
1764
1765 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1766 {
1767 struct mem_cgroup *iter;
1768
1769 spin_lock(&memcg_oom_lock);
1770 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1771 for_each_mem_cgroup_tree(iter, memcg)
1772 iter->oom_lock = false;
1773 spin_unlock(&memcg_oom_lock);
1774 }
1775
1776 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1777 {
1778 struct mem_cgroup *iter;
1779
1780 spin_lock(&memcg_oom_lock);
1781 for_each_mem_cgroup_tree(iter, memcg)
1782 iter->under_oom++;
1783 spin_unlock(&memcg_oom_lock);
1784 }
1785
1786 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1787 {
1788 struct mem_cgroup *iter;
1789
1790 /*
1791 * Be careful about under_oom underflows because a child memcg
1792 * could have been added after mem_cgroup_mark_under_oom.
1793 */
1794 spin_lock(&memcg_oom_lock);
1795 for_each_mem_cgroup_tree(iter, memcg)
1796 if (iter->under_oom > 0)
1797 iter->under_oom--;
1798 spin_unlock(&memcg_oom_lock);
1799 }
1800
1801 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1802
1803 struct oom_wait_info {
1804 struct mem_cgroup *memcg;
1805 wait_queue_entry_t wait;
1806 };
1807
1808 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1809 unsigned mode, int sync, void *arg)
1810 {
1811 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1812 struct mem_cgroup *oom_wait_memcg;
1813 struct oom_wait_info *oom_wait_info;
1814
1815 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1816 oom_wait_memcg = oom_wait_info->memcg;
1817
1818 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1819 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1820 return 0;
1821 return autoremove_wake_function(wait, mode, sync, arg);
1822 }
1823
1824 static void memcg_oom_recover(struct mem_cgroup *memcg)
1825 {
1826 /*
1827 * For the following lockless ->under_oom test, the only required
1828 * guarantee is that it must see the state asserted by an OOM when
1829 * this function is called as a result of userland actions
1830 * triggered by the notification of the OOM. This is trivially
1831 * achieved by invoking mem_cgroup_mark_under_oom() before
1832 * triggering notification.
1833 */
1834 if (memcg && memcg->under_oom)
1835 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1836 }
1837
1838 enum oom_status {
1839 OOM_SUCCESS,
1840 OOM_FAILED,
1841 OOM_ASYNC,
1842 OOM_SKIPPED
1843 };
1844
1845 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1846 {
1847 enum oom_status ret;
1848 bool locked;
1849
1850 if (order > PAGE_ALLOC_COSTLY_ORDER)
1851 return OOM_SKIPPED;
1852
1853 memcg_memory_event(memcg, MEMCG_OOM);
1854
1855 /*
1856 * We are in the middle of the charge context here, so we
1857 * don't want to block when potentially sitting on a callstack
1858 * that holds all kinds of filesystem and mm locks.
1859 *
1860 * cgroup1 allows disabling the OOM killer and waiting for outside
1861 * handling until the charge can succeed; remember the context and put
1862 * the task to sleep at the end of the page fault when all locks are
1863 * released.
1864 *
1865 * On the other hand, in-kernel OOM killer allows for an async victim
1866 * memory reclaim (oom_reaper) and that means that we are not solely
1867 * relying on the oom victim to make a forward progress and we can
1868 * invoke the oom killer here.
1869 *
1870 * Please note that mem_cgroup_out_of_memory might fail to find a
1871 * victim and then we have to bail out from the charge path.
1872 */
1873 if (memcg->oom_kill_disable) {
1874 if (!current->in_user_fault)
1875 return OOM_SKIPPED;
1876 css_get(&memcg->css);
1877 current->memcg_in_oom = memcg;
1878 current->memcg_oom_gfp_mask = mask;
1879 current->memcg_oom_order = order;
1880
1881 return OOM_ASYNC;
1882 }
1883
1884 mem_cgroup_mark_under_oom(memcg);
1885
1886 locked = mem_cgroup_oom_trylock(memcg);
1887
1888 if (locked)
1889 mem_cgroup_oom_notify(memcg);
1890
1891 mem_cgroup_unmark_under_oom(memcg);
1892 if (mem_cgroup_out_of_memory(memcg, mask, order))
1893 ret = OOM_SUCCESS;
1894 else
1895 ret = OOM_FAILED;
1896
1897 if (locked)
1898 mem_cgroup_oom_unlock(memcg);
1899
1900 return ret;
1901 }
1902
1903 /**
1904 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1905 * @handle: actually kill/wait or just clean up the OOM state
1906 *
1907 * This has to be called at the end of a page fault if the memcg OOM
1908 * handler was enabled.
1909 *
1910 * Memcg supports userspace OOM handling where failed allocations must
1911 * sleep on a waitqueue until the userspace task resolves the
1912 * situation. Sleeping directly in the charge context with all kinds
1913 * of locks held is not a good idea, instead we remember an OOM state
1914 * in the task and mem_cgroup_oom_synchronize() has to be called at
1915 * the end of the page fault to complete the OOM handling.
1916 *
1917 * Returns %true if an ongoing memcg OOM situation was detected and
1918 * completed, %false otherwise.
1919 */
1920 bool mem_cgroup_oom_synchronize(bool handle)
1921 {
1922 struct mem_cgroup *memcg = current->memcg_in_oom;
1923 struct oom_wait_info owait;
1924 bool locked;
1925
1926 /* OOM is global, do not handle */
1927 if (!memcg)
1928 return false;
1929
1930 if (!handle)
1931 goto cleanup;
1932
1933 owait.memcg = memcg;
1934 owait.wait.flags = 0;
1935 owait.wait.func = memcg_oom_wake_function;
1936 owait.wait.private = current;
1937 INIT_LIST_HEAD(&owait.wait.entry);
1938
1939 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1940 mem_cgroup_mark_under_oom(memcg);
1941
1942 locked = mem_cgroup_oom_trylock(memcg);
1943
1944 if (locked)
1945 mem_cgroup_oom_notify(memcg);
1946
1947 if (locked && !memcg->oom_kill_disable) {
1948 mem_cgroup_unmark_under_oom(memcg);
1949 finish_wait(&memcg_oom_waitq, &owait.wait);
1950 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1951 current->memcg_oom_order);
1952 } else {
1953 schedule();
1954 mem_cgroup_unmark_under_oom(memcg);
1955 finish_wait(&memcg_oom_waitq, &owait.wait);
1956 }
1957
1958 if (locked) {
1959 mem_cgroup_oom_unlock(memcg);
1960 /*
1961 * There is no guarantee that an OOM-lock contender
1962 * sees the wakeups triggered by the OOM kill
1963 * uncharges. Wake any sleepers explicitly.
1964 */
1965 memcg_oom_recover(memcg);
1966 }
1967 cleanup:
1968 current->memcg_in_oom = NULL;
1969 css_put(&memcg->css);
1970 return true;
1971 }
1972
1973 /**
1974 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1975 * @victim: task to be killed by the OOM killer
1976 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1977 *
1978 * Returns a pointer to a memory cgroup, which has to be cleaned up
1979 * by killing all belonging OOM-killable tasks.
1980 *
1981 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1982 */
1983 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1984 struct mem_cgroup *oom_domain)
1985 {
1986 struct mem_cgroup *oom_group = NULL;
1987 struct mem_cgroup *memcg;
1988
1989 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1990 return NULL;
1991
1992 if (!oom_domain)
1993 oom_domain = root_mem_cgroup;
1994
1995 rcu_read_lock();
1996
1997 memcg = mem_cgroup_from_task(victim);
1998 if (memcg == root_mem_cgroup)
1999 goto out;
2000
2001 /*
2002 * If the victim task has been asynchronously moved to a different
2003 * memory cgroup, we might end up killing tasks outside oom_domain.
2004 * In this case it's better to ignore memory.group.oom.
2005 */
2006 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2007 goto out;
2008
2009 /*
2010 * Traverse the memory cgroup hierarchy from the victim task's
2011 * cgroup up to the OOMing cgroup (or root) to find the
2012 * highest-level memory cgroup with oom.group set.
2013 */
2014 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2015 if (memcg->oom_group)
2016 oom_group = memcg;
2017
2018 if (memcg == oom_domain)
2019 break;
2020 }
2021
2022 if (oom_group)
2023 css_get(&oom_group->css);
2024 out:
2025 rcu_read_unlock();
2026
2027 return oom_group;
2028 }
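/*
 * Illustrative example of the walk above: suppose the victim lives in
 * /a/b/c, the OOM happened in /a (oom_domain), and memory.oom.group is
 * set on both /a/b/c and /a/b. The loop visits c, then b, then stops at
 * a, leaving oom_group pointing at /a/b, the highest-level cgroup with
 * oom.group set between the victim's cgroup and the OOMing one, so the
 * whole /a/b subtree becomes the kill target.
 */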
2029
2030 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2031 {
2032 pr_info("Tasks in ");
2033 pr_cont_cgroup_path(memcg->css.cgroup);
2034 pr_cont(" are going to be killed due to memory.oom.group set\n");
2035 }
2036
2037 /**
2038 * lock_page_memcg - lock a page and memcg binding
2039 * @page: the page
2040 *
2041 * This function protects unlocked LRU pages from being moved to
2042 * another cgroup.
2043 *
2044 * It ensures lifetime of the locked memcg. Caller is responsible
2045 * for the lifetime of the page.
2046 */
2047 void lock_page_memcg(struct page *page)
2048 {
2049 struct page *head = compound_head(page); /* rmap on tail pages */
2050 struct mem_cgroup *memcg;
2051 unsigned long flags;
2052
2053 /*
2054 * The RCU lock is held throughout the transaction. The fast
2055 * path can get away without acquiring the memcg->move_lock
2056 * because page moving starts with an RCU grace period.
2057 */
2058 rcu_read_lock();
2059
2060 if (mem_cgroup_disabled())
2061 return;
2062 again:
2063 memcg = page_memcg(head);
2064 if (unlikely(!memcg))
2065 return;
2066
2067 #ifdef CONFIG_PROVE_LOCKING
2068 local_irq_save(flags);
2069 might_lock(&memcg->move_lock);
2070 local_irq_restore(flags);
2071 #endif
2072
2073 if (atomic_read(&memcg->moving_account) <= 0)
2074 return;
2075
2076 spin_lock_irqsave(&memcg->move_lock, flags);
2077 if (memcg != page_memcg(head)) {
2078 spin_unlock_irqrestore(&memcg->move_lock, flags);
2079 goto again;
2080 }
2081
2082 /*
2083 * When charge migration first begins, we can have multiple
2084 * critical sections holding the fast-path RCU lock and one
2085 * holding the slowpath move_lock. Track the task that holds the
2086 * move_lock for unlock_page_memcg().
2087 */
2088 memcg->move_lock_task = current;
2089 memcg->move_lock_flags = flags;
2090 }
2091 EXPORT_SYMBOL(lock_page_memcg);
2092
2093 static void __unlock_page_memcg(struct mem_cgroup *memcg)
2094 {
2095 if (memcg && memcg->move_lock_task == current) {
2096 unsigned long flags = memcg->move_lock_flags;
2097
2098 memcg->move_lock_task = NULL;
2099 memcg->move_lock_flags = 0;
2100
2101 spin_unlock_irqrestore(&memcg->move_lock, flags);
2102 }
2103
2104 rcu_read_unlock();
2105 }
2106
2107 /**
2108 * unlock_page_memcg - unlock a page and memcg binding
2109 * @page: the page
2110 */
2111 void unlock_page_memcg(struct page *page)
2112 {
2113 struct page *head = compound_head(page);
2114
2115 __unlock_page_memcg(page_memcg(head));
2116 }
2117 EXPORT_SYMBOL(unlock_page_memcg);
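/*
 * Typical usage pattern (sketch, not a verbatim caller): code that
 * updates memcg-tracked page state without holding the page lock or LRU
 * isolation brackets the update like
 *
 *	lock_page_memcg(page);
 *	if (page_memcg(page))
 *		... update per-memcg page state ...
 *	unlock_page_memcg(page);
 *
 * which keeps the page's memcg binding stable against charge moving for
 * the duration of the update.
 */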
2118
2119 struct obj_stock {
2120 #ifdef CONFIG_MEMCG_KMEM
2121 struct obj_cgroup *cached_objcg;
2122 struct pglist_data *cached_pgdat;
2123 unsigned int nr_bytes;
2124 int nr_slab_reclaimable_b;
2125 int nr_slab_unreclaimable_b;
2126 #else
2127 int dummy[0];
2128 #endif
2129 };
2130
2131 struct memcg_stock_pcp {
2132 struct mem_cgroup *cached; /* this is never the root cgroup */
2133 unsigned int nr_pages;
2134 struct obj_stock task_obj;
2135 struct obj_stock irq_obj;
2136
2137 struct work_struct work;
2138 unsigned long flags;
2139 #define FLUSHING_CACHED_CHARGE 0
2140 };
2141 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2142 static DEFINE_MUTEX(percpu_charge_mutex);
2143
2144 #ifdef CONFIG_MEMCG_KMEM
2145 static void drain_obj_stock(struct obj_stock *stock);
2146 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2147 struct mem_cgroup *root_memcg);
2148
2149 #else
2150 static inline void drain_obj_stock(struct obj_stock *stock)
2151 {
2152 }
2153 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2154 struct mem_cgroup *root_memcg)
2155 {
2156 return false;
2157 }
2158 #endif
2159
2160 /*
2161 * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
2162 * sequence used in this case to access content from the object stock is slow.
2163 * To optimize for user context access, there are now two object stocks, one
2164 * for task context and one for interrupt context access respectively.
2165 *
2166 * The task context object stock can be accessed by disabling preemption only,
2167 * which is cheap in a non-preempt kernel. The interrupt context object stock
2168 * can only be accessed after disabling interrupts. User context code can
2169 * access the interrupt object stock, but not vice versa.
2170 */
2171 static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
2172 {
2173 struct memcg_stock_pcp *stock;
2174
2175 if (likely(in_task())) {
2176 *pflags = 0UL;
2177 preempt_disable();
2178 stock = this_cpu_ptr(&memcg_stock);
2179 return &stock->task_obj;
2180 }
2181
2182 local_irq_save(*pflags);
2183 stock = this_cpu_ptr(&memcg_stock);
2184 return &stock->irq_obj;
2185 }
2186
2187 static inline void put_obj_stock(unsigned long flags)
2188 {
2189 if (likely(in_task()))
2190 preempt_enable();
2191 else
2192 local_irq_restore(flags);
2193 }
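/*
 * The two helpers above are always used as a bracketing pair, roughly
 * (sketch of the pattern used by the stock helpers below):
 *
 *	unsigned long flags;
 *	struct obj_stock *stock = get_obj_stock(&flags);
 *
 *	... consume from or refill the object stock ...
 *
 *	put_obj_stock(flags);
 *
 * In task context this costs only a preempt_disable()/preempt_enable()
 * pair; in interrupt context it falls back to local_irq_save()/restore().
 */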
2194
2195 /**
2196 * consume_stock: Try to consume stocked charge on this cpu.
2197 * @memcg: memcg to consume from.
2198 * @nr_pages: how many pages to charge.
2199 *
2200 * The charges will only happen if @memcg matches the current cpu's memcg
2201 * stock, and at least @nr_pages are available in that stock. Failure to
2202 * service an allocation will refill the stock.
2203 *
2204 * returns true if successful, false otherwise.
2205 */
2206 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2207 {
2208 struct memcg_stock_pcp *stock;
2209 unsigned long flags;
2210 bool ret = false;
2211
2212 if (nr_pages > MEMCG_CHARGE_BATCH)
2213 return ret;
2214
2215 local_irq_save(flags);
2216
2217 stock = this_cpu_ptr(&memcg_stock);
2218 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2219 stock->nr_pages -= nr_pages;
2220 ret = true;
2221 }
2222
2223 local_irq_restore(flags);
2224
2225 return ret;
2226 }
2227
2228 /*
2229 * Return the charges cached in the percpu stock to the counters and reset the cached information.
2230 */
2231 static void drain_stock(struct memcg_stock_pcp *stock)
2232 {
2233 struct mem_cgroup *old = stock->cached;
2234
2235 if (!old)
2236 return;
2237
2238 if (stock->nr_pages) {
2239 page_counter_uncharge(&old->memory, stock->nr_pages);
2240 if (do_memsw_account())
2241 page_counter_uncharge(&old->memsw, stock->nr_pages);
2242 stock->nr_pages = 0;
2243 }
2244
2245 css_put(&old->css);
2246 stock->cached = NULL;
2247 }
2248
2249 static void drain_local_stock(struct work_struct *dummy)
2250 {
2251 struct memcg_stock_pcp *stock;
2252 unsigned long flags;
2253
2254 /*
2255 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2256 * drain_stock races is that we always operate on the local CPU stock
2257 * here with IRQs disabled.
2258 */
2259 local_irq_save(flags);
2260
2261 stock = this_cpu_ptr(&memcg_stock);
2262 drain_obj_stock(&stock->irq_obj);
2263 if (in_task())
2264 drain_obj_stock(&stock->task_obj);
2265 drain_stock(stock);
2266 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2267
2268 local_irq_restore(flags);
2269 }
2270
2271 /*
2272 * Cache charges (nr_pages) in the local per-CPU area.
2273 * They will be consumed by consume_stock() later.
2274 */
2275 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2276 {
2277 struct memcg_stock_pcp *stock;
2278 unsigned long flags;
2279
2280 local_irq_save(flags);
2281
2282 stock = this_cpu_ptr(&memcg_stock);
2283 if (stock->cached != memcg) { /* reset if necessary */
2284 drain_stock(stock);
2285 css_get(&memcg->css);
2286 stock->cached = memcg;
2287 }
2288 stock->nr_pages += nr_pages;
2289
2290 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2291 drain_stock(stock);
2292
2293 local_irq_restore(flags);
2294 }
2295
2296 /*
2297 * Drains all per-CPU charge caches for the given root_memcg, i.e. the
2298 * whole subtree of the hierarchy under it.
2299 */
2300 static void drain_all_stock(struct mem_cgroup *root_memcg)
2301 {
2302 int cpu, curcpu;
2303
2304 /* If someone's already draining, avoid running more workers. */
2305 if (!mutex_trylock(&percpu_charge_mutex))
2306 return;
2307 /*
2308 * Notify other cpus that system-wide "drain" is running
2309 * We do not care about races with the cpu hotplug because cpu down
2310 * as well as workers from this path always operate on the local
2311 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2312 */
2313 curcpu = get_cpu();
2314 for_each_online_cpu(cpu) {
2315 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2316 struct mem_cgroup *memcg;
2317 bool flush = false;
2318
2319 rcu_read_lock();
2320 memcg = stock->cached;
2321 if (memcg && stock->nr_pages &&
2322 mem_cgroup_is_descendant(memcg, root_memcg))
2323 flush = true;
2324 else if (obj_stock_flush_required(stock, root_memcg))
2325 flush = true;
2326 rcu_read_unlock();
2327
2328 if (flush &&
2329 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2330 if (cpu == curcpu)
2331 drain_local_stock(&stock->work);
2332 else
2333 schedule_work_on(cpu, &stock->work);
2334 }
2335 }
2336 put_cpu();
2337 mutex_unlock(&percpu_charge_mutex);
2338 }
2339
2340 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2341 {
2342 struct memcg_stock_pcp *stock;
2343
2344 stock = &per_cpu(memcg_stock, cpu);
2345 drain_stock(stock);
2346
2347 return 0;
2348 }
2349
2350 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2351 unsigned int nr_pages,
2352 gfp_t gfp_mask)
2353 {
2354 unsigned long nr_reclaimed = 0;
2355
2356 do {
2357 unsigned long pflags;
2358
2359 if (page_counter_read(&memcg->memory) <=
2360 READ_ONCE(memcg->memory.high))
2361 continue;
2362
2363 memcg_memory_event(memcg, MEMCG_HIGH);
2364
2365 psi_memstall_enter(&pflags);
2366 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2367 gfp_mask,
2368 MEMCG_RECLAIM_MAY_SWAP);
2369 psi_memstall_leave(&pflags);
2370 } while ((memcg = parent_mem_cgroup(memcg)) &&
2371 !mem_cgroup_is_root(memcg));
2372
2373 return nr_reclaimed;
2374 }
2375
2376 static void high_work_func(struct work_struct *work)
2377 {
2378 struct mem_cgroup *memcg;
2379
2380 memcg = container_of(work, struct mem_cgroup, high_work);
2381 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2382 }
2383
2384 /*
2385 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2386 * enough to still cause a significant slowdown in most cases, while still
2387 * allowing diagnostics and tracing to proceed without becoming stuck.
2388 */
2389 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2390
2391 /*
2392 * When calculating the delay, we use these on either side of the exponentiation to
2393 * maintain precision and scale to a reasonable number of jiffies (see the table
2394 * below).
2395 *
2396 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2397 * overage ratio to a delay.
2398 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2399 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2400 * to produce a reasonable delay curve.
2401 *
2402 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2403 * reasonable delay curve compared to precision-adjusted overage, not
2404 * penalising heavily at first, but still making sure that growth beyond the
2405 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2406 * example, with a high of 100 megabytes:
2407 *
2408 * +-------+------------------------+
2409 * | usage | time to allocate in ms |
2410 * +-------+------------------------+
2411 * | 100M | 0 |
2412 * | 101M | 6 |
2413 * | 102M | 25 |
2414 * | 103M | 57 |
2415 * | 104M | 102 |
2416 * | 105M | 159 |
2417 * | 106M | 230 |
2418 * | 107M | 313 |
2419 * | 108M | 409 |
2420 * | 109M | 518 |
2421 * | 110M | 639 |
2422 * | 111M | 774 |
2423 * | 112M | 921 |
2424 * | 113M | 1081 |
2425 * | 114M | 1254 |
2426 * | 115M | 1439 |
2427 * | 116M | 1638 |
2428 * | 117M | 1849 |
2429 * | 118M | 2000 |
2430 * | 119M | 2000 |
2431 * | 120M | 2000 |
2432 * +-------+------------------------+
2433 */
2434 #define MEMCG_DELAY_PRECISION_SHIFT 20
2435 #define MEMCG_DELAY_SCALING_SHIFT 14
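/*
 * Worked example for one row of the table above, assuming HZ == 1000 and
 * a charge of MEMCG_CHARGE_BATCH pages: with memory.high at 100M and
 * usage at 104M,
 *
 *	overage = ((104M - 100M) << MEMCG_DELAY_PRECISION_SHIFT) / 100M
 *	        ~= 41943
 *	penalty = (41943 * 41943 * HZ)
 *	           >> MEMCG_DELAY_PRECISION_SHIFT
 *	           >> MEMCG_DELAY_SCALING_SHIFT
 *	        ~= 102 jiffies ~= 102ms
 *
 * which matches the 104M row. calculate_high_delay() below additionally
 * scales the result by nr_pages / MEMCG_CHARGE_BATCH, so larger charges
 * pay proportionally more of the delay.
 */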
2436
2437 static u64 calculate_overage(unsigned long usage, unsigned long high)
2438 {
2439 u64 overage;
2440
2441 if (usage <= high)
2442 return 0;
2443
2444 /*
2445 * Prevent division by 0 in overage calculation by acting as if
2446 * it were a threshold of 1 page.
2447 */
2448 high = max(high, 1UL);
2449
2450 overage = usage - high;
2451 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2452 return div64_u64(overage, high);
2453 }
2454
2455 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2456 {
2457 u64 overage, max_overage = 0;
2458
2459 do {
2460 overage = calculate_overage(page_counter_read(&memcg->memory),
2461 READ_ONCE(memcg->memory.high));
2462 max_overage = max(overage, max_overage);
2463 } while ((memcg = parent_mem_cgroup(memcg)) &&
2464 !mem_cgroup_is_root(memcg));
2465
2466 return max_overage;
2467 }
2468
2469 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2470 {
2471 u64 overage, max_overage = 0;
2472
2473 do {
2474 overage = calculate_overage(page_counter_read(&memcg->swap),
2475 READ_ONCE(memcg->swap.high));
2476 if (overage)
2477 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2478 max_overage = max(overage, max_overage);
2479 } while ((memcg = parent_mem_cgroup(memcg)) &&
2480 !mem_cgroup_is_root(memcg));
2481
2482 return max_overage;
2483 }
2484
2485 /*
2486 * Get the number of jiffies that we should penalise a mischievous cgroup which
2487 * is exceeding its memory.high by checking both it and its ancestors.
2488 */
2489 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2490 unsigned int nr_pages,
2491 u64 max_overage)
2492 {
2493 unsigned long penalty_jiffies;
2494
2495 if (!max_overage)
2496 return 0;
2497
2498 /*
2499 * We use overage compared to memory.high to calculate the number of
2500 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2501 * fairly lenient on small overages, and increasingly harsh when the
2502 * memcg in question makes it clear that it has no intention of stopping
2503 * its crazy behaviour, so we exponentially increase the delay based on
2504 * overage amount.
2505 */
2506 penalty_jiffies = max_overage * max_overage * HZ;
2507 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2508 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2509
2510 /*
2511 * Factor in the task's own contribution to the overage, such that four
2512 * N-sized allocations are throttled approximately the same as one
2513 * 4N-sized allocation.
2514 *
2515 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2516 * larger the current charge batch is than that.
2517 */
2518 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2519 }
2520
2521 /*
2522 * Scheduled by try_charge() to be executed from the userland return path
2523 * and reclaims memory over the high limit.
2524 */
2525 void mem_cgroup_handle_over_high(void)
2526 {
2527 unsigned long penalty_jiffies;
2528 unsigned long pflags;
2529 unsigned long nr_reclaimed;
2530 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2531 int nr_retries = MAX_RECLAIM_RETRIES;
2532 struct mem_cgroup *memcg;
2533 bool in_retry = false;
2534
2535 if (likely(!nr_pages))
2536 return;
2537
2538 memcg = get_mem_cgroup_from_mm(current->mm);
2539 current->memcg_nr_pages_over_high = 0;
2540
2541 retry_reclaim:
2542 /*
2543 * The allocating task should reclaim at least the batch size, but for
2544 * subsequent retries we only want to do what's necessary to prevent oom
2545 * or breaching resource isolation.
2546 *
2547 * This is distinct from memory.max or page allocator behaviour because
2548 * memory.high is currently batched, whereas memory.max and the page
2549 * allocator run every time an allocation is made.
2550 */
2551 nr_reclaimed = reclaim_high(memcg,
2552 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2553 GFP_KERNEL);
2554
2555 /*
2556 * memory.high is breached and reclaim is unable to keep up. Throttle
2557 * allocators proactively to slow down excessive growth.
2558 */
2559 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2560 mem_find_max_overage(memcg));
2561
2562 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2563 swap_find_max_overage(memcg));
2564
2565 /*
2566 * Clamp the max delay per usermode return so as to still keep the
2567 * application moving forwards and also permit diagnostics, albeit
2568 * extremely slowly.
2569 */
2570 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2571
2572 /*
2573 * Don't sleep if the amount of jiffies this memcg owes us is so low
2574 * that it's not even worth doing, in an attempt to be nice to those who
2575 * go only a small amount over their memory.high value and maybe haven't
2576 * been aggressively reclaimed enough yet.
2577 */
2578 if (penalty_jiffies <= HZ / 100)
2579 goto out;
2580
2581 /*
2582 * If reclaim is making forward progress but we're still over
2583 * memory.high, we want to encourage that rather than doing allocator
2584 * throttling.
2585 */
2586 if (nr_reclaimed || nr_retries--) {
2587 in_retry = true;
2588 goto retry_reclaim;
2589 }
2590
2591 /*
2592 * If we exit early, we're guaranteed to die (since
2593 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2594 * need to account for any ill-begotten jiffies to pay them off later.
2595 */
2596 psi_memstall_enter(&pflags);
2597 schedule_timeout_killable(penalty_jiffies);
2598 psi_memstall_leave(&pflags);
2599
2600 out:
2601 css_put(&memcg->css);
2602 }
2603
2604 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2605 unsigned int nr_pages)
2606 {
2607 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2608 int nr_retries = MAX_RECLAIM_RETRIES;
2609 struct mem_cgroup *mem_over_limit;
2610 struct page_counter *counter;
2611 enum oom_status oom_status;
2612 unsigned long nr_reclaimed;
2613 bool passed_oom = false;
2614 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2615 bool drained = false;
2616 unsigned long pflags;
2617
2618 retry:
2619 if (consume_stock(memcg, nr_pages))
2620 return 0;
2621
2622 if (!do_memsw_account() ||
2623 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2624 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2625 goto done_restock;
2626 if (do_memsw_account())
2627 page_counter_uncharge(&memcg->memsw, batch);
2628 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2629 } else {
2630 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2631 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2632 }
2633
2634 if (batch > nr_pages) {
2635 batch = nr_pages;
2636 goto retry;
2637 }
2638
2639 /*
2640 * Memcg doesn't have a dedicated reserve for atomic
2641 * allocations. But like the global atomic pool, we need to
2642 * put the burden of reclaim on regular allocation requests
2643 * and let these go through as privileged allocations.
2644 */
2645 if (gfp_mask & __GFP_ATOMIC)
2646 goto force;
2647
2648 /*
2649 * Prevent unbounded recursion when reclaim operations need to
2650 * allocate memory. This might exceed the limits temporarily,
2651 * but we prefer facilitating memory reclaim and getting back
2652 * under the limit over triggering OOM kills in these cases.
2653 */
2654 if (unlikely(current->flags & PF_MEMALLOC))
2655 goto force;
2656
2657 if (unlikely(task_in_memcg_oom(current)))
2658 goto nomem;
2659
2660 if (!gfpflags_allow_blocking(gfp_mask))
2661 goto nomem;
2662
2663 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2664
2665 psi_memstall_enter(&pflags);
2666 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2667 gfp_mask, reclaim_options);
2668 psi_memstall_leave(&pflags);
2669
2670 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2671 goto retry;
2672
2673 if (!drained) {
2674 drain_all_stock(mem_over_limit);
2675 drained = true;
2676 goto retry;
2677 }
2678
2679 if (gfp_mask & __GFP_NORETRY)
2680 goto nomem;
2681 /*
2682 * Even though the limit is exceeded at this point, reclaim
2683 * may have been able to free some pages. Retry the charge
2684 * before killing the task.
2685 *
2686 * Only for regular pages, though: huge pages are rather
2687 * unlikely to succeed so close to the limit, and we fall back
2688 * to regular pages anyway in case of failure.
2689 */
2690 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2691 goto retry;
2692 /*
2693 * At task move, charge accounts can be doubly counted. So, it's
2694 * better to wait until the end of task_move if something is going on.
2695 */
2696 if (mem_cgroup_wait_acct_move(mem_over_limit))
2697 goto retry;
2698
2699 if (nr_retries--)
2700 goto retry;
2701
2702 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2703 goto nomem;
2704
2705 /* Avoid endless loop for tasks bypassed by the oom killer */
2706 if (passed_oom && task_is_dying())
2707 goto nomem;
2708
2709 /*
2710 * keep retrying as long as the memcg oom killer is able to make
2711 * a forward progress or bypass the charge if the oom killer
2712 * couldn't make any progress.
2713 */
2714 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2715 get_order(nr_pages * PAGE_SIZE));
2716 if (oom_status == OOM_SUCCESS) {
2717 passed_oom = true;
2718 nr_retries = MAX_RECLAIM_RETRIES;
2719 goto retry;
2720 }
2721 nomem:
2722 if (!(gfp_mask & __GFP_NOFAIL))
2723 return -ENOMEM;
2724 force:
2725 /*
2726 * The allocation either can't fail or will lead to more memory
2727 * being freed very soon. Allow memory usage to go over the limit
2728 * temporarily by force charging it.
2729 */
2730 page_counter_charge(&memcg->memory, nr_pages);
2731 if (do_memsw_account())
2732 page_counter_charge(&memcg->memsw, nr_pages);
2733
2734 return 0;
2735
2736 done_restock:
2737 if (batch > nr_pages)
2738 refill_stock(memcg, batch - nr_pages);
2739
2740 /*
2741 * If the hierarchy is above the normal consumption range, schedule
2742 * reclaim on returning to userland. We can perform reclaim here
2743 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2744 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2745 * not recorded as it most likely matches current's and won't
2746 * change in the meantime. As high limit is checked again before
2747 * reclaim, the cost of mismatch is negligible.
2748 */
2749 do {
2750 bool mem_high, swap_high;
2751
2752 mem_high = page_counter_read(&memcg->memory) >
2753 READ_ONCE(memcg->memory.high);
2754 swap_high = page_counter_read(&memcg->swap) >
2755 READ_ONCE(memcg->swap.high);
2756
2757 /* Don't bother a random interrupted task */
2758 if (in_interrupt()) {
2759 if (mem_high) {
2760 schedule_work(&memcg->high_work);
2761 break;
2762 }
2763 continue;
2764 }
2765
2766 if (mem_high || swap_high) {
2767 /*
2768 * The allocating tasks in this cgroup will need to do
2769 * reclaim or be throttled to prevent further growth
2770 * of the memory or swap footprints.
2771 *
2772 * Target some best-effort fairness between the tasks,
2773 * and distribute reclaim work and delay penalties
2774 * based on how much each task is actually allocating.
2775 */
2776 current->memcg_nr_pages_over_high += batch;
2777 set_notify_resume(current);
2778 break;
2779 }
2780 } while ((memcg = parent_mem_cgroup(memcg)));
2781
2782 return 0;
2783 }
2784
2785 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2786 unsigned int nr_pages)
2787 {
2788 if (mem_cgroup_is_root(memcg))
2789 return 0;
2790
2791 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2792 }
2793
2794 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2795 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2796 {
2797 if (mem_cgroup_is_root(memcg))
2798 return;
2799
2800 page_counter_uncharge(&memcg->memory, nr_pages);
2801 if (do_memsw_account())
2802 page_counter_uncharge(&memcg->memsw, nr_pages);
2803 }
2804 #endif
2805
2806 static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2807 {
2808 VM_BUG_ON_PAGE(page_memcg(page), page);
2809 /*
2810 * Any of the following ensures page's memcg stability:
2811 *
2812 * - the page lock
2813 * - LRU isolation
2814 * - lock_page_memcg()
2815 * - exclusive reference
2816 * - mem_cgroup_trylock_pages()
2817 */
2818 page->memcg_data = (unsigned long)memcg;
2819 }
2820
2821 static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
2822 {
2823 struct mem_cgroup *memcg;
2824
2825 rcu_read_lock();
2826 retry:
2827 memcg = obj_cgroup_memcg(objcg);
2828 if (unlikely(!css_tryget(&memcg->css)))
2829 goto retry;
2830 rcu_read_unlock();
2831
2832 return memcg;
2833 }
2834
2835 #ifdef CONFIG_MEMCG_KMEM
2836 /*
2837 * The allocated objcg pointers array is not accounted directly.
2838 * Moreover, it should not come from a DMA buffer and is not readily
2839 * reclaimable. So those GFP bits should be masked off.
2840 */
2841 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
2842 __GFP_ACCOUNT | __GFP_NOFAIL)
2843
2844 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2845 gfp_t gfp, bool new_page)
2846 {
2847 unsigned int objects = objs_per_slab_page(s, page);
2848 unsigned long memcg_data;
2849 void *vec;
2850
2851 gfp &= ~OBJCGS_CLEAR_MASK;
2852 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2853 page_to_nid(page));
2854 if (!vec)
2855 return -ENOMEM;
2856
2857 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2858 if (new_page) {
2859 /*
2860 * If the slab page is brand new and nobody can yet access
2861 * its memcg_data, no synchronization is required and
2862 * memcg_data can be simply assigned.
2863 */
2864 page->memcg_data = memcg_data;
2865 } else if (cmpxchg(&page->memcg_data, 0, memcg_data)) {
2866 /*
2867 * If the slab page is already in use, somebody can allocate
2868 * and assign obj_cgroups in parallel. In this case the existing
2869 * objcg vector should be reused.
2870 */
2871 kfree(vec);
2872 return 0;
2873 }
2874
2875 kmemleak_not_leak(vec);
2876 return 0;
2877 }
2878
2879 /*
2880 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2881 *
2882 * A passed kernel object can be a slab object or a generic kernel page, so
2883 * different mechanisms for getting the memory cgroup pointer should be used.
2884 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2885 * cannot know for sure how the kernel object is implemented.
2886 * mem_cgroup_from_obj() can be safely used in such cases.
2887 *
2888 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2889 * cgroup_mutex, etc.
2890 */
2891 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2892 {
2893 struct page *page;
2894
2895 if (mem_cgroup_disabled())
2896 return NULL;
2897
2898 page = virt_to_head_page(p);
2899
2900 /*
2901 * Slab objects are accounted individually, not per-page.
2902 * Memcg membership data for each individual object is saved in
2903 * the page->obj_cgroups.
2904 */
2905 if (page_objcgs_check(page)) {
2906 struct obj_cgroup *objcg;
2907 unsigned int off;
2908
2909 off = obj_to_index(page->slab_cache, page, p);
2910 objcg = page_objcgs(page)[off];
2911 if (objcg)
2912 return obj_cgroup_memcg(objcg);
2913
2914 return NULL;
2915 }
2916
2917 /*
2918 * page_memcg_check() is used here, because the page_objcgs_check()
2919 * call above could fail because the object cgroups vector wasn't set
2920 * at that moment, but it can be set concurrently.
2921 * page_memcg_check(page) will guarantee that a proper memory
2922 * cgroup pointer or NULL will be returned.
2923 */
2924 return page_memcg_check(page);
2925 }
2926
2927 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2928 {
2929 struct obj_cgroup *objcg = NULL;
2930 struct mem_cgroup *memcg;
2931
2932 if (memcg_kmem_bypass())
2933 return NULL;
2934
2935 rcu_read_lock();
2936 if (unlikely(active_memcg()))
2937 memcg = active_memcg();
2938 else
2939 memcg = mem_cgroup_from_task(current);
2940
2941 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2942 objcg = rcu_dereference(memcg->objcg);
2943 if (objcg && obj_cgroup_tryget(objcg))
2944 break;
2945 objcg = NULL;
2946 }
2947 rcu_read_unlock();
2948
2949 return objcg;
2950 }
2951
2952 static int memcg_alloc_cache_id(void)
2953 {
2954 int id, size;
2955 int err;
2956
2957 id = ida_simple_get(&memcg_cache_ida,
2958 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2959 if (id < 0)
2960 return id;
2961
2962 if (id < memcg_nr_cache_ids)
2963 return id;
2964
2965 /*
2966 * There's no space for the new id in memcg_caches arrays,
2967 * so we have to grow them.
2968 */
2969 down_write(&memcg_cache_ids_sem);
2970
2971 size = 2 * (id + 1);
2972 if (size < MEMCG_CACHES_MIN_SIZE)
2973 size = MEMCG_CACHES_MIN_SIZE;
2974 else if (size > MEMCG_CACHES_MAX_SIZE)
2975 size = MEMCG_CACHES_MAX_SIZE;
2976
2977 err = memcg_update_all_list_lrus(size);
2978 if (!err)
2979 memcg_nr_cache_ids = size;
2980
2981 up_write(&memcg_cache_ids_sem);
2982
2983 if (err) {
2984 ida_simple_remove(&memcg_cache_ida, id);
2985 return err;
2986 }
2987 return id;
2988 }
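/*
 * Illustrative example of the growth policy above: if memcg_nr_cache_ids
 * is currently 16 and the IDA hands out id 16, the arrays are grown to
 * 2 * (16 + 1) = 34 entries (clamped to the MIN/MAX bounds), leaving
 * headroom so the next batch of new cgroups can be onlined without
 * another resize.
 */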
2989
2990 static void memcg_free_cache_id(int id)
2991 {
2992 ida_simple_remove(&memcg_cache_ida, id);
2993 }
2994
2995 /*
2996 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2997 * @objcg: object cgroup to uncharge
2998 * @nr_pages: number of pages to uncharge
2999 */
3000 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3001 unsigned int nr_pages)
3002 {
3003 struct mem_cgroup *memcg;
3004
3005 memcg = get_mem_cgroup_from_objcg(objcg);
3006
3007 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
3008 page_counter_uncharge(&memcg->kmem, nr_pages);
3009 refill_stock(memcg, nr_pages);
3010
3011 css_put(&memcg->css);
3012 }
3013
3014 /*
3015 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3016 * @objcg: object cgroup to charge
3017 * @gfp: reclaim mode
3018 * @nr_pages: number of pages to charge
3019 *
3020 * Returns 0 on success, an error code on failure.
3021 */
3022 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3023 unsigned int nr_pages)
3024 {
3025 struct page_counter *counter;
3026 struct mem_cgroup *memcg;
3027 int ret;
3028
3029 memcg = get_mem_cgroup_from_objcg(objcg);
3030
3031 ret = try_charge_memcg(memcg, gfp, nr_pages);
3032 if (ret)
3033 goto out;
3034
3035 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
3036 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
3037
3038 /*
3039 * Enforce __GFP_NOFAIL allocation because callers are not
3040 * prepared to see failures and likely do not have any failure
3041 * handling code.
3042 */
3043 if (gfp & __GFP_NOFAIL) {
3044 page_counter_charge(&memcg->kmem, nr_pages);
3045 goto out;
3046 }
3047 cancel_charge(memcg, nr_pages);
3048 ret = -ENOMEM;
3049 }
3050 out:
3051 css_put(&memcg->css);
3052
3053 return ret;
3054 }
3055
3056 /**
3057 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3058 * @page: page to charge
3059 * @gfp: reclaim mode
3060 * @order: allocation order
3061 *
3062 * Returns 0 on success, an error code on failure.
3063 */
3064 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3065 {
3066 struct obj_cgroup *objcg;
3067 int ret = 0;
3068
3069 objcg = get_obj_cgroup_from_current();
3070 if (objcg) {
3071 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3072 if (!ret) {
3073 page->memcg_data = (unsigned long)objcg |
3074 MEMCG_DATA_KMEM;
3075 return 0;
3076 }
3077 obj_cgroup_put(objcg);
3078 }
3079 return ret;
3080 }
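/*
 * Sketch of how this path is reached (simplified): a kernel page
 * allocation made with __GFP_ACCOUNT, e.g.
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, 2);
 *
 * ends up here with order == 2, charging four pages to the current
 * task's objcg and tagging page->memcg_data with MEMCG_DATA_KMEM. The
 * charge is undone by __memcg_kmem_uncharge_page() when the pages are
 * freed.
 */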
3081
3082 /**
3083 * __memcg_kmem_uncharge_page: uncharge a kmem page
3084 * @page: page to uncharge
3085 * @order: allocation order
3086 */
3087 void __memcg_kmem_uncharge_page(struct page *page, int order)
3088 {
3089 struct obj_cgroup *objcg;
3090 unsigned int nr_pages = 1 << order;
3091
3092 if (!PageMemcgKmem(page))
3093 return;
3094
3095 objcg = __page_objcg(page);
3096 obj_cgroup_uncharge_pages(objcg, nr_pages);
3097 page->memcg_data = 0;
3098 obj_cgroup_put(objcg);
3099 }
3100
3101 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3102 enum node_stat_item idx, int nr)
3103 {
3104 unsigned long flags;
3105 struct obj_stock *stock = get_obj_stock(&flags);
3106 int *bytes;
3107
3108 /*
3109 * Save vmstat data in stock and skip vmstat array update unless
3110 * accumulating over a page of vmstat data or when pgdat or idx
3111 * changes.
3112 */
3113 if (stock->cached_objcg != objcg) {
3114 drain_obj_stock(stock);
3115 obj_cgroup_get(objcg);
3116 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3117 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3118 stock->cached_objcg = objcg;
3119 stock->cached_pgdat = pgdat;
3120 } else if (stock->cached_pgdat != pgdat) {
3121 /* Flush the existing cached vmstat data */
3122 struct pglist_data *oldpg = stock->cached_pgdat;
3123
3124 if (stock->nr_slab_reclaimable_b) {
3125 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3126 stock->nr_slab_reclaimable_b);
3127 stock->nr_slab_reclaimable_b = 0;
3128 }
3129 if (stock->nr_slab_unreclaimable_b) {
3130 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3131 stock->nr_slab_unreclaimable_b);
3132 stock->nr_slab_unreclaimable_b = 0;
3133 }
3134 stock->cached_pgdat = pgdat;
3135 }
3136
3137 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3138 : &stock->nr_slab_unreclaimable_b;
3139 /*
3140 * Even for large objects >= PAGE_SIZE, the vmstat data will still be
3141 * cached locally at least once before pushing it out.
3142 */
3143 if (!*bytes) {
3144 *bytes = nr;
3145 nr = 0;
3146 } else {
3147 *bytes += nr;
3148 if (abs(*bytes) > PAGE_SIZE) {
3149 nr = *bytes;
3150 *bytes = 0;
3151 } else {
3152 nr = 0;
3153 }
3154 }
3155 if (nr)
3156 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3157
3158 put_obj_stock(flags);
3159 }
3160
3161 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3162 {
3163 unsigned long flags;
3164 struct obj_stock *stock = get_obj_stock(&flags);
3165 bool ret = false;
3166
3167 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3168 stock->nr_bytes -= nr_bytes;
3169 ret = true;
3170 }
3171
3172 put_obj_stock(flags);
3173
3174 return ret;
3175 }
3176
3177 static void drain_obj_stock(struct obj_stock *stock)
3178 {
3179 struct obj_cgroup *old = stock->cached_objcg;
3180
3181 if (!old)
3182 return;
3183
3184 if (stock->nr_bytes) {
3185 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3186 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3187
3188 if (nr_pages)
3189 obj_cgroup_uncharge_pages(old, nr_pages);
3190
3191 /*
3192 * The leftover is flushed to the centralized per-memcg value.
3193 * On the next attempt to refill obj stock it will be moved
3194 * to a per-cpu stock (probably on another CPU), see
3195 * refill_obj_stock().
3196 *
3197 * How often it's flushed is a trade-off between the memory
3198 * limit enforcement accuracy and potential CPU contention,
3199 * so it might be changed in the future.
3200 */
3201 atomic_add(nr_bytes, &old->nr_charged_bytes);
3202 stock->nr_bytes = 0;
3203 }
3204
3205 /*
3206 * Flush the vmstat data in current stock
3207 */
3208 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3209 if (stock->nr_slab_reclaimable_b) {
3210 mod_objcg_mlstate(old, stock->cached_pgdat,
3211 NR_SLAB_RECLAIMABLE_B,
3212 stock->nr_slab_reclaimable_b);
3213 stock->nr_slab_reclaimable_b = 0;
3214 }
3215 if (stock->nr_slab_unreclaimable_b) {
3216 mod_objcg_mlstate(old, stock->cached_pgdat,
3217 NR_SLAB_UNRECLAIMABLE_B,
3218 stock->nr_slab_unreclaimable_b);
3219 stock->nr_slab_unreclaimable_b = 0;
3220 }
3221 stock->cached_pgdat = NULL;
3222 }
3223
3224 obj_cgroup_put(old);
3225 stock->cached_objcg = NULL;
3226 }
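/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): if the
 * stock holds nr_bytes == 5000 when it is drained, one full page
 * (4096 bytes) is uncharged via obj_cgroup_uncharge_pages() and the
 * remaining 904 bytes are added to objcg->nr_charged_bytes, from where
 * a later refill_obj_stock(), possibly on another CPU, can pull them
 * back.
 */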
3227
3228 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3229 struct mem_cgroup *root_memcg)
3230 {
3231 struct mem_cgroup *memcg;
3232
3233 if (in_task() && stock->task_obj.cached_objcg) {
3234 memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg);
3235 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3236 return true;
3237 }
3238 if (stock->irq_obj.cached_objcg) {
3239 memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg);
3240 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3241 return true;
3242 }
3243
3244 return false;
3245 }
3246
3247 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3248 bool allow_uncharge)
3249 {
3250 unsigned long flags;
3251 struct obj_stock *stock = get_obj_stock(&flags);
3252 unsigned int nr_pages = 0;
3253
3254 if (stock->cached_objcg != objcg) { /* reset if necessary */
3255 drain_obj_stock(stock);
3256 obj_cgroup_get(objcg);
3257 stock->cached_objcg = objcg;
3258 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3259 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3260 allow_uncharge = true; /* Allow uncharge when objcg changes */
3261 }
3262 stock->nr_bytes += nr_bytes;
3263
3264 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3265 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3266 stock->nr_bytes &= (PAGE_SIZE - 1);
3267 }
3268
3269 put_obj_stock(flags);
3270
3271 if (nr_pages)
3272 obj_cgroup_uncharge_pages(objcg, nr_pages);
3273 }
3274
3275 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3276 {
3277 unsigned int nr_pages, nr_bytes;
3278 int ret;
3279
3280 if (consume_obj_stock(objcg, size))
3281 return 0;
3282
3283 /*
3284 * In theory, objcg->nr_charged_bytes can have enough
3285 * pre-charged bytes to satisfy the allocation. However,
3286 * flushing objcg->nr_charged_bytes requires two atomic
3287 * operations, and objcg->nr_charged_bytes can't be big.
3288 * The shared objcg->nr_charged_bytes can also become a
3289 * performance bottleneck if all tasks of the same memcg are
3290 * trying to update it. So it's better to ignore it and try to
3291 * grab some new pages. The stock's nr_bytes will be flushed to
3292 * objcg->nr_charged_bytes later on when objcg changes.
3293 *
3294 * The stock's nr_bytes may contain enough pre-charged bytes
3295 * to allow one less page to be charged, but we can't rely
3296 * on the pre-charged bytes not being changed outside of
3297 * consume_obj_stock() or refill_obj_stock(). So ignore those
3298 * pre-charged bytes as well when charging pages. To avoid a
3299 * page uncharge right after a page charge, we set the
3300 * allow_uncharge flag to false when calling refill_obj_stock()
3301 * to temporarily allow the pre-charged bytes to exceed the page
3302 * size limit. The maximum reachable value of the pre-charged
3303 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3304 * race.
3305 */
3306 nr_pages = size >> PAGE_SHIFT;
3307 nr_bytes = size & (PAGE_SIZE - 1);
3308
3309 if (nr_bytes)
3310 nr_pages += 1;
3311
3312 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3313 if (!ret && nr_bytes)
3314 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3315
3316 return ret;
3317 }
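/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): charging a
 * 700-byte object that misses the per-cpu stock charges one full page
 * via obj_cgroup_charge_pages() and then stashes the unused
 * 4096 - 700 = 3396 bytes in the stock with refill_obj_stock(..., false),
 * so the next small allocation from the same objcg can be served without
 * touching the page counters.
 */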
3318
3319 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3320 {
3321 refill_obj_stock(objcg, size, true);
3322 }
3323
3324 #endif /* CONFIG_MEMCG_KMEM */
3325
3326 /*
3327 * Because page_memcg(head) is not set on tails, set it now.
3328 */
3329 void split_page_memcg(struct page *head, unsigned int nr)
3330 {
3331 struct mem_cgroup *memcg = page_memcg(head);
3332 int i;
3333
3334 if (mem_cgroup_disabled() || !memcg)
3335 return;
3336
3337 for (i = 1; i < nr; i++)
3338 head[i].memcg_data = head->memcg_data;
3339
3340 if (PageMemcgKmem(head))
3341 obj_cgroup_get_many(__page_objcg(head), nr - 1);
3342 else
3343 css_get_many(&memcg->css, nr - 1);
3344 }
3345
3346 #ifdef CONFIG_MEMCG_SWAP
3347 /**
3348 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3349 * @entry: swap entry to be moved
3350 * @from: mem_cgroup which the entry is moved from
3351 * @to: mem_cgroup which the entry is moved to
3352 *
3353 * It succeeds only when the swap_cgroup's record for this entry is the same
3354 * as the mem_cgroup's id of @from.
3355 *
3356 * Returns 0 on success, -EINVAL on failure.
3357 *
3358 * The caller must have charged to @to, IOW, called page_counter_charge() on
3359 * both res and memsw, and called css_get().
3360 */
3361 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3362 struct mem_cgroup *from, struct mem_cgroup *to)
3363 {
3364 unsigned short old_id, new_id;
3365
3366 old_id = mem_cgroup_id(from);
3367 new_id = mem_cgroup_id(to);
3368
3369 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3370 mod_memcg_state(from, MEMCG_SWAP, -1);
3371 mod_memcg_state(to, MEMCG_SWAP, 1);
3372 return 0;
3373 }
3374 return -EINVAL;
3375 }
3376 #else
3377 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3378 struct mem_cgroup *from, struct mem_cgroup *to)
3379 {
3380 return -EINVAL;
3381 }
3382 #endif
3383
3384 static DEFINE_MUTEX(memcg_max_mutex);
3385
3386 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3387 unsigned long max, bool memsw)
3388 {
3389 bool enlarge = false;
3390 bool drained = false;
3391 int ret;
3392 bool limits_invariant;
3393 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3394
3395 do {
3396 if (signal_pending(current)) {
3397 ret = -EINTR;
3398 break;
3399 }
3400
3401 mutex_lock(&memcg_max_mutex);
3402 /*
3403 * Make sure that the new limit (memsw or memory limit) doesn't
3404 * break our basic invariant rule memory.max <= memsw.max.
3405 */
3406 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3407 max <= memcg->memsw.max;
3408 if (!limits_invariant) {
3409 mutex_unlock(&memcg_max_mutex);
3410 ret = -EINVAL;
3411 break;
3412 }
3413 if (max > counter->max)
3414 enlarge = true;
3415 ret = page_counter_set_max(counter, max);
3416 mutex_unlock(&memcg_max_mutex);
3417
3418 if (!ret)
3419 break;
3420
3421 if (!drained) {
3422 drain_all_stock(memcg);
3423 drained = true;
3424 continue;
3425 }
3426
3427 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3428 memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3429 ret = -EBUSY;
3430 break;
3431 }
3432 } while (true);
3433
3434 if (!ret && enlarge)
3435 memcg_oom_recover(memcg);
3436
3437 return ret;
3438 }
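/*
 * Illustrative example of the invariant check above: with memory.max at
 * 512M and memsw.max at 1G, lowering memsw.max to 256M fails with
 * -EINVAL because it would drop below memory.max, and raising memory.max
 * to 2G likewise fails because it would exceed memsw.max; the two limits
 * have to be adjusted in an order that keeps memory.max <= memsw.max.
 */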
3439
3440 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3441 gfp_t gfp_mask,
3442 unsigned long *total_scanned)
3443 {
3444 unsigned long nr_reclaimed = 0;
3445 struct mem_cgroup_per_node *mz, *next_mz = NULL;
3446 unsigned long reclaimed;
3447 int loop = 0;
3448 struct mem_cgroup_tree_per_node *mctz;
3449 unsigned long excess;
3450 unsigned long nr_scanned;
3451
3452 if (lru_gen_enabled())
3453 return 0;
3454
3455 if (order > 0)
3456 return 0;
3457
3458 mctz = soft_limit_tree_node(pgdat->node_id);
3459
3460 /*
3461 * Do not even bother to check the largest node if the root
3462 * is empty. Do it lockless to prevent lock bouncing. Races
3463 * are acceptable as soft limit is best effort anyway.
3464 */
3465 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3466 return 0;
3467
3468 /*
3469 * This loop can run for a while, especially if mem_cgroups continuously
3470 * keep exceeding their soft limit and putting the system under
3471 * pressure.
3472 */
3473 do {
3474 if (next_mz)
3475 mz = next_mz;
3476 else
3477 mz = mem_cgroup_largest_soft_limit_node(mctz);
3478 if (!mz)
3479 break;
3480
3481 nr_scanned = 0;
3482 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3483 gfp_mask, &nr_scanned);
3484 nr_reclaimed += reclaimed;
3485 *total_scanned += nr_scanned;
3486 spin_lock_irq(&mctz->lock);
3487 __mem_cgroup_remove_exceeded(mz, mctz);
3488
3489 /*
3490 * If we failed to reclaim anything from this memory cgroup
3491 * it is time to move on to the next cgroup
3492 */
3493 next_mz = NULL;
3494 if (!reclaimed)
3495 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3496
3497 excess = soft_limit_excess(mz->memcg);
3498 /*
3499 * One school of thought says that we should not add
3500 * back the node to the tree if reclaim returns 0.
3501 * But our reclaim could return 0, simply because, due
3502 * to priority, we are exposing a smaller subset of
3503 * memory to reclaim from. Consider this as a longer
3504 * term TODO.
3505 */
3506 /* If excess == 0, no tree ops */
3507 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3508 spin_unlock_irq(&mctz->lock);
3509 css_put(&mz->memcg->css);
3510 loop++;
3511 /*
3512 * Could not reclaim anything and there are no more
3513 * mem cgroups to try or we seem to be looping without
3514 * reclaiming anything.
3515 */
3516 if (!nr_reclaimed &&
3517 (next_mz == NULL ||
3518 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3519 break;
3520 } while (!nr_reclaimed);
3521 if (next_mz)
3522 css_put(&next_mz->memcg->css);
3523 return nr_reclaimed;
3524 }
3525
3526 /*
3527 * Reclaims as many pages from the given memcg as possible.
3528 *
3529 * Caller is responsible for holding css reference for memcg.
3530 */
3531 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3532 {
3533 int nr_retries = MAX_RECLAIM_RETRIES;
3534
3535 /* we call try-to-free pages to make this cgroup empty */
3536 lru_add_drain_all();
3537
3538 drain_all_stock(memcg);
3539
3540 /* try to free all pages in this cgroup */
3541 while (nr_retries && page_counter_read(&memcg->memory)) {
3542 int progress;
3543
3544 if (signal_pending(current))
3545 return -EINTR;
3546
3547 progress = try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3548 MEMCG_RECLAIM_MAY_SWAP);
3549 if (!progress) {
3550 nr_retries--;
3551 /* maybe some writeback is necessary */
3552 congestion_wait(BLK_RW_ASYNC, HZ/10);
3553 }
3554
3555 }
3556
3557 return 0;
3558 }
3559
3560 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3561 char *buf, size_t nbytes,
3562 loff_t off)
3563 {
3564 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3565
3566 if (mem_cgroup_is_root(memcg))
3567 return -EINVAL;
3568 return mem_cgroup_force_empty(memcg) ?: nbytes;
3569 }
3570
3571 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3572 struct cftype *cft)
3573 {
3574 return 1;
3575 }
3576
3577 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3578 struct cftype *cft, u64 val)
3579 {
3580 if (val == 1)
3581 return 0;
3582
3583 pr_warn_once("Non-hierarchical mode is deprecated. "
3584 "Please report your usecase to linux-mm@kvack.org if you "
3585 "depend on this functionality.\n");
3586
3587 return -EINVAL;
3588 }
3589
3590 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3591 {
3592 unsigned long val;
3593
3594 if (mem_cgroup_is_root(memcg)) {
3595 mem_cgroup_flush_stats();
3596 val = memcg_page_state(memcg, NR_FILE_PAGES) +
3597 memcg_page_state(memcg, NR_ANON_MAPPED);
3598 if (swap)
3599 val += memcg_page_state(memcg, MEMCG_SWAP);
3600 } else {
3601 if (!swap)
3602 val = page_counter_read(&memcg->memory);
3603 else
3604 val = page_counter_read(&memcg->memsw);
3605 }
3606 return val;
3607 }
3608
3609 enum {
3610 RES_USAGE,
3611 RES_LIMIT,
3612 RES_MAX_USAGE,
3613 RES_FAILCNT,
3614 RES_SOFT_LIMIT,
3615 };
3616
3617 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3618 struct cftype *cft)
3619 {
3620 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3621 struct page_counter *counter;
3622
3623 switch (MEMFILE_TYPE(cft->private)) {
3624 case _MEM:
3625 counter = &memcg->memory;
3626 break;
3627 case _MEMSWAP:
3628 counter = &memcg->memsw;
3629 break;
3630 case _KMEM:
3631 counter = &memcg->kmem;
3632 break;
3633 case _TCP:
3634 counter = &memcg->tcpmem;
3635 break;
3636 default:
3637 BUG();
3638 }
3639
3640 switch (MEMFILE_ATTR(cft->private)) {
3641 case RES_USAGE:
3642 if (counter == &memcg->memory)
3643 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3644 if (counter == &memcg->memsw)
3645 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3646 return (u64)page_counter_read(counter) * PAGE_SIZE;
3647 case RES_LIMIT:
3648 return (u64)counter->max * PAGE_SIZE;
3649 case RES_MAX_USAGE:
3650 return (u64)counter->watermark * PAGE_SIZE;
3651 case RES_FAILCNT:
3652 return counter->failcnt;
3653 case RES_SOFT_LIMIT:
3654 return (u64)memcg->soft_limit * PAGE_SIZE;
3655 default:
3656 BUG();
3657 }
3658 }
3659
3660 #ifdef CONFIG_MEMCG_KMEM
3661 static int memcg_online_kmem(struct mem_cgroup *memcg)
3662 {
3663 struct obj_cgroup *objcg;
3664 int memcg_id;
3665
3666 if (cgroup_memory_nokmem)
3667 return 0;
3668
3669 BUG_ON(memcg->kmemcg_id >= 0);
3670 BUG_ON(memcg->kmem_state);
3671
3672 memcg_id = memcg_alloc_cache_id();
3673 if (memcg_id < 0)
3674 return memcg_id;
3675
3676 objcg = obj_cgroup_alloc();
3677 if (!objcg) {
3678 memcg_free_cache_id(memcg_id);
3679 return -ENOMEM;
3680 }
3681 objcg->memcg = memcg;
3682 rcu_assign_pointer(memcg->objcg, objcg);
3683
3684 static_branch_enable(&memcg_kmem_enabled_key);
3685
3686 memcg->kmemcg_id = memcg_id;
3687 memcg->kmem_state = KMEM_ONLINE;
3688
3689 return 0;
3690 }
3691
3692 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3693 {
3694 struct cgroup_subsys_state *css;
3695 struct mem_cgroup *parent, *child;
3696 int kmemcg_id;
3697
3698 if (memcg->kmem_state != KMEM_ONLINE)
3699 return;
3700
3701 memcg->kmem_state = KMEM_ALLOCATED;
3702
3703 parent = parent_mem_cgroup(memcg);
3704 if (!parent)
3705 parent = root_mem_cgroup;
3706
3707 memcg_reparent_objcgs(memcg, parent);
3708
3709 kmemcg_id = memcg->kmemcg_id;
3710 BUG_ON(kmemcg_id < 0);
3711
3712 /*
3713 * Change kmemcg_id of this cgroup and all its descendants to the
3714 * parent's id, and then move all entries from this cgroup's list_lrus
3715 * to ones of the parent. After we have finished, all list_lrus
3716 * corresponding to this cgroup are guaranteed to remain empty. The
3717 * ordering is imposed by list_lru_node->lock taken by
3718 * memcg_drain_all_list_lrus().
3719 */
3720 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3721 css_for_each_descendant_pre(css, &memcg->css) {
3722 child = mem_cgroup_from_css(css);
3723 BUG_ON(child->kmemcg_id != kmemcg_id);
3724 child->kmemcg_id = parent->kmemcg_id;
3725 }
3726 rcu_read_unlock();
3727
3728 memcg_drain_all_list_lrus(kmemcg_id, parent);
3729
3730 memcg_free_cache_id(kmemcg_id);
3731 }
3732
3733 static void memcg_free_kmem(struct mem_cgroup *memcg)
3734 {
3735 /* css_alloc() failed, offlining didn't happen */
3736 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3737 memcg_offline_kmem(memcg);
3738 }
3739 #else
3740 static int memcg_online_kmem(struct mem_cgroup *memcg)
3741 {
3742 return 0;
3743 }
3744 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3745 {
3746 }
3747 static void memcg_free_kmem(struct mem_cgroup *memcg)
3748 {
3749 }
3750 #endif /* CONFIG_MEMCG_KMEM */
3751
3752 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3753 unsigned long max)
3754 {
3755 int ret;
3756
3757 mutex_lock(&memcg_max_mutex);
3758 ret = page_counter_set_max(&memcg->kmem, max);
3759 mutex_unlock(&memcg_max_mutex);
3760 return ret;
3761 }
3762
3763 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3764 {
3765 int ret;
3766
3767 mutex_lock(&memcg_max_mutex);
3768
3769 ret = page_counter_set_max(&memcg->tcpmem, max);
3770 if (ret)
3771 goto out;
3772
3773 if (!memcg->tcpmem_active) {
3774 /*
3775 * The active flag needs to be written after the static_key
3776 * update. This is what guarantees that the socket activation
3777 * function is the last one to run. See mem_cgroup_sk_alloc()
3778 * for details, and note that we don't mark any socket as
3779 * belonging to this memcg until that flag is up.
3780 *
3781 * We need to do this, because static_keys will span multiple
3782 * sites, but we can't control their order. If we mark a socket
3783 * as accounted, but the accounting functions are not patched in
3784 * yet, we'll lose accounting.
3785 *
3786 * We never race with the readers in mem_cgroup_sk_alloc(),
3787 * because when this value change, the code to process it is not
3788 * patched in yet.
3789 */
3790 static_branch_inc(&memcg_sockets_enabled_key);
3791 memcg->tcpmem_active = true;
3792 }
3793 out:
3794 mutex_unlock(&memcg_max_mutex);
3795 return ret;
3796 }
3797
3798 /*
3799 * This is the write handler for the RES_LIMIT and
3800 * RES_SOFT_LIMIT files.
3801 */
3802 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3803 char *buf, size_t nbytes, loff_t off)
3804 {
3805 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3806 unsigned long nr_pages;
3807 int ret;
3808
3809 buf = strstrip(buf);
3810 ret = page_counter_memparse(buf, "-1", &nr_pages);
3811 if (ret)
3812 return ret;
3813
3814 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3815 case RES_LIMIT:
3816 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3817 ret = -EINVAL;
3818 break;
3819 }
3820 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3821 case _MEM:
3822 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3823 break;
3824 case _MEMSWAP:
3825 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3826 break;
3827 case _KMEM:
3828 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3829 "Please report your usecase to linux-mm@kvack.org if you "
3830 "depend on this functionality.\n");
3831 ret = memcg_update_kmem_max(memcg, nr_pages);
3832 break;
3833 case _TCP:
3834 ret = memcg_update_tcp_max(memcg, nr_pages);
3835 break;
3836 }
3837 break;
3838 case RES_SOFT_LIMIT:
3839 memcg->soft_limit = nr_pages;
3840 ret = 0;
3841 break;
3842 }
3843 return ret ?: nbytes;
3844 }
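
/*
 * An illustrative session for the RES_LIMIT handler above, assuming the
 * v1 memory controller is mounted at /sys/fs/cgroup/memory and "grp" is a
 * hypothetical child group. page_counter_memparse() accepts the usual
 * size suffixes, and the string "-1" maps to PAGE_COUNTER_MAX (no limit):
 *
 *   echo 512M > /sys/fs/cgroup/memory/grp/memory.limit_in_bytes
 *   echo 1G > /sys/fs/cgroup/memory/grp/memory.memsw.limit_in_bytes
 *   echo -1 > /sys/fs/cgroup/memory/grp/memory.limit_in_bytes
 */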
3845
3846 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3847 size_t nbytes, loff_t off)
3848 {
3849 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3850 struct page_counter *counter;
3851
3852 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3853 case _MEM:
3854 counter = &memcg->memory;
3855 break;
3856 case _MEMSWAP:
3857 counter = &memcg->memsw;
3858 break;
3859 case _KMEM:
3860 counter = &memcg->kmem;
3861 break;
3862 case _TCP:
3863 counter = &memcg->tcpmem;
3864 break;
3865 default:
3866 BUG();
3867 }
3868
3869 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3870 case RES_MAX_USAGE:
3871 page_counter_reset_watermark(counter);
3872 break;
3873 case RES_FAILCNT:
3874 counter->failcnt = 0;
3875 break;
3876 default:
3877 BUG();
3878 }
3879
3880 return nbytes;
3881 }
3882
3883 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3884 struct cftype *cft)
3885 {
3886 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3887 }
3888
3889 #ifdef CONFIG_MMU
3890 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3891 struct cftype *cft, u64 val)
3892 {
3893 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3894
3895 pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
3896 "Please report your usecase to linux-mm@kvack.org if you "
3897 "depend on this functionality.\n");
3898
3899 if (val & ~MOVE_MASK)
3900 return -EINVAL;
3901
3902 /*
3903 * No kind of locking is needed in here, because ->can_attach() will
3904 * check this value once in the beginning of the process, and then carry
3905 * on with stale data. This means that changes to this value will only
3906 * affect task migrations starting after the change.
3907 */
3908 memcg->move_charge_at_immigrate = val;
3909 return 0;
3910 }
3911 #else
3912 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3913 struct cftype *cft, u64 val)
3914 {
3915 return -ENOSYS;
3916 }
3917 #endif
3918
3919 #ifdef CONFIG_NUMA
3920
3921 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3922 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3923 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
3924
3925 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3926 int nid, unsigned int lru_mask, bool tree)
3927 {
3928 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3929 unsigned long nr = 0;
3930 enum lru_list lru;
3931
3932 VM_BUG_ON((unsigned)nid >= nr_node_ids);
3933
3934 for_each_lru(lru) {
3935 if (!(BIT(lru) & lru_mask))
3936 continue;
3937 if (tree)
3938 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3939 else
3940 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3941 }
3942 return nr;
3943 }
3944
3945 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3946 unsigned int lru_mask,
3947 bool tree)
3948 {
3949 unsigned long nr = 0;
3950 enum lru_list lru;
3951
3952 for_each_lru(lru) {
3953 if (!(BIT(lru) & lru_mask))
3954 continue;
3955 if (tree)
3956 nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3957 else
3958 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3959 }
3960 return nr;
3961 }
3962
3963 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3964 {
3965 struct numa_stat {
3966 const char *name;
3967 unsigned int lru_mask;
3968 };
3969
3970 static const struct numa_stat stats[] = {
3971 { "total", LRU_ALL },
3972 { "file", LRU_ALL_FILE },
3973 { "anon", LRU_ALL_ANON },
3974 { "unevictable", BIT(LRU_UNEVICTABLE) },
3975 };
3976 const struct numa_stat *stat;
3977 int nid;
3978 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3979
3980 mem_cgroup_flush_stats();
3981
3982 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3983 seq_printf(m, "%s=%lu", stat->name,
3984 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3985 false));
3986 for_each_node_state(nid, N_MEMORY)
3987 seq_printf(m, " N%d=%lu", nid,
3988 mem_cgroup_node_nr_lru_pages(memcg, nid,
3989 stat->lru_mask, false));
3990 seq_putc(m, '\n');
3991 }
3992
3993 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3994
3995 seq_printf(m, "hierarchical_%s=%lu", stat->name,
3996 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3997 true));
3998 for_each_node_state(nid, N_MEMORY)
3999 seq_printf(m, " N%d=%lu", nid,
4000 mem_cgroup_node_nr_lru_pages(memcg, nid,
4001 stat->lru_mask, true));
4002 seq_putc(m, '\n');
4003 }
4004
4005 return 0;
4006 }
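
/*
 * A sketch of the memory.numa_stat output produced above, for a
 * hypothetical two-node machine (all values are page counts): the local
 * counters come first, followed by the hierarchical ones.
 *
 *   total=8192 N0=6144 N1=2048
 *   file=5120 N0=4096 N1=1024
 *   anon=3072 N0=2048 N1=1024
 *   unevictable=0 N0=0 N1=0
 *   hierarchical_total=16384 N0=12288 N1=4096
 *   hierarchical_file=10240 N0=8192 N1=2048
 *   ...
 */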
4007 #endif /* CONFIG_NUMA */
4008
4009 static const unsigned int memcg1_stats[] = {
4010 NR_FILE_PAGES,
4011 NR_ANON_MAPPED,
4012 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4013 NR_ANON_THPS,
4014 #endif
4015 NR_SHMEM,
4016 NR_FILE_MAPPED,
4017 NR_FILE_DIRTY,
4018 NR_WRITEBACK,
4019 MEMCG_SWAP,
4020 };
4021
4022 static const char *const memcg1_stat_names[] = {
4023 "cache",
4024 "rss",
4025 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4026 "rss_huge",
4027 #endif
4028 "shmem",
4029 "mapped_file",
4030 "dirty",
4031 "writeback",
4032 "swap",
4033 };
4034
4035 /* Universal VM events cgroup1 shows, original sort order */
4036 static const unsigned int memcg1_events[] = {
4037 PGPGIN,
4038 PGPGOUT,
4039 PGFAULT,
4040 PGMAJFAULT,
4041 };
4042
4043 static int memcg_stat_show(struct seq_file *m, void *v)
4044 {
4045 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4046 unsigned long memory, memsw;
4047 struct mem_cgroup *mi;
4048 unsigned int i;
4049
4050 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4051
4052 mem_cgroup_flush_stats();
4053
4054 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4055 unsigned long nr;
4056
4057 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4058 continue;
4059 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4060 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
4061 }
4062
4063 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4064 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4065 memcg_events_local(memcg, memcg1_events[i]));
4066
4067 for (i = 0; i < NR_LRU_LISTS; i++)
4068 seq_printf(m, "%s %lu\n", lru_list_name(i),
4069 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4070 PAGE_SIZE);
4071
4072 /* Hierarchical information */
4073 memory = memsw = PAGE_COUNTER_MAX;
4074 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4075 memory = min(memory, READ_ONCE(mi->memory.max));
4076 memsw = min(memsw, READ_ONCE(mi->memsw.max));
4077 }
4078 seq_printf(m, "hierarchical_memory_limit %llu\n",
4079 (u64)memory * PAGE_SIZE);
4080 if (do_memsw_account())
4081 seq_printf(m, "hierarchical_memsw_limit %llu\n",
4082 (u64)memsw * PAGE_SIZE);
4083
4084 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4085 unsigned long nr;
4086
4087 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4088 continue;
4089 nr = memcg_page_state(memcg, memcg1_stats[i]);
4090 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4091 (u64)nr * PAGE_SIZE);
4092 }
4093
4094 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4095 seq_printf(m, "total_%s %llu\n",
4096 vm_event_name(memcg1_events[i]),
4097 (u64)memcg_events(memcg, memcg1_events[i]));
4098
4099 for (i = 0; i < NR_LRU_LISTS; i++)
4100 seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4101 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4102 PAGE_SIZE);
4103
4104 #ifdef CONFIG_DEBUG_VM
4105 {
4106 pg_data_t *pgdat;
4107 struct mem_cgroup_per_node *mz;
4108 unsigned long anon_cost = 0;
4109 unsigned long file_cost = 0;
4110
4111 for_each_online_pgdat(pgdat) {
4112 mz = memcg->nodeinfo[pgdat->node_id];
4113
4114 anon_cost += mz->lruvec.anon_cost;
4115 file_cost += mz->lruvec.file_cost;
4116 }
4117 seq_printf(m, "anon_cost %lu\n", anon_cost);
4118 seq_printf(m, "file_cost %lu\n", file_cost);
4119 }
4120 #endif
4121
4122 return 0;
4123 }
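
/*
 * A sketch of the v1 memory.stat output produced above, with hypothetical
 * values: local counters in bytes, then the VM events, the per-LRU sizes,
 * and finally the hierarchical limit and "total_*" counters.
 *
 *   cache 1048576
 *   rss 4194304
 *   mapped_file 262144
 *   pgpgin 12345
 *   pgpgout 6789
 *   inactive_anon 0
 *   active_anon 4194304
 *   hierarchical_memory_limit 536870912
 *   total_cache 2097152
 *   total_rss 8388608
 */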
4124
4125 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4126 struct cftype *cft)
4127 {
4128 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4129
4130 return mem_cgroup_swappiness(memcg);
4131 }
4132
4133 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4134 struct cftype *cft, u64 val)
4135 {
4136 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4137
4138 if (val > 200)
4139 return -EINVAL;
4140
4141 if (!mem_cgroup_is_root(memcg))
4142 memcg->swappiness = val;
4143 else
4144 vm_swappiness = val;
4145
4146 return 0;
4147 }
4148
4149 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4150 {
4151 struct mem_cgroup_threshold_ary *t;
4152 unsigned long usage;
4153 int i;
4154
4155 rcu_read_lock();
4156 if (!swap)
4157 t = rcu_dereference(memcg->thresholds.primary);
4158 else
4159 t = rcu_dereference(memcg->memsw_thresholds.primary);
4160
4161 if (!t)
4162 goto unlock;
4163
4164 usage = mem_cgroup_usage(memcg, swap);
4165
4166 /*
4167 * current_threshold points to the threshold just below or equal to usage.
4168 * If that no longer holds, a threshold was crossed after the last
4169 * call of __mem_cgroup_threshold().
4170 */
4171 i = t->current_threshold;
4172
4173 /*
4174 * Iterate backward over array of thresholds starting from
4175 * current_threshold and check if a threshold is crossed.
4176 * If none of the thresholds below usage is crossed, we read
4177 * only one element of the array here.
4178 */
4179 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4180 eventfd_signal(t->entries[i].eventfd, 1);
4181
4182 /* i = current_threshold + 1 */
4183 i++;
4184
4185 /*
4186 * Iterate forward over array of thresholds starting from
4187 * current_threshold+1 and check if a threshold is crossed.
4188 * If none of the thresholds above usage is crossed, we read
4189 * only one element of the array here.
4190 */
4191 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4192 eventfd_signal(t->entries[i].eventfd, 1);
4193
4194 /* Update current_threshold */
4195 t->current_threshold = i - 1;
4196 unlock:
4197 rcu_read_unlock();
4198 }
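
/*
 * A worked example of the walk above, with hypothetical thresholds of
 * {10M, 50M, 100M} and current_threshold == 1 (usage was last seen
 * between 50M and 100M):
 *
 *   usage rises to 120M: the backward loop reads entries[1] (50M is not
 *   above usage) and stops; the forward loop signals entries[2] because
 *   100M was crossed, and current_threshold becomes 2.
 *
 *   usage drops to 30M: the backward loop signals entries[1] (50M is now
 *   above usage) and stops at entries[0] without signaling it; the
 *   forward loop finds nothing to signal and current_threshold becomes 0.
 *
 * A threshold's eventfd is therefore signalled once per crossing, in
 * either direction.
 */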
4199
4200 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4201 {
4202 while (memcg) {
4203 __mem_cgroup_threshold(memcg, false);
4204 if (do_memsw_account())
4205 __mem_cgroup_threshold(memcg, true);
4206
4207 memcg = parent_mem_cgroup(memcg);
4208 }
4209 }
4210
4211 static int compare_thresholds(const void *a, const void *b)
4212 {
4213 const struct mem_cgroup_threshold *_a = a;
4214 const struct mem_cgroup_threshold *_b = b;
4215
4216 if (_a->threshold > _b->threshold)
4217 return 1;
4218
4219 if (_a->threshold < _b->threshold)
4220 return -1;
4221
4222 return 0;
4223 }
4224
4225 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4226 {
4227 struct mem_cgroup_eventfd_list *ev;
4228
4229 spin_lock(&memcg_oom_lock);
4230
4231 list_for_each_entry(ev, &memcg->oom_notify, list)
4232 eventfd_signal(ev->eventfd, 1);
4233
4234 spin_unlock(&memcg_oom_lock);
4235 return 0;
4236 }
4237
4238 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4239 {
4240 struct mem_cgroup *iter;
4241
4242 for_each_mem_cgroup_tree(iter, memcg)
4243 mem_cgroup_oom_notify_cb(iter);
4244 }
4245
4246 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4247 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4248 {
4249 struct mem_cgroup_thresholds *thresholds;
4250 struct mem_cgroup_threshold_ary *new;
4251 unsigned long threshold;
4252 unsigned long usage;
4253 int i, size, ret;
4254
4255 ret = page_counter_memparse(args, "-1", &threshold);
4256 if (ret)
4257 return ret;
4258
4259 mutex_lock(&memcg->thresholds_lock);
4260
4261 if (type == _MEM) {
4262 thresholds = &memcg->thresholds;
4263 usage = mem_cgroup_usage(memcg, false);
4264 } else if (type == _MEMSWAP) {
4265 thresholds = &memcg->memsw_thresholds;
4266 usage = mem_cgroup_usage(memcg, true);
4267 } else
4268 BUG();
4269
4270 /* Check if a threshold crossed before adding a new one */
4271 if (thresholds->primary)
4272 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4273
4274 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4275
4276 /* Allocate memory for new array of thresholds */
4277 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4278 if (!new) {
4279 ret = -ENOMEM;
4280 goto unlock;
4281 }
4282 new->size = size;
4283
4284 /* Copy thresholds (if any) to new array */
4285 if (thresholds->primary)
4286 memcpy(new->entries, thresholds->primary->entries,
4287 flex_array_size(new, entries, size - 1));
4288
4289 /* Add new threshold */
4290 new->entries[size - 1].eventfd = eventfd;
4291 new->entries[size - 1].threshold = threshold;
4292
4293 /* Sort thresholds. Registering of new threshold isn't time-critical */
4294 sort(new->entries, size, sizeof(*new->entries),
4295 compare_thresholds, NULL);
4296
4297 /* Find current threshold */
4298 new->current_threshold = -1;
4299 for (i = 0; i < size; i++) {
4300 if (new->entries[i].threshold <= usage) {
4301 /*
4302 * new->current_threshold will not be used until
4303 * rcu_assign_pointer(), so it's safe to increment
4304 * it here.
4305 */
4306 ++new->current_threshold;
4307 } else
4308 break;
4309 }
4310
4311 /* Free old spare buffer and save old primary buffer as spare */
4312 kfree(thresholds->spare);
4313 thresholds->spare = thresholds->primary;
4314
4315 rcu_assign_pointer(thresholds->primary, new);
4316
4317 /* To be sure that nobody uses thresholds */
4318 synchronize_rcu();
4319
4320 unlock:
4321 mutex_unlock(&memcg->thresholds_lock);
4322
4323 return ret;
4324 }
4325
4326 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4327 struct eventfd_ctx *eventfd, const char *args)
4328 {
4329 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4330 }
4331
4332 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4333 struct eventfd_ctx *eventfd, const char *args)
4334 {
4335 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4336 }
4337
4338 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4339 struct eventfd_ctx *eventfd, enum res_type type)
4340 {
4341 struct mem_cgroup_thresholds *thresholds;
4342 struct mem_cgroup_threshold_ary *new;
4343 unsigned long usage;
4344 int i, j, size, entries;
4345
4346 mutex_lock(&memcg->thresholds_lock);
4347
4348 if (type == _MEM) {
4349 thresholds = &memcg->thresholds;
4350 usage = mem_cgroup_usage(memcg, false);
4351 } else if (type == _MEMSWAP) {
4352 thresholds = &memcg->memsw_thresholds;
4353 usage = mem_cgroup_usage(memcg, true);
4354 } else
4355 BUG();
4356
4357 if (!thresholds->primary)
4358 goto unlock;
4359
4360 /* Check if a threshold crossed before removing */
4361 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4362
4363 /* Calculate the new number of thresholds */
4364 size = entries = 0;
4365 for (i = 0; i < thresholds->primary->size; i++) {
4366 if (thresholds->primary->entries[i].eventfd != eventfd)
4367 size++;
4368 else
4369 entries++;
4370 }
4371
4372 new = thresholds->spare;
4373
4374 /* If no items related to eventfd have been cleared, nothing to do */
4375 if (!entries)
4376 goto unlock;
4377
4378 /* Set thresholds array to NULL if we don't have thresholds */
4379 if (!size) {
4380 kfree(new);
4381 new = NULL;
4382 goto swap_buffers;
4383 }
4384
4385 new->size = size;
4386
4387 /* Copy thresholds and find current threshold */
4388 new->current_threshold = -1;
4389 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4390 if (thresholds->primary->entries[i].eventfd == eventfd)
4391 continue;
4392
4393 new->entries[j] = thresholds->primary->entries[i];
4394 if (new->entries[j].threshold <= usage) {
4395 /*
4396 * new->current_threshold will not be used
4397 * until rcu_assign_pointer(), so it's safe to increment
4398 * it here.
4399 */
4400 ++new->current_threshold;
4401 }
4402 j++;
4403 }
4404
4405 swap_buffers:
4406 /* Swap primary and spare array */
4407 thresholds->spare = thresholds->primary;
4408
4409 rcu_assign_pointer(thresholds->primary, new);
4410
4411 /* To be sure that nobody uses thresholds */
4412 synchronize_rcu();
4413
4414 /* If all events are unregistered, free the spare array */
4415 if (!new) {
4416 kfree(thresholds->spare);
4417 thresholds->spare = NULL;
4418 }
4419 unlock:
4420 mutex_unlock(&memcg->thresholds_lock);
4421 }
4422
4423 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4424 struct eventfd_ctx *eventfd)
4425 {
4426 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4427 }
4428
4429 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4430 struct eventfd_ctx *eventfd)
4431 {
4432 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4433 }
4434
4435 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4436 struct eventfd_ctx *eventfd, const char *args)
4437 {
4438 struct mem_cgroup_eventfd_list *event;
4439
4440 event = kmalloc(sizeof(*event), GFP_KERNEL);
4441 if (!event)
4442 return -ENOMEM;
4443
4444 spin_lock(&memcg_oom_lock);
4445
4446 event->eventfd = eventfd;
4447 list_add(&event->list, &memcg->oom_notify);
4448
4449 /* already in OOM ? */
4450 if (memcg->under_oom)
4451 eventfd_signal(eventfd, 1);
4452 spin_unlock(&memcg_oom_lock);
4453
4454 return 0;
4455 }
4456
4457 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4458 struct eventfd_ctx *eventfd)
4459 {
4460 struct mem_cgroup_eventfd_list *ev, *tmp;
4461
4462 spin_lock(&memcg_oom_lock);
4463
4464 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4465 if (ev->eventfd == eventfd) {
4466 list_del(&ev->list);
4467 kfree(ev);
4468 }
4469 }
4470
4471 spin_unlock(&memcg_oom_lock);
4472 }
4473
4474 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4475 {
4476 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4477
4478 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4479 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4480 seq_printf(sf, "oom_kill %lu\n",
4481 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4482 return 0;
4483 }
4484
4485 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4486 struct cftype *cft, u64 val)
4487 {
4488 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4489
4490 /* cannot set to root cgroup and only 0 and 1 are allowed */
4491 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4492 return -EINVAL;
4493
4494 memcg->oom_kill_disable = val;
4495 if (!val)
4496 memcg_oom_recover(memcg);
4497
4498 return 0;
4499 }
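
/*
 * An illustrative session for the two handlers above, on a hypothetical
 * non-root group "grp":
 *
 *   echo 1 > /sys/fs/cgroup/memory/grp/memory.oom_control
 *       (disable the OOM killer; tasks that hit the limit wait instead
 *        of being killed)
 *   cat /sys/fs/cgroup/memory/grp/memory.oom_control
 *       oom_kill_disable 1
 *       under_oom 0
 *       oom_kill 0
 *   echo 0 > /sys/fs/cgroup/memory/grp/memory.oom_control
 *       (re-enable it; memcg_oom_recover() wakes any waiting tasks)
 */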
4500
4501 #ifdef CONFIG_CGROUP_WRITEBACK
4502
4503 #include <trace/events/writeback.h>
4504
4505 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4506 {
4507 return wb_domain_init(&memcg->cgwb_domain, gfp);
4508 }
4509
4510 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4511 {
4512 wb_domain_exit(&memcg->cgwb_domain);
4513 }
4514
4515 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4516 {
4517 wb_domain_size_changed(&memcg->cgwb_domain);
4518 }
4519
4520 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4521 {
4522 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4523
4524 if (!memcg->css.parent)
4525 return NULL;
4526
4527 return &memcg->cgwb_domain;
4528 }
4529
4530 /**
4531 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4532 * @wb: bdi_writeback in question
4533 * @pfilepages: out parameter for number of file pages
4534 * @pheadroom: out parameter for number of allocatable pages according to memcg
4535 * @pdirty: out parameter for number of dirty pages
4536 * @pwriteback: out parameter for number of pages under writeback
4537 *
4538 * Determine the numbers of file, headroom, dirty, and writeback pages in
4539 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4540 * is a bit more involved.
4541 *
4542 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4543 * headroom is calculated as the lowest headroom of itself and the
4544 * ancestors. Note that this doesn't consider the actual amount of
4545 * available memory in the system. The caller should further cap
4546 * *@pheadroom accordingly.
4547 */
4548 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4549 unsigned long *pheadroom, unsigned long *pdirty,
4550 unsigned long *pwriteback)
4551 {
4552 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4553 struct mem_cgroup *parent;
4554
4555 mem_cgroup_flush_stats();
4556
4557 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4558 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4559 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4560 memcg_page_state(memcg, NR_ACTIVE_FILE);
4561
4562 *pheadroom = PAGE_COUNTER_MAX;
4563 while ((parent = parent_mem_cgroup(memcg))) {
4564 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4565 READ_ONCE(memcg->memory.high));
4566 unsigned long used = page_counter_read(&memcg->memory);
4567
4568 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4569 memcg = parent;
4570 }
4571 }
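
/*
 * A worked example for the headroom loop above, with hypothetical
 * numbers (the code works in page counts; byte sizes are used here for
 * readability): a memcg with max = 1G, high = 512M, used = 300M whose
 * non-root parent has max = 2G, high = max, used = 1.5G. The memcg
 * contributes min(1G, 512M) - 300M = 212M, the parent contributes
 * 2G - 1.5G = 512M, so *pheadroom becomes min(212M, 512M) = 212M.
 * The walk stops below the root, whose counters are not considered.
 */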
4572
4573 /*
4574 * Foreign dirty flushing
4575 *
4576 * There's an inherent mismatch between memcg and writeback. The former
4577 * tracks ownership per-page while the latter per-inode. This was a
4578 * deliberate design decision because honoring per-page ownership in the
4579 * writeback path is complicated, may lead to higher CPU and IO overheads
4580 * and deemed unnecessary given that write-sharing an inode across
4581 * different cgroups isn't a common use-case.
4582 *
4583 * Combined with inode majority-writer ownership switching, this works well
4584 * enough in most cases but there are some pathological cases. For
4585 * example, let's say there are two cgroups A and B which keep writing to
4586 * different but confined parts of the same inode. B owns the inode and
4587 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4588 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4589 * triggering background writeback. A will be slowed down without a way to
4590 * make writeback of the dirty pages happen.
4591 *
4592 * Conditions like the above can lead to a cgroup getting repeatedly and
4593 * severely throttled after making some progress after each
4594 * dirty_expire_interval while the underlying IO device is almost
4595 * completely idle.
4596 *
4597 * Solving this problem completely requires matching the ownership tracking
4598 * granularities between memcg and writeback in either direction. However,
4599 * the more egregious behaviors can be avoided by simply remembering the
4600 * most recent foreign dirtying events and initiating remote flushes on
4601 * them when local writeback isn't enough to keep the memory clean enough.
4602 *
4603 * The following two functions implement such mechanism. When a foreign
4604 * page - a page whose memcg and writeback ownerships don't match - is
4605 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4606 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4607 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4608 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4609 * foreign bdi_writebacks which haven't expired. Both the numbers of
4610 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4611 * limited to MEMCG_CGWB_FRN_CNT.
4612 *
4613 * The mechanism only remembers IDs and doesn't hold any object references.
4614 * As being wrong occasionally doesn't matter, updates and accesses to the
4615 * records are lockless and racy.
4616 */
4617 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4618 struct bdi_writeback *wb)
4619 {
4620 struct mem_cgroup *memcg = page_memcg(page);
4621 struct memcg_cgwb_frn *frn;
4622 u64 now = get_jiffies_64();
4623 u64 oldest_at = now;
4624 int oldest = -1;
4625 int i;
4626
4627 trace_track_foreign_dirty(page, wb);
4628
4629 /*
4630 * Pick the slot to use. If there is already a slot for @wb, keep
4631 * using it. If not, replace the oldest one that isn't being
4632 * written out.
4633 */
4634 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4635 frn = &memcg->cgwb_frn[i];
4636 if (frn->bdi_id == wb->bdi->id &&
4637 frn->memcg_id == wb->memcg_css->id)
4638 break;
4639 if (time_before64(frn->at, oldest_at) &&
4640 atomic_read(&frn->done.cnt) == 1) {
4641 oldest = i;
4642 oldest_at = frn->at;
4643 }
4644 }
4645
4646 if (i < MEMCG_CGWB_FRN_CNT) {
4647 /*
4648 * Re-using an existing one. Update timestamp lazily to
4649 * avoid making the cacheline hot. We want them to be
4650 * reasonably up-to-date and significantly shorter than
4651 * dirty_expire_interval as that's what expires the record.
4652 * Use the shorter of 1s and dirty_expire_interval / 8.
4653 */
4654 unsigned long update_intv =
4655 min_t(unsigned long, HZ,
4656 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4657
4658 if (time_before64(frn->at, now - update_intv))
4659 frn->at = now;
4660 } else if (oldest >= 0) {
4661 /* replace the oldest free one */
4662 frn = &memcg->cgwb_frn[oldest];
4663 frn->bdi_id = wb->bdi->id;
4664 frn->memcg_id = wb->memcg_css->id;
4665 frn->at = now;
4666 }
4667 }
4668
4669 /* issue foreign writeback flushes for recorded foreign dirtying events */
4670 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4671 {
4672 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4673 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4674 u64 now = jiffies_64;
4675 int i;
4676
4677 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4678 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4679
4680 /*
4681 * If the record is older than dirty_expire_interval,
4682 * writeback on it has already started. No need to kick it
4683 * off again. Also, don't start a new one if there's
4684 * already one in flight.
4685 */
4686 if (time_after64(frn->at, now - intv) &&
4687 atomic_read(&frn->done.cnt) == 1) {
4688 frn->at = 0;
4689 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4690 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4691 WB_REASON_FOREIGN_FLUSH,
4692 &frn->done);
4693 }
4694 }
4695 }
4696
4697 #else /* CONFIG_CGROUP_WRITEBACK */
4698
4699 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4700 {
4701 return 0;
4702 }
4703
4704 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4705 {
4706 }
4707
4708 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4709 {
4710 }
4711
4712 #endif /* CONFIG_CGROUP_WRITEBACK */
4713
4714 /*
4715 * DO NOT USE IN NEW FILES.
4716 *
4717 * "cgroup.event_control" implementation.
4718 *
4719 * This is way over-engineered. It tries to support fully configurable
4720 * events for each user. Such level of flexibility is completely
4721 * unnecessary especially in the light of the planned unified hierarchy.
4722 *
4723 * Please deprecate this and replace with something simpler if at all
4724 * possible.
4725 */
4726
4727 /*
4728 * Unregister event and free resources.
4729 *
4730 * Gets called from workqueue.
4731 */
4732 static void memcg_event_remove(struct work_struct *work)
4733 {
4734 struct mem_cgroup_event *event =
4735 container_of(work, struct mem_cgroup_event, remove);
4736 struct mem_cgroup *memcg = event->memcg;
4737
4738 remove_wait_queue(event->wqh, &event->wait);
4739
4740 event->unregister_event(memcg, event->eventfd);
4741
4742 /* Notify userspace the event is going away. */
4743 eventfd_signal(event->eventfd, 1);
4744
4745 eventfd_ctx_put(event->eventfd);
4746 kfree(event);
4747 css_put(&memcg->css);
4748 }
4749
4750 /*
4751 * Gets called on EPOLLHUP on eventfd when user closes it.
4752 *
4753 * Called with wqh->lock held and interrupts disabled.
4754 */
4755 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4756 int sync, void *key)
4757 {
4758 struct mem_cgroup_event *event =
4759 container_of(wait, struct mem_cgroup_event, wait);
4760 struct mem_cgroup *memcg = event->memcg;
4761 __poll_t flags = key_to_poll(key);
4762
4763 if (flags & EPOLLHUP) {
4764 /*
4765 * If the event has been detached at cgroup removal, we
4766 * can simply return knowing the other side will cleanup
4767 * for us.
4768 *
4769 * We can't race against event freeing since the other
4770 * side will require wqh->lock via remove_wait_queue(),
4771 * which we hold.
4772 */
4773 spin_lock(&memcg->event_list_lock);
4774 if (!list_empty(&event->list)) {
4775 list_del_init(&event->list);
4776 /*
4777 * We are in atomic context, but memcg_event_remove()
4778 * may sleep, so we have to call it from a workqueue.
4779 */
4780 schedule_work(&event->remove);
4781 }
4782 spin_unlock(&memcg->event_list_lock);
4783 }
4784
4785 return 0;
4786 }
4787
4788 static void memcg_event_ptable_queue_proc(struct file *file,
4789 wait_queue_head_t *wqh, poll_table *pt)
4790 {
4791 struct mem_cgroup_event *event =
4792 container_of(pt, struct mem_cgroup_event, pt);
4793
4794 event->wqh = wqh;
4795 add_wait_queue(wqh, &event->wait);
4796 }
4797
4798 /*
4799 * DO NOT USE IN NEW FILES.
4800 *
4801 * Parse input and register new cgroup event handler.
4802 *
4803 * Input must be in format '<event_fd> <control_fd> <args>'.
4804 * Interpretation of args is defined by control file implementation.
4805 */
4806 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4807 char *buf, size_t nbytes, loff_t off)
4808 {
4809 struct cgroup_subsys_state *css = of_css(of);
4810 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4811 struct mem_cgroup_event *event;
4812 struct cgroup_subsys_state *cfile_css;
4813 unsigned int efd, cfd;
4814 struct fd efile;
4815 struct fd cfile;
4816 struct dentry *cdentry;
4817 const char *name;
4818 char *endp;
4819 int ret;
4820
4821 buf = strstrip(buf);
4822
4823 efd = simple_strtoul(buf, &endp, 10);
4824 if (*endp != ' ')
4825 return -EINVAL;
4826 buf = endp + 1;
4827
4828 cfd = simple_strtoul(buf, &endp, 10);
4829 if ((*endp != ' ') && (*endp != '\0'))
4830 return -EINVAL;
4831 buf = endp + 1;
4832
4833 event = kzalloc(sizeof(*event), GFP_KERNEL);
4834 if (!event)
4835 return -ENOMEM;
4836
4837 event->memcg = memcg;
4838 INIT_LIST_HEAD(&event->list);
4839 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4840 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4841 INIT_WORK(&event->remove, memcg_event_remove);
4842
4843 efile = fdget(efd);
4844 if (!efile.file) {
4845 ret = -EBADF;
4846 goto out_kfree;
4847 }
4848
4849 event->eventfd = eventfd_ctx_fileget(efile.file);
4850 if (IS_ERR(event->eventfd)) {
4851 ret = PTR_ERR(event->eventfd);
4852 goto out_put_efile;
4853 }
4854
4855 cfile = fdget(cfd);
4856 if (!cfile.file) {
4857 ret = -EBADF;
4858 goto out_put_eventfd;
4859 }
4860
4861 /* the process needs read permission on the control file */
4862 /* AV: shouldn't we check that it's been opened for read instead? */
4863 ret = file_permission(cfile.file, MAY_READ);
4864 if (ret < 0)
4865 goto out_put_cfile;
4866
4867 /*
4868 * The control file must be a regular cgroup1 file. As a regular cgroup
4869 * file can't be renamed, it's safe to access its name afterwards.
4870 */
4871 cdentry = cfile.file->f_path.dentry;
4872 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
4873 ret = -EINVAL;
4874 goto out_put_cfile;
4875 }
4876
4877 /*
4878 * Determine the event callbacks and set them in @event. This used
4879 * to be done via struct cftype but cgroup core no longer knows
4880 * about these events. The following is crude but the whole thing
4881 * is for compatibility anyway.
4882 *
4883 * DO NOT ADD NEW FILES.
4884 */
4885 name = cdentry->d_name.name;
4886
4887 if (!strcmp(name, "memory.usage_in_bytes")) {
4888 event->register_event = mem_cgroup_usage_register_event;
4889 event->unregister_event = mem_cgroup_usage_unregister_event;
4890 } else if (!strcmp(name, "memory.oom_control")) {
4891 event->register_event = mem_cgroup_oom_register_event;
4892 event->unregister_event = mem_cgroup_oom_unregister_event;
4893 } else if (!strcmp(name, "memory.pressure_level")) {
4894 event->register_event = vmpressure_register_event;
4895 event->unregister_event = vmpressure_unregister_event;
4896 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4897 event->register_event = memsw_cgroup_usage_register_event;
4898 event->unregister_event = memsw_cgroup_usage_unregister_event;
4899 } else {
4900 ret = -EINVAL;
4901 goto out_put_cfile;
4902 }
4903
4904 /*
4905 * Verify that @cfile belongs to @css. Also, remaining events are
4906 * automatically removed on cgroup destruction but the removal is
4907 * asynchronous, so take an extra ref on @css.
4908 */
4909 cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
4910 &memory_cgrp_subsys);
4911 ret = -EINVAL;
4912 if (IS_ERR(cfile_css))
4913 goto out_put_cfile;
4914 if (cfile_css != css) {
4915 css_put(cfile_css);
4916 goto out_put_cfile;
4917 }
4918
4919 ret = event->register_event(memcg, event->eventfd, buf);
4920 if (ret)
4921 goto out_put_css;
4922
4923 vfs_poll(efile.file, &event->pt);
4924
4925 spin_lock_irq(&memcg->event_list_lock);
4926 list_add(&event->list, &memcg->event_list);
4927 spin_unlock_irq(&memcg->event_list_lock);
4928
4929 fdput(cfile);
4930 fdput(efile);
4931
4932 return nbytes;
4933
4934 out_put_css:
4935 css_put(css);
4936 out_put_cfile:
4937 fdput(cfile);
4938 out_put_eventfd:
4939 eventfd_ctx_put(event->eventfd);
4940 out_put_efile:
4941 fdput(efile);
4942 out_kfree:
4943 kfree(event);
4944
4945 return ret;
4946 }
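
/*
 * An illustrative userspace sketch of the interface parsed above:
 * registering a usage threshold through the legacy cgroup.event_control
 * file. The cgroup path and the 64M threshold are hypothetical, and error
 * handling is omitted.
 *
 *   #include <stdio.h>
 *   #include <string.h>
 *   #include <stdint.h>
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *   #include <sys/eventfd.h>
 *
 *   int main(void)
 *   {
 *           char buf[64];
 *           uint64_t count;
 *           int efd = eventfd(0, 0);
 *           int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *                          O_RDONLY);
 *           int ecfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *                           O_WRONLY);
 *
 *           // "<event_fd> <control_fd> <args>"; args is the threshold
 *           snprintf(buf, sizeof(buf), "%d %d 64M", efd, cfd);
 *           write(ecfd, buf, strlen(buf));
 *
 *           // blocks until usage crosses 64M in either direction
 *           read(efd, &count, sizeof(count));
 *           printf("memory.usage_in_bytes threshold crossed\n");
 *           return 0;
 *   }
 */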
4947
4948 static struct cftype mem_cgroup_legacy_files[] = {
4949 {
4950 .name = "usage_in_bytes",
4951 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4952 .read_u64 = mem_cgroup_read_u64,
4953 },
4954 {
4955 .name = "max_usage_in_bytes",
4956 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4957 .write = mem_cgroup_reset,
4958 .read_u64 = mem_cgroup_read_u64,
4959 },
4960 {
4961 .name = "limit_in_bytes",
4962 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4963 .write = mem_cgroup_write,
4964 .read_u64 = mem_cgroup_read_u64,
4965 },
4966 {
4967 .name = "soft_limit_in_bytes",
4968 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4969 .write = mem_cgroup_write,
4970 .read_u64 = mem_cgroup_read_u64,
4971 },
4972 {
4973 .name = "failcnt",
4974 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4975 .write = mem_cgroup_reset,
4976 .read_u64 = mem_cgroup_read_u64,
4977 },
4978 {
4979 .name = "stat",
4980 .seq_show = memcg_stat_show,
4981 },
4982 {
4983 .name = "force_empty",
4984 .write = mem_cgroup_force_empty_write,
4985 },
4986 {
4987 .name = "use_hierarchy",
4988 .write_u64 = mem_cgroup_hierarchy_write,
4989 .read_u64 = mem_cgroup_hierarchy_read,
4990 },
4991 {
4992 .name = "cgroup.event_control", /* XXX: for compat */
4993 .write = memcg_write_event_control,
4994 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4995 },
4996 {
4997 .name = "swappiness",
4998 .read_u64 = mem_cgroup_swappiness_read,
4999 .write_u64 = mem_cgroup_swappiness_write,
5000 },
5001 {
5002 .name = "move_charge_at_immigrate",
5003 .read_u64 = mem_cgroup_move_charge_read,
5004 .write_u64 = mem_cgroup_move_charge_write,
5005 },
5006 {
5007 .name = "oom_control",
5008 .seq_show = mem_cgroup_oom_control_read,
5009 .write_u64 = mem_cgroup_oom_control_write,
5010 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
5011 },
5012 {
5013 .name = "pressure_level",
5014 },
5015 #ifdef CONFIG_NUMA
5016 {
5017 .name = "numa_stat",
5018 .seq_show = memcg_numa_stat_show,
5019 },
5020 #endif
5021 {
5022 .name = "kmem.limit_in_bytes",
5023 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5024 .write = mem_cgroup_write,
5025 .read_u64 = mem_cgroup_read_u64,
5026 },
5027 {
5028 .name = "kmem.usage_in_bytes",
5029 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5030 .read_u64 = mem_cgroup_read_u64,
5031 },
5032 {
5033 .name = "kmem.failcnt",
5034 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5035 .write = mem_cgroup_reset,
5036 .read_u64 = mem_cgroup_read_u64,
5037 },
5038 {
5039 .name = "kmem.max_usage_in_bytes",
5040 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5041 .write = mem_cgroup_reset,
5042 .read_u64 = mem_cgroup_read_u64,
5043 },
5044 #if defined(CONFIG_MEMCG_KMEM) && \
5045 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5046 {
5047 .name = "kmem.slabinfo",
5048 .seq_show = memcg_slab_show,
5049 },
5050 #endif
5051 {
5052 .name = "kmem.tcp.limit_in_bytes",
5053 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5054 .write = mem_cgroup_write,
5055 .read_u64 = mem_cgroup_read_u64,
5056 },
5057 {
5058 .name = "kmem.tcp.usage_in_bytes",
5059 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5060 .read_u64 = mem_cgroup_read_u64,
5061 },
5062 {
5063 .name = "kmem.tcp.failcnt",
5064 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5065 .write = mem_cgroup_reset,
5066 .read_u64 = mem_cgroup_read_u64,
5067 },
5068 {
5069 .name = "kmem.tcp.max_usage_in_bytes",
5070 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5071 .write = mem_cgroup_reset,
5072 .read_u64 = mem_cgroup_read_u64,
5073 },
5074 { }, /* terminate */
5075 };
5076
5077 /*
5078 * Private memory cgroup IDR
5079 *
5080 * Swap-out records and page cache shadow entries need to store memcg
5081 * references in constrained space, so we maintain an ID space that is
5082 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5083 * memory-controlled cgroups to 64k.
5084 *
5085 * However, there usually are many references to the offline CSS after
5086 * the cgroup has been destroyed, such as page cache or reclaimable
5087 * slab objects, that don't need to hang on to the ID. We want to keep
5088 * those dead CSS from occupying IDs, or we might quickly exhaust the
5089 * relatively small ID space and prevent the creation of new cgroups
5090 * even when there are much fewer than 64k cgroups - possibly none.
5091 *
5092 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5093 * be freed and recycled when it's no longer needed, which is usually
5094 * when the CSS is offlined.
5095 *
5096 * The only exception to that are records of swapped out tmpfs/shmem
5097 * pages that need to be attributed to live ancestors on swapin. But
5098 * those references are manageable from userspace.
5099 */
5100
5101 static DEFINE_IDR(mem_cgroup_idr);
5102
5103 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5104 {
5105 if (memcg->id.id > 0) {
5106 trace_android_vh_mem_cgroup_id_remove(memcg);
5107 idr_remove(&mem_cgroup_idr, memcg->id.id);
5108 memcg->id.id = 0;
5109 }
5110 }
5111
5112 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5113 unsigned int n)
5114 {
5115 refcount_add(n, &memcg->id.ref);
5116 }
5117
5118 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5119 {
5120 if (refcount_sub_and_test(n, &memcg->id.ref)) {
5121 mem_cgroup_id_remove(memcg);
5122
5123 /* Memcg ID pins CSS */
5124 css_put(&memcg->css);
5125 }
5126 }
5127
5128 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5129 {
5130 mem_cgroup_id_put_many(memcg, 1);
5131 }
5132
5133 /**
5134 * mem_cgroup_from_id - look up a memcg from a memcg id
5135 * @id: the memcg id to look up
5136 *
5137 * Caller must hold rcu_read_lock().
5138 */
5139 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5140 {
5141 WARN_ON_ONCE(!rcu_read_lock_held());
5142 return idr_find(&mem_cgroup_idr, id);
5143 }
5144 EXPORT_SYMBOL_GPL(mem_cgroup_from_id);
5145
5146 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5147 {
5148 struct mem_cgroup_per_node *pn;
5149 int tmp = node;
5150 /*
5151 * This routine is called against possible nodes.
5152 * But it's a BUG to call kmalloc() against an offline node.
5153 *
5154 * TODO: this routine can waste much memory for nodes which will
5155 * never be onlined. It's better to use a memory hotplug callback
5156 * function.
5157 */
5158 if (!node_state(node, N_NORMAL_MEMORY))
5159 tmp = -1;
5160 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5161 if (!pn)
5162 return 1;
5163
5164 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5165 GFP_KERNEL_ACCOUNT);
5166 if (!pn->lruvec_stats_percpu) {
5167 kfree(pn);
5168 return 1;
5169 }
5170
5171 lruvec_init(&pn->lruvec);
5172 pn->usage_in_excess = 0;
5173 pn->on_tree = false;
5174 pn->memcg = memcg;
5175
5176 memcg->nodeinfo[node] = pn;
5177 return 0;
5178 }
5179
5180 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5181 {
5182 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5183
5184 if (!pn)
5185 return;
5186
5187 free_percpu(pn->lruvec_stats_percpu);
5188 kfree(pn);
5189 }
5190
5191 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5192 {
5193 int node;
5194
5195 trace_android_vh_mem_cgroup_free(memcg);
5196 for_each_node(node)
5197 free_mem_cgroup_per_node_info(memcg, node);
5198 free_percpu(memcg->vmstats_percpu);
5199 kfree(memcg);
5200 }
5201
5202 static void mem_cgroup_free(struct mem_cgroup *memcg)
5203 {
5204 lru_gen_exit_memcg(memcg);
5205 memcg_wb_domain_exit(memcg);
5206 __mem_cgroup_free(memcg);
5207 }
5208
5209 static struct mem_cgroup *mem_cgroup_alloc(void)
5210 {
5211 struct mem_cgroup *memcg;
5212 unsigned int size;
5213 int node;
5214 int __maybe_unused i;
5215 long error = -ENOMEM;
5216
5217 size = sizeof(struct mem_cgroup);
5218 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5219
5220 memcg = kzalloc(size, GFP_KERNEL);
5221 if (!memcg)
5222 return ERR_PTR(error);
5223
5224 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5225 1, MEM_CGROUP_ID_MAX,
5226 GFP_KERNEL);
5227 if (memcg->id.id < 0) {
5228 error = memcg->id.id;
5229 goto fail;
5230 }
5231
5232 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5233 GFP_KERNEL_ACCOUNT);
5234 if (!memcg->vmstats_percpu)
5235 goto fail;
5236
5237 for_each_node(node)
5238 if (alloc_mem_cgroup_per_node_info(memcg, node))
5239 goto fail;
5240
5241 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5242 goto fail;
5243
5244 INIT_WORK(&memcg->high_work, high_work_func);
5245 INIT_LIST_HEAD(&memcg->oom_notify);
5246 mutex_init(&memcg->thresholds_lock);
5247 spin_lock_init(&memcg->move_lock);
5248 vmpressure_init(&memcg->vmpressure);
5249 INIT_LIST_HEAD(&memcg->event_list);
5250 spin_lock_init(&memcg->event_list_lock);
5251 memcg->socket_pressure = jiffies;
5252 trace_android_rvh_memcgv2_init(memcg);
5253 #ifdef CONFIG_MEMCG_KMEM
5254 memcg->kmemcg_id = -1;
5255 INIT_LIST_HEAD(&memcg->objcg_list);
5256 #endif
5257 #ifdef CONFIG_CGROUP_WRITEBACK
5258 INIT_LIST_HEAD(&memcg->cgwb_list);
5259 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5260 memcg->cgwb_frn[i].done =
5261 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5262 #endif
5263 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5264 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5265 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5266 memcg->deferred_split_queue.split_queue_len = 0;
5267 #endif
5268 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5269 lru_gen_init_memcg(memcg);
5270 trace_android_vh_mem_cgroup_alloc(memcg);
5271 return memcg;
5272 fail:
5273 mem_cgroup_id_remove(memcg);
5274 __mem_cgroup_free(memcg);
5275 return ERR_PTR(error);
5276 }
5277
5278 static struct cgroup_subsys_state * __ref
5279 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5280 {
5281 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5282 struct mem_cgroup *memcg, *old_memcg;
5283 long error = -ENOMEM;
5284
5285 old_memcg = set_active_memcg(parent);
5286 memcg = mem_cgroup_alloc();
5287 set_active_memcg(old_memcg);
5288 if (IS_ERR(memcg))
5289 return ERR_CAST(memcg);
5290
5291 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5292 memcg->soft_limit = PAGE_COUNTER_MAX;
5293 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5294 if (parent) {
5295 memcg->swappiness = mem_cgroup_swappiness(parent);
5296 memcg->oom_kill_disable = parent->oom_kill_disable;
5297
5298 page_counter_init(&memcg->memory, &parent->memory);
5299 page_counter_init(&memcg->swap, &parent->swap);
5300 page_counter_init(&memcg->kmem, &parent->kmem);
5301 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5302 } else {
5303 page_counter_init(&memcg->memory, NULL);
5304 page_counter_init(&memcg->swap, NULL);
5305 page_counter_init(&memcg->kmem, NULL);
5306 page_counter_init(&memcg->tcpmem, NULL);
5307
5308 root_mem_cgroup = memcg;
5309 return &memcg->css;
5310 }
5311
5312 /* The following stuff does not apply to the root */
5313 error = memcg_online_kmem(memcg);
5314 if (error)
5315 goto fail;
5316
5317 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5318 static_branch_inc(&memcg_sockets_enabled_key);
5319
5320 return &memcg->css;
5321 fail:
5322 mem_cgroup_id_remove(memcg);
5323 mem_cgroup_free(memcg);
5324 return ERR_PTR(error);
5325 }
5326
5327 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5328 {
5329 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5330
5331 /*
5332 * A memcg must be visible for expand_shrinker_info()
5333 * by the time the maps are allocated. So, we allocate maps
5334 * here, when for_each_mem_cgroup() can't skip it.
5335 */
5336 if (alloc_shrinker_info(memcg)) {
5337 mem_cgroup_id_remove(memcg);
5338 return -ENOMEM;
5339 }
5340
5341 /* Online state pins memcg ID, memcg ID pins CSS */
5342 refcount_set(&memcg->id.ref, 1);
5343 css_get(css);
5344
5345 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
5346 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5347 2UL*HZ);
5348
5349 trace_android_vh_mem_cgroup_css_online(css, memcg);
5350 lru_gen_online_memcg(memcg);
5351 return 0;
5352 }
5353
5354 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5355 {
5356 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5357 struct mem_cgroup_event *event, *tmp;
5358
5359 trace_android_vh_mem_cgroup_css_offline(css, memcg);
5360 /*
5361 * Unregister events and notify userspace.
5362 * Notify userspace about the cgroup's removal only after rmdir of the
5363 * cgroup directory, to avoid races between userspace and kernel space.
5364 */
5365 spin_lock_irq(&memcg->event_list_lock);
5366 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5367 list_del_init(&event->list);
5368 schedule_work(&event->remove);
5369 }
5370 spin_unlock_irq(&memcg->event_list_lock);
5371
5372 page_counter_set_min(&memcg->memory, 0);
5373 page_counter_set_low(&memcg->memory, 0);
5374
5375 memcg_offline_kmem(memcg);
5376 reparent_shrinker_deferred(memcg);
5377 wb_memcg_offline(memcg);
5378 lru_gen_offline_memcg(memcg);
5379
5380 drain_all_stock(memcg);
5381
5382 mem_cgroup_id_put(memcg);
5383 }
5384
5385 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5386 {
5387 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5388
5389 invalidate_reclaim_iterators(memcg);
5390 lru_gen_release_memcg(memcg);
5391 }
5392
5393 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5394 {
5395 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5396 int __maybe_unused i;
5397
5398 #ifdef CONFIG_CGROUP_WRITEBACK
5399 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5400 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5401 #endif
5402 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5403 static_branch_dec(&memcg_sockets_enabled_key);
5404
5405 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5406 static_branch_dec(&memcg_sockets_enabled_key);
5407
5408 vmpressure_cleanup(&memcg->vmpressure);
5409 cancel_work_sync(&memcg->high_work);
5410 mem_cgroup_remove_from_trees(memcg);
5411 free_shrinker_info(memcg);
5412 memcg_free_kmem(memcg);
5413 mem_cgroup_free(memcg);
5414 }
5415
5416 /**
5417 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5418 * @css: the target css
5419 *
5420 * Reset the states of the mem_cgroup associated with @css. This is
5421 * invoked when the userland requests disabling on the default hierarchy
5422 * but the memcg is pinned through dependency. The memcg should stop
5423 * applying policies and should revert to the vanilla state as it may be
5424 * made visible again.
5425 *
5426 * The current implementation only resets the essential configurations.
5427 * This needs to be expanded to cover all the visible parts.
5428 */
5429 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5430 {
5431 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5432
5433 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5434 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5435 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5436 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5437 page_counter_set_min(&memcg->memory, 0);
5438 page_counter_set_low(&memcg->memory, 0);
5439 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5440 memcg->soft_limit = PAGE_COUNTER_MAX;
5441 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5442 memcg_wb_domain_size_changed(memcg);
5443 }
5444
5445 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5446 {
5447 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5448 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5449 struct memcg_vmstats_percpu *statc;
5450 long delta, v;
5451 int i, nid;
5452
5453 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5454
5455 for (i = 0; i < MEMCG_NR_STAT; i++) {
5456 /*
5457 * Collect the aggregated propagation counts of groups
5458 * below us. We're in a per-cpu loop here and this is
5459 * a global counter, so the first cycle will get them.
5460 */
5461 delta = memcg->vmstats.state_pending[i];
5462 if (delta)
5463 memcg->vmstats.state_pending[i] = 0;
5464
5465 /* Add CPU changes on this level since the last flush */
5466 v = READ_ONCE(statc->state[i]);
5467 if (v != statc->state_prev[i]) {
5468 delta += v - statc->state_prev[i];
5469 statc->state_prev[i] = v;
5470 }
5471
5472 if (!delta)
5473 continue;
5474
5475 /* Aggregate counts on this level and propagate upwards */
5476 memcg->vmstats.state[i] += delta;
5477 if (parent)
5478 parent->vmstats.state_pending[i] += delta;
5479 }
5480
5481 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
5482 delta = memcg->vmstats.events_pending[i];
5483 if (delta)
5484 memcg->vmstats.events_pending[i] = 0;
5485
5486 v = READ_ONCE(statc->events[i]);
5487 if (v != statc->events_prev[i]) {
5488 delta += v - statc->events_prev[i];
5489 statc->events_prev[i] = v;
5490 }
5491
5492 if (!delta)
5493 continue;
5494
5495 memcg->vmstats.events[i] += delta;
5496 if (parent)
5497 parent->vmstats.events_pending[i] += delta;
5498 }
5499
5500 for_each_node_state(nid, N_MEMORY) {
5501 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5502 struct mem_cgroup_per_node *ppn = NULL;
5503 struct lruvec_stats_percpu *lstatc;
5504
5505 if (parent)
5506 ppn = parent->nodeinfo[nid];
5507
5508 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5509
5510 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5511 delta = pn->lruvec_stats.state_pending[i];
5512 if (delta)
5513 pn->lruvec_stats.state_pending[i] = 0;
5514
5515 v = READ_ONCE(lstatc->state[i]);
5516 if (v != lstatc->state_prev[i]) {
5517 delta += v - lstatc->state_prev[i];
5518 lstatc->state_prev[i] = v;
5519 }
5520
5521 if (!delta)
5522 continue;
5523
5524 pn->lruvec_stats.state[i] += delta;
5525 if (ppn)
5526 ppn->lruvec_stats.state_pending[i] += delta;
5527 }
5528 }
5529 }
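/*
 * Illustrative walk-through of the flush above (hypothetical numbers): if
 * this CPU's per-cpu counter for some stat item moved from 100 to 140
 * since the previous flush, delta is 40 plus whatever the children already
 * queued in state_pending. The sum is folded into this memcg's own
 * vmstats.state[] and queued in the parent's state_pending[], so each
 * flush cycle propagates the change one level up the hierarchy.
 */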
5530
5531 #ifdef CONFIG_MMU
5532 /* Handlers for move charge at task migration. */
5533 static int mem_cgroup_do_precharge(unsigned long count)
5534 {
5535 int ret;
5536
5537 /* Try a single bulk charge without reclaim first, kswapd may wake */
5538 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5539 if (!ret) {
5540 mc.precharge += count;
5541 return ret;
5542 }
5543
5544 /* Try charges one by one with reclaim, but do not retry */
5545 while (count--) {
5546 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5547 if (ret)
5548 return ret;
5549 mc.precharge++;
5550 cond_resched();
5551 }
5552 return 0;
5553 }
5554
5555 union mc_target {
5556 struct page *page;
5557 swp_entry_t ent;
5558 };
5559
5560 enum mc_target_type {
5561 MC_TARGET_NONE = 0,
5562 MC_TARGET_PAGE,
5563 MC_TARGET_SWAP,
5564 MC_TARGET_DEVICE,
5565 };
5566
5567 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5568 unsigned long addr, pte_t ptent)
5569 {
5570 struct page *page = vm_normal_page(vma, addr, ptent);
5571
5572 if (!page || !page_mapped(page))
5573 return NULL;
5574 if (PageAnon(page)) {
5575 if (!(mc.flags & MOVE_ANON))
5576 return NULL;
5577 } else {
5578 if (!(mc.flags & MOVE_FILE))
5579 return NULL;
5580 }
5581 if (!get_page_unless_zero(page))
5582 return NULL;
5583
5584 return page;
5585 }
5586
5587 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5588 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5589 pte_t ptent, swp_entry_t *entry)
5590 {
5591 struct page *page = NULL;
5592 swp_entry_t ent = pte_to_swp_entry(ptent);
5593
5594 if (!(mc.flags & MOVE_ANON))
5595 return NULL;
5596
5597 /*
5598 * Handle MEMORY_DEVICE_PRIVATE pages, which are ZONE_DEVICE pages
5599 * belonging to a device. Because they are not accessible by the CPU,
5600 * they are stored as special swap entries in the CPU page table.
5601 */
5602 if (is_device_private_entry(ent)) {
5603 page = pfn_swap_entry_to_page(ent);
5604 /*
5605 * MEMORY_DEVICE_PRIVATE means this is a ZONE_DEVICE page, which
5606 * has a refcount of 1 when free (unlike a normal page).
5607 */
5608 if (!page_ref_add_unless(page, 1, 1))
5609 return NULL;
5610 return page;
5611 }
5612
5613 if (non_swap_entry(ent))
5614 return NULL;
5615
5616 /*
5617 * Because lookup_swap_cache() updates some statistics counters,
5618 * we call find_get_page() with swapper_space directly.
5619 */
5620 page = find_get_page(swap_address_space(ent), swp_offset(ent));
5621 entry->val = ent.val;
5622
5623 return page;
5624 }
5625 #else
5626 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5627 pte_t ptent, swp_entry_t *entry)
5628 {
5629 return NULL;
5630 }
5631 #endif
5632
5633 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5634 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5635 {
5636 if (!vma->vm_file) /* anonymous vma */
5637 return NULL;
5638 if (!(mc.flags & MOVE_FILE))
5639 return NULL;
5640
5641 /* The page is moved even if it's not part of this task's RSS (page-faulted). */
5642 /* shmem/tmpfs may report a page as out on swap: account for that too. */
5643 return find_get_incore_page(vma->vm_file->f_mapping,
5644 linear_page_index(vma, addr));
5645 }
5646
5647 /**
5648 * mem_cgroup_move_account - move account of the page
5649 * @page: the page
5650 * @compound: charge the page as compound or small page
5651 * @from: mem_cgroup which the page is moved from.
5652 * @to: mem_cgroup which the page is moved to. @from != @to.
5653 *
5654 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful).
5655 *
5656 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5657 * from old cgroup.
5658 */
5659 static int mem_cgroup_move_account(struct page *page,
5660 bool compound,
5661 struct mem_cgroup *from,
5662 struct mem_cgroup *to)
5663 {
5664 struct lruvec *from_vec, *to_vec;
5665 struct pglist_data *pgdat;
5666 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5667 int ret;
5668
5669 VM_BUG_ON(from == to);
5670 VM_BUG_ON_PAGE(PageLRU(page), page);
5671 VM_BUG_ON(compound && !PageTransHuge(page));
5672
5673 /*
5674 * Prevent mem_cgroup_migrate() from looking at
5675 * the source page's memory cgroup while we change it.
5676 */
5677 ret = -EBUSY;
5678 if (!trylock_page(page))
5679 goto out;
5680
5681 ret = -EINVAL;
5682 if (page_memcg(page) != from)
5683 goto out_unlock;
5684
5685 pgdat = page_pgdat(page);
5686 from_vec = mem_cgroup_lruvec(from, pgdat);
5687 to_vec = mem_cgroup_lruvec(to, pgdat);
5688
5689 lock_page_memcg(page);
5690
5691 if (PageAnon(page)) {
5692 if (page_mapped(page)) {
5693 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5694 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5695 if (PageTransHuge(page)) {
5696 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5697 -nr_pages);
5698 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5699 nr_pages);
5700 }
5701 }
5702 } else {
5703 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5704 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5705
5706 if (PageSwapBacked(page)) {
5707 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5708 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5709 }
5710
5711 if (page_mapped(page)) {
5712 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5713 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5714 }
5715
5716 if (PageDirty(page)) {
5717 struct address_space *mapping = page_mapping(page);
5718
5719 if (mapping_can_writeback(mapping)) {
5720 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5721 -nr_pages);
5722 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5723 nr_pages);
5724 }
5725 }
5726 }
5727
5728 if (PageWriteback(page)) {
5729 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5730 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5731 }
5732
5733 /*
5734 * All state has been migrated, let's switch to the new memcg.
5735 *
5736 * It is safe to change page's memcg here because the page
5737 * is referenced, charged, isolated, and locked: we can't race
5738 * with (un)charging, migration, LRU putback, or anything else
5739 * that would rely on a stable page's memory cgroup.
5740 *
5741 * Note that lock_page_memcg is a memcg lock, not a page lock,
5742 * to save space. As soon as we switch page's memory cgroup to a
5743 * new memcg that isn't locked, the above state can change
5744 * concurrently again. Make sure we're truly done with it.
5745 */
5746 smp_mb();
5747
5748 css_get(&to->css);
5749 css_put(&from->css);
5750
5751 page->memcg_data = (unsigned long)to;
5752
5753 __unlock_page_memcg(from);
5754
5755 ret = 0;
5756
5757 local_irq_disable();
5758 mem_cgroup_charge_statistics(to, page, nr_pages);
5759 memcg_check_events(to, page);
5760 mem_cgroup_charge_statistics(from, page, -nr_pages);
5761 memcg_check_events(from, page);
5762 local_irq_enable();
5763 out_unlock:
5764 unlock_page(page);
5765 out:
5766 return ret;
5767 }
5768
5769 /**
5770 * get_mctgt_type - get target type of moving charge
5771 * @vma: the vma the pte to be checked belongs
5772 * @addr: the address corresponding to the pte to be checked
5773 * @ptent: the pte to be checked
5774 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5775 *
5776 * Returns
5777 * 0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5778 * 1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5779 * move charge. If @target is not NULL, the page is stored in target->page
5780 * with an extra refcount taken (callers must handle it).
5781 * 2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5782 * target for charge migration. If @target is not NULL, the entry is stored
5783 * in target->ent.
5784 * 3 (MC_TARGET_DEVICE): like MC_TARGET_PAGE, but the page is MEMORY_DEVICE_PRIVATE
5785 * (so a ZONE_DEVICE page and thus not on the LRU).
5786 * For now such a page is charged like a regular page would be, as for all
5787 * intents and purposes it is just special memory taking the place of a
5788 * regular page.
5789 *
5790 * See Documentation/vm/hmm.rst and include/linux/hmm.h
5791 *
5792 * Called with pte lock held.
5793 */
5794
5795 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5796 unsigned long addr, pte_t ptent, union mc_target *target)
5797 {
5798 struct page *page = NULL;
5799 enum mc_target_type ret = MC_TARGET_NONE;
5800 swp_entry_t ent = { .val = 0 };
5801
5802 if (pte_present(ptent))
5803 page = mc_handle_present_pte(vma, addr, ptent);
5804 else if (is_swap_pte(ptent))
5805 page = mc_handle_swap_pte(vma, ptent, &ent);
5806 else if (pte_none(ptent))
5807 page = mc_handle_file_pte(vma, addr, ptent, &ent);
5808
5809 if (!page && !ent.val)
5810 return ret;
5811 if (page) {
5812 /*
5813 * Only do a loose check without serialization.
5814 * mem_cgroup_move_account() checks the page is valid or
5815 * not under LRU exclusion.
5816 */
5817 if (page_memcg(page) == mc.from) {
5818 ret = MC_TARGET_PAGE;
5819 if (is_device_private_page(page))
5820 ret = MC_TARGET_DEVICE;
5821 if (target)
5822 target->page = page;
5823 }
5824 if (!ret || !target)
5825 put_page(page);
5826 }
5827 /*
5828 * There is a swap entry and a page doesn't exist or isn't charged.
5829 * But we cannot move a tail-page in a THP.
5830 */
5831 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5832 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5833 ret = MC_TARGET_SWAP;
5834 if (target)
5835 target->ent = ent;
5836 }
5837 return ret;
5838 }
5839
5840 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5841 /*
5842 * We don't consider PMD mapped swapping or file mapped pages because THP does
5843 * not support them for now.
5844 * Caller should make sure that pmd_trans_huge(pmd) is true.
5845 */
5846 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5847 unsigned long addr, pmd_t pmd, union mc_target *target)
5848 {
5849 struct page *page = NULL;
5850 enum mc_target_type ret = MC_TARGET_NONE;
5851
5852 if (unlikely(is_swap_pmd(pmd))) {
5853 VM_BUG_ON(thp_migration_supported() &&
5854 !is_pmd_migration_entry(pmd));
5855 return ret;
5856 }
5857 page = pmd_page(pmd);
5858 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5859 if (!(mc.flags & MOVE_ANON))
5860 return ret;
5861 if (page_memcg(page) == mc.from) {
5862 ret = MC_TARGET_PAGE;
5863 if (target) {
5864 get_page(page);
5865 target->page = page;
5866 }
5867 }
5868 return ret;
5869 }
5870 #else
5871 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5872 unsigned long addr, pmd_t pmd, union mc_target *target)
5873 {
5874 return MC_TARGET_NONE;
5875 }
5876 #endif
5877
5878 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5879 unsigned long addr, unsigned long end,
5880 struct mm_walk *walk)
5881 {
5882 struct vm_area_struct *vma = walk->vma;
5883 pte_t *pte;
5884 spinlock_t *ptl;
5885
5886 ptl = pmd_trans_huge_lock(pmd, vma);
5887 if (ptl) {
5888 /*
5889 * Note there cannot be MC_TARGET_DEVICE for now, as we do not
5890 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
5891 * this might change.
5892 */
5893 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5894 mc.precharge += HPAGE_PMD_NR;
5895 spin_unlock(ptl);
5896 return 0;
5897 }
5898
5899 if (pmd_trans_unstable(pmd))
5900 return 0;
5901 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5902 for (; addr != end; pte++, addr += PAGE_SIZE)
5903 if (get_mctgt_type(vma, addr, *pte, NULL))
5904 mc.precharge++; /* increment precharge temporarily */
5905 pte_unmap_unlock(pte - 1, ptl);
5906 cond_resched();
5907
5908 return 0;
5909 }
5910
5911 static const struct mm_walk_ops precharge_walk_ops = {
5912 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5913 };
5914
5915 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5916 {
5917 unsigned long precharge;
5918
5919 mmap_read_lock(mm);
5920 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5921 mmap_read_unlock(mm);
5922
5923 precharge = mc.precharge;
5924 mc.precharge = 0;
5925
5926 return precharge;
5927 }
5928
5929 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5930 {
5931 unsigned long precharge = mem_cgroup_count_precharge(mm);
5932
5933 VM_BUG_ON(mc.moving_task);
5934 mc.moving_task = current;
5935 return mem_cgroup_do_precharge(precharge);
5936 }
5937
5938 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5939 static void __mem_cgroup_clear_mc(void)
5940 {
5941 struct mem_cgroup *from = mc.from;
5942 struct mem_cgroup *to = mc.to;
5943
5944 /* we must uncharge all the leftover precharges from mc.to */
5945 if (mc.precharge) {
5946 cancel_charge(mc.to, mc.precharge);
5947 mc.precharge = 0;
5948 }
5949 /*
5950 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5951 * we must uncharge here.
5952 */
5953 if (mc.moved_charge) {
5954 cancel_charge(mc.from, mc.moved_charge);
5955 mc.moved_charge = 0;
5956 }
5957 /* we must fixup refcnts and charges */
5958 if (mc.moved_swap) {
5959 /* uncharge swap account from the old cgroup */
5960 if (!mem_cgroup_is_root(mc.from))
5961 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5962
5963 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5964
5965 /*
5966 * we charged both to->memory and to->memsw, so we
5967 * should uncharge to->memory.
5968 */
5969 if (!mem_cgroup_is_root(mc.to))
5970 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5971
5972 mc.moved_swap = 0;
5973 }
5974 memcg_oom_recover(from);
5975 memcg_oom_recover(to);
5976 wake_up_all(&mc.waitq);
5977 }
5978
5979 static void mem_cgroup_clear_mc(void)
5980 {
5981 struct mm_struct *mm = mc.mm;
5982
5983 /*
5984 * we must clear moving_task before waking up waiters at the end of
5985 * task migration.
5986 */
5987 mc.moving_task = NULL;
5988 __mem_cgroup_clear_mc();
5989 spin_lock(&mc.lock);
5990 mc.from = NULL;
5991 mc.to = NULL;
5992 mc.mm = NULL;
5993 spin_unlock(&mc.lock);
5994
5995 mmput(mm);
5996 }
5997
5998 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5999 {
6000 struct cgroup_subsys_state *css;
6001 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6002 struct mem_cgroup *from;
6003 struct task_struct *leader, *p;
6004 struct mm_struct *mm;
6005 unsigned long move_flags;
6006 int ret = 0;
6007
6008 /* charge immigration isn't supported on the default hierarchy */
6009 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6010 return 0;
6011
6012 /*
6013 * Multi-process migrations only happen on the default hierarchy
6014 * where charge immigration is not used. Perform charge
6015 * immigration if @tset contains a leader and whine if there are
6016 * multiple.
6017 */
6018 p = NULL;
6019 cgroup_taskset_for_each_leader(leader, css, tset) {
6020 WARN_ON_ONCE(p);
6021 p = leader;
6022 memcg = mem_cgroup_from_css(css);
6023 }
6024 if (!p)
6025 return 0;
6026
6027 /*
6028 * We are now committed to this value whatever it is. Changes in this
6029 * tunable will only affect upcoming migrations, not the current one.
6030 * So we need to save it, and keep it going.
6031 */
6032 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6033 if (!move_flags)
6034 return 0;
6035
6036 from = mem_cgroup_from_task(p);
6037
6038 VM_BUG_ON(from == memcg);
6039
6040 mm = get_task_mm(p);
6041 if (!mm)
6042 return 0;
6043 /* We move charges only when we move the owner of the mm */
6044 if (mm->owner == p) {
6045 VM_BUG_ON(mc.from);
6046 VM_BUG_ON(mc.to);
6047 VM_BUG_ON(mc.precharge);
6048 VM_BUG_ON(mc.moved_charge);
6049 VM_BUG_ON(mc.moved_swap);
6050
6051 spin_lock(&mc.lock);
6052 mc.mm = mm;
6053 mc.from = from;
6054 mc.to = memcg;
6055 mc.flags = move_flags;
6056 spin_unlock(&mc.lock);
6057 /* We set mc.moving_task later */
6058
6059 ret = mem_cgroup_precharge_mc(mm);
6060 if (ret)
6061 mem_cgroup_clear_mc();
6062 } else {
6063 mmput(mm);
6064 }
6065 return ret;
6066 }
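/*
 * Usage sketch for the legacy (cgroup v1) interface driving the code
 * above; the mount path is illustrative and may differ per system:
 *
 *   echo 3 > /sys/fs/cgroup/memory/<group>/memory.move_charge_at_immigrate
 *
 * Bit 0 (MOVE_ANON) selects anonymous pages and bit 1 (MOVE_FILE) selects
 * file/shmem pages, so writing 3 moves both kinds when a task migrates.
 */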
6067
6068 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6069 {
6070 if (mc.to)
6071 mem_cgroup_clear_mc();
6072 }
6073
6074 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6075 unsigned long addr, unsigned long end,
6076 struct mm_walk *walk)
6077 {
6078 int ret = 0;
6079 struct vm_area_struct *vma = walk->vma;
6080 pte_t *pte;
6081 spinlock_t *ptl;
6082 enum mc_target_type target_type;
6083 union mc_target target;
6084 struct page *page;
6085
6086 ptl = pmd_trans_huge_lock(pmd, vma);
6087 if (ptl) {
6088 if (mc.precharge < HPAGE_PMD_NR) {
6089 spin_unlock(ptl);
6090 return 0;
6091 }
6092 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6093 if (target_type == MC_TARGET_PAGE) {
6094 page = target.page;
6095 if (!isolate_lru_page(page)) {
6096 if (!mem_cgroup_move_account(page, true,
6097 mc.from, mc.to)) {
6098 mc.precharge -= HPAGE_PMD_NR;
6099 mc.moved_charge += HPAGE_PMD_NR;
6100 }
6101 putback_lru_page(page);
6102 }
6103 put_page(page);
6104 } else if (target_type == MC_TARGET_DEVICE) {
6105 page = target.page;
6106 if (!mem_cgroup_move_account(page, true,
6107 mc.from, mc.to)) {
6108 mc.precharge -= HPAGE_PMD_NR;
6109 mc.moved_charge += HPAGE_PMD_NR;
6110 }
6111 put_page(page);
6112 }
6113 spin_unlock(ptl);
6114 return 0;
6115 }
6116
6117 if (pmd_trans_unstable(pmd))
6118 return 0;
6119 retry:
6120 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6121 for (; addr != end; addr += PAGE_SIZE) {
6122 pte_t ptent = *(pte++);
6123 bool device = false;
6124 swp_entry_t ent;
6125
6126 if (!mc.precharge)
6127 break;
6128
6129 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6130 case MC_TARGET_DEVICE:
6131 device = true;
6132 fallthrough;
6133 case MC_TARGET_PAGE:
6134 page = target.page;
6135 /*
6136 * We can have a part of the split pmd here. Moving it
6137 * can be done but it would be too convoluted so simply
6138 * ignore such a partial THP and keep it in original
6139 * memcg. There should be somebody mapping the head.
6140 */
6141 if (PageTransCompound(page))
6142 goto put;
6143 if (!device && isolate_lru_page(page))
6144 goto put;
6145 if (!mem_cgroup_move_account(page, false,
6146 mc.from, mc.to)) {
6147 mc.precharge--;
6148 /* we uncharge from mc.from later. */
6149 mc.moved_charge++;
6150 }
6151 if (!device)
6152 putback_lru_page(page);
6153 put: /* get_mctgt_type() gets the page */
6154 put_page(page);
6155 break;
6156 case MC_TARGET_SWAP:
6157 ent = target.ent;
6158 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6159 mc.precharge--;
6160 mem_cgroup_id_get_many(mc.to, 1);
6161 /* we fixup other refcnts and charges later. */
6162 mc.moved_swap++;
6163 }
6164 break;
6165 default:
6166 break;
6167 }
6168 }
6169 pte_unmap_unlock(pte - 1, ptl);
6170 cond_resched();
6171
6172 if (addr != end) {
6173 /*
6174 * We have consumed all precharges we got in can_attach().
6175 * We try to charge one by one, but don't do any additional
6176 * charges to mc.to if we have already failed a charge once in
6177 * the attach() phase.
6178 */
6179 ret = mem_cgroup_do_precharge(1);
6180 if (!ret)
6181 goto retry;
6182 }
6183
6184 return ret;
6185 }
6186
6187 static const struct mm_walk_ops charge_walk_ops = {
6188 .pmd_entry = mem_cgroup_move_charge_pte_range,
6189 };
6190
6191 static void mem_cgroup_move_charge(void)
6192 {
6193 lru_add_drain_all();
6194 /*
6195 * Signal lock_page_memcg() to take the memcg's move_lock
6196 * while we're moving its pages to another memcg. Then wait
6197 * for already started RCU-only updates to finish.
6198 */
6199 atomic_inc(&mc.from->moving_account);
6200 synchronize_rcu();
6201 retry:
6202 if (unlikely(!mmap_read_trylock(mc.mm))) {
6203 /*
6204 * Someone holding the mmap_lock might be waiting on the
6205 * waitq. So we cancel all extra charges, wake up all waiters,
6206 * and retry. Because we cancel precharges, we might not be able
6207 * to move enough charges, but charge moving is a best-effort
6208 * feature anyway, so it wouldn't be a big problem.
6209 */
6210 __mem_cgroup_clear_mc();
6211 cond_resched();
6212 goto retry;
6213 }
6214 /*
6215 * When we have consumed all precharges and failed in doing
6216 * additional charge, the page walk just aborts.
6217 */
6218 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6219 NULL);
6220
6221 mmap_read_unlock(mc.mm);
6222 atomic_dec(&mc.from->moving_account);
6223 }
6224
6225 static void mem_cgroup_move_task(void)
6226 {
6227 if (mc.to) {
6228 mem_cgroup_move_charge();
6229 mem_cgroup_clear_mc();
6230 }
6231 }
6232 #else /* !CONFIG_MMU */
6233 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6234 {
6235 return 0;
6236 }
6237 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6238 {
6239 }
6240 static void mem_cgroup_move_task(void)
6241 {
6242 }
6243 #endif
6244
6245 #ifdef CONFIG_LRU_GEN
6246 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6247 {
6248 struct task_struct *task;
6249 struct cgroup_subsys_state *css;
6250
6251 /* find the first leader if there is any */
6252 cgroup_taskset_for_each_leader(task, css, tset)
6253 break;
6254
6255 if (!task)
6256 return;
6257
6258 task_lock(task);
6259 if (task->mm && READ_ONCE(task->mm->owner) == task)
6260 lru_gen_migrate_mm(task->mm);
6261 task_unlock(task);
6262 }
6263 #else
6264 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6265 {
6266 }
6267 #endif /* CONFIG_LRU_GEN */
6268
6269 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6270 {
6271 if (value == PAGE_COUNTER_MAX)
6272 seq_puts(m, "max\n");
6273 else
6274 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6275
6276 return 0;
6277 }
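/*
 * Example output, assuming 4 KiB pages: a counter value of 262144 pages is
 * printed as "1073741824\n" (262144 * 4096 bytes), while PAGE_COUNTER_MAX
 * is printed as "max\n".
 */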
6278
6279 static u64 memory_current_read(struct cgroup_subsys_state *css,
6280 struct cftype *cft)
6281 {
6282 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6283
6284 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6285 }
6286
6287 static int memory_min_show(struct seq_file *m, void *v)
6288 {
6289 return seq_puts_memcg_tunable(m,
6290 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6291 }
6292
6293 static ssize_t memory_min_write(struct kernfs_open_file *of,
6294 char *buf, size_t nbytes, loff_t off)
6295 {
6296 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6297 unsigned long min;
6298 int err;
6299
6300 buf = strstrip(buf);
6301 err = page_counter_memparse(buf, "max", &min);
6302 if (err)
6303 return err;
6304
6305 page_counter_set_min(&memcg->memory, min);
6306
6307 return nbytes;
6308 }
6309
6310 static int memory_low_show(struct seq_file *m, void *v)
6311 {
6312 return seq_puts_memcg_tunable(m,
6313 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6314 }
6315
6316 static ssize_t memory_low_write(struct kernfs_open_file *of,
6317 char *buf, size_t nbytes, loff_t off)
6318 {
6319 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6320 unsigned long low;
6321 int err;
6322
6323 buf = strstrip(buf);
6324 err = page_counter_memparse(buf, "max", &low);
6325 if (err)
6326 return err;
6327
6328 page_counter_set_low(&memcg->memory, low);
6329
6330 return nbytes;
6331 }
6332
6333 static int memory_high_show(struct seq_file *m, void *v)
6334 {
6335 return seq_puts_memcg_tunable(m,
6336 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6337 }
6338
6339 static ssize_t memory_high_write(struct kernfs_open_file *of,
6340 char *buf, size_t nbytes, loff_t off)
6341 {
6342 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6343 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6344 bool drained = false;
6345 unsigned long high;
6346 int err;
6347
6348 buf = strstrip(buf);
6349 err = page_counter_memparse(buf, "max", &high);
6350 if (err)
6351 return err;
6352
6353 page_counter_set_high(&memcg->memory, high);
6354
6355 for (;;) {
6356 unsigned long nr_pages = page_counter_read(&memcg->memory);
6357 unsigned long reclaimed;
6358
6359 if (nr_pages <= high)
6360 break;
6361
6362 if (signal_pending(current))
6363 break;
6364
6365 if (!drained) {
6366 drain_all_stock(memcg);
6367 drained = true;
6368 continue;
6369 }
6370
6371 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6372 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6373
6374 if (!reclaimed && !nr_retries--)
6375 break;
6376 }
6377
6378 memcg_wb_domain_size_changed(memcg);
6379 return nbytes;
6380 }
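/*
 * Usage sketch (cgroup v2, illustrative path): tightening and then lifting
 * the throttle limit might look like
 *
 *   echo 512M > /sys/fs/cgroup/<group>/memory.high
 *   echo max > /sys/fs/cgroup/<group>/memory.high
 *
 * As implemented above, the write reclaims toward the new limit before
 * returning, giving up after MAX_RECLAIM_RETRIES fruitless passes or when
 * a signal is pending.
 */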
6381
6382 static int memory_max_show(struct seq_file *m, void *v)
6383 {
6384 return seq_puts_memcg_tunable(m,
6385 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6386 }
6387
6388 static ssize_t memory_max_write(struct kernfs_open_file *of,
6389 char *buf, size_t nbytes, loff_t off)
6390 {
6391 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6392 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6393 bool drained = false;
6394 unsigned long max;
6395 int err;
6396
6397 buf = strstrip(buf);
6398 err = page_counter_memparse(buf, "max", &max);
6399 if (err)
6400 return err;
6401
6402 xchg(&memcg->memory.max, max);
6403
6404 for (;;) {
6405 unsigned long nr_pages = page_counter_read(&memcg->memory);
6406
6407 if (nr_pages <= max)
6408 break;
6409
6410 if (signal_pending(current))
6411 break;
6412
6413 if (!drained) {
6414 drain_all_stock(memcg);
6415 drained = true;
6416 continue;
6417 }
6418
6419 if (nr_reclaims) {
6420 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6421 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6422 nr_reclaims--;
6423 continue;
6424 }
6425
6426 memcg_memory_event(memcg, MEMCG_OOM);
6427 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6428 break;
6429 }
6430
6431 memcg_wb_domain_size_changed(memcg);
6432 return nbytes;
6433 }
6434
6435 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6436 {
6437 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6438 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6439 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6440 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6441 seq_printf(m, "oom_kill %lu\n",
6442 atomic_long_read(&events[MEMCG_OOM_KILL]));
6443 }
6444
6445 static int memory_events_show(struct seq_file *m, void *v)
6446 {
6447 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6448
6449 __memory_events_show(m, memcg->memory_events);
6450 return 0;
6451 }
6452
6453 static int memory_events_local_show(struct seq_file *m, void *v)
6454 {
6455 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6456
6457 __memory_events_show(m, memcg->memory_events_local);
6458 return 0;
6459 }
6460
6461 static int memory_stat_show(struct seq_file *m, void *v)
6462 {
6463 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6464 char *buf;
6465
6466 buf = memory_stat_format(memcg);
6467 if (!buf)
6468 return -ENOMEM;
6469 seq_puts(m, buf);
6470 kfree(buf);
6471 return 0;
6472 }
6473
6474 #ifdef CONFIG_NUMA
6475 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6476 int item)
6477 {
6478 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6479 }
6480
6481 static int memory_numa_stat_show(struct seq_file *m, void *v)
6482 {
6483 int i;
6484 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6485
6486 mem_cgroup_flush_stats();
6487
6488 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6489 int nid;
6490
6491 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6492 continue;
6493
6494 seq_printf(m, "%s", memory_stats[i].name);
6495 for_each_node_state(nid, N_MEMORY) {
6496 u64 size;
6497 struct lruvec *lruvec;
6498
6499 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6500 size = lruvec_page_state_output(lruvec,
6501 memory_stats[i].idx);
6502 seq_printf(m, " N%d=%llu", nid, size);
6503 }
6504 seq_putc(m, '\n');
6505 }
6506
6507 return 0;
6508 }
6509 #endif
6510
6511 static int memory_oom_group_show(struct seq_file *m, void *v)
6512 {
6513 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6514
6515 seq_printf(m, "%d\n", memcg->oom_group);
6516
6517 return 0;
6518 }
6519
6520 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6521 char *buf, size_t nbytes, loff_t off)
6522 {
6523 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6524 int ret, oom_group;
6525
6526 buf = strstrip(buf);
6527 if (!buf)
6528 return -EINVAL;
6529
6530 ret = kstrtoint(buf, 0, &oom_group);
6531 if (ret)
6532 return ret;
6533
6534 if (oom_group != 0 && oom_group != 1)
6535 return -EINVAL;
6536
6537 memcg->oom_group = oom_group;
6538
6539 return nbytes;
6540 }
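/*
 * Usage sketch (illustrative path): have the OOM killer treat the group as
 * a single indivisible unit:
 *
 *   echo 1 > /sys/fs/cgroup/<group>/memory.oom.group
 *
 * Only 0 and 1 are accepted, as enforced above.
 */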
6541
6542 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6543 size_t nbytes, loff_t off)
6544 {
6545 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6546 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6547 unsigned long nr_to_reclaim, nr_reclaimed = 0;
6548 unsigned int reclaim_options;
6549 int err;
6550
6551 buf = strstrip(buf);
6552 err = page_counter_memparse(buf, "", &nr_to_reclaim);
6553 if (err)
6554 return err;
6555
6556 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6557 while (nr_reclaimed < nr_to_reclaim) {
6558 unsigned long reclaimed;
6559
6560 if (signal_pending(current))
6561 return -EINTR;
6562
6563 /*
6564 * This is the final attempt, drain percpu lru caches in the
6565 * hope of introducing more evictable pages for
6566 * try_to_free_mem_cgroup_pages().
6567 */
6568 if (!nr_retries)
6569 lru_add_drain_all();
6570
6571 reclaimed = try_to_free_mem_cgroup_pages(memcg,
6572 nr_to_reclaim - nr_reclaimed,
6573 GFP_KERNEL, reclaim_options);
6574
6575 if (!reclaimed && !nr_retries--)
6576 return -EAGAIN;
6577
6578 nr_reclaimed += reclaimed;
6579 }
6580
6581 return nbytes;
6582 }
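/*
 * Usage sketch (illustrative path): proactively reclaim about 1 GiB from a
 * group:
 *
 *   echo 1G > /sys/fs/cgroup/<group>/memory.reclaim
 *
 * As implemented above, the write returns -EINTR if a signal is pending
 * and -EAGAIN if the target cannot be met within MAX_RECLAIM_RETRIES
 * attempts.
 */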
6583
6584 static struct cftype memory_files[] = {
6585 {
6586 .name = "current",
6587 .flags = CFTYPE_NOT_ON_ROOT,
6588 .read_u64 = memory_current_read,
6589 },
6590 {
6591 .name = "min",
6592 .flags = CFTYPE_NOT_ON_ROOT,
6593 .seq_show = memory_min_show,
6594 .write = memory_min_write,
6595 },
6596 {
6597 .name = "low",
6598 .flags = CFTYPE_NOT_ON_ROOT,
6599 .seq_show = memory_low_show,
6600 .write = memory_low_write,
6601 },
6602 {
6603 .name = "high",
6604 .flags = CFTYPE_NOT_ON_ROOT,
6605 .seq_show = memory_high_show,
6606 .write = memory_high_write,
6607 },
6608 {
6609 .name = "max",
6610 .flags = CFTYPE_NOT_ON_ROOT,
6611 .seq_show = memory_max_show,
6612 .write = memory_max_write,
6613 },
6614 {
6615 .name = "events",
6616 .flags = CFTYPE_NOT_ON_ROOT,
6617 .file_offset = offsetof(struct mem_cgroup, events_file),
6618 .seq_show = memory_events_show,
6619 },
6620 {
6621 .name = "events.local",
6622 .flags = CFTYPE_NOT_ON_ROOT,
6623 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6624 .seq_show = memory_events_local_show,
6625 },
6626 {
6627 .name = "stat",
6628 .seq_show = memory_stat_show,
6629 },
6630 #ifdef CONFIG_NUMA
6631 {
6632 .name = "numa_stat",
6633 .seq_show = memory_numa_stat_show,
6634 },
6635 #endif
6636 {
6637 .name = "oom.group",
6638 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6639 .seq_show = memory_oom_group_show,
6640 .write = memory_oom_group_write,
6641 },
6642 {
6643 .name = "reclaim",
6644 .flags = CFTYPE_NS_DELEGATABLE,
6645 .write = memory_reclaim,
6646 },
6647 { } /* terminate */
6648 };
6649
6650 struct cgroup_subsys memory_cgrp_subsys = {
6651 .css_alloc = mem_cgroup_css_alloc,
6652 .css_online = mem_cgroup_css_online,
6653 .css_offline = mem_cgroup_css_offline,
6654 .css_released = mem_cgroup_css_released,
6655 .css_free = mem_cgroup_css_free,
6656 .css_reset = mem_cgroup_css_reset,
6657 .css_rstat_flush = mem_cgroup_css_rstat_flush,
6658 .can_attach = mem_cgroup_can_attach,
6659 .attach = mem_cgroup_attach,
6660 .cancel_attach = mem_cgroup_cancel_attach,
6661 .post_attach = mem_cgroup_move_task,
6662 .dfl_cftypes = memory_files,
6663 .legacy_cftypes = mem_cgroup_legacy_files,
6664 .early_init = 0,
6665 };
6666
6667 /*
6668 * This function calculates an individual cgroup's effective
6669 * protection which is derived from its own memory.min/low, its
6670 * parent's and siblings' settings, as well as the actual memory
6671 * distribution in the tree.
6672 *
6673 * The following rules apply to the effective protection values:
6674 *
6675 * 1. At the first level of reclaim, effective protection is equal to
6676 * the declared protection in memory.min and memory.low.
6677 *
6678 * 2. To enable safe delegation of the protection configuration, at
6679 * subsequent levels the effective protection is capped to the
6680 * parent's effective protection.
6681 *
6682 * 3. To make complex and dynamic subtrees easier to configure, the
6683 * user is allowed to overcommit the declared protection at a given
6684 * level. If that is the case, the parent's effective protection is
6685 * distributed to the children in proportion to how much protection
6686 * they have declared and how much of it they are utilizing.
6687 *
6688 * This makes distribution proportional, but also work-conserving:
6689 * if one cgroup claims much more protection than it uses memory,
6690 * the unused remainder is available to its siblings.
6691 *
6692 * 4. Conversely, when the declared protection is undercommitted at a
6693 * given level, the distribution of the larger parental protection
6694 * budget is NOT proportional. A cgroup's protection from a sibling
6695 * is capped to its own memory.min/low setting.
6696 *
6697 * 5. However, to allow protecting recursive subtrees from each other
6698 * without having to declare each individual cgroup's fixed share
6699 * of the ancestor's claim to protection, any unutilized -
6700 * "floating" - protection from up the tree is distributed in
6701 * proportion to each cgroup's *usage*. This makes the protection
6702 * neutral wrt sibling cgroups and lets them compete freely over
6703 * the shared parental protection budget, but it protects the
6704 * subtree as a whole from neighboring subtrees.
6705 *
6706 * Note that 4. and 5. are not in conflict: 4. is about protecting
6707 * against immediate siblings whereas 5. is about protecting against
6708 * neighboring subtrees.
6709 */
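/*
 * Worked example for rule 3, with purely illustrative numbers: a parent
 * with an effective protection of 8G has two children that each declare
 * memory.low = 8G and each use 6G. Together the siblings claim and use
 * 12G, more than the parent affords, so each child's effective low is
 * scaled to 6G * 8G / 12G = 4G - proportional to utilization and still
 * work-conserving.
 */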
6710 static unsigned long effective_protection(unsigned long usage,
6711 unsigned long parent_usage,
6712 unsigned long setting,
6713 unsigned long parent_effective,
6714 unsigned long siblings_protected)
6715 {
6716 unsigned long protected;
6717 unsigned long ep;
6718
6719 protected = min(usage, setting);
6720 /*
6721 * If all cgroups at this level combined claim and use more
6722 * protection than what the parent affords them, distribute
6723 * shares in proportion to utilization.
6724 *
6725 * We are using actual utilization rather than the statically
6726 * claimed protection in order to be work-conserving: claimed
6727 * but unused protection is available to siblings that would
6728 * otherwise get a smaller chunk than what they claimed.
6729 */
6730 if (siblings_protected > parent_effective)
6731 return protected * parent_effective / siblings_protected;
6732
6733 /*
6734 * Ok, utilized protection of all children is within what the
6735 * parent affords them, so we know whatever this child claims
6736 * and utilizes is effectively protected.
6737 *
6738 * If there is unprotected usage beyond this value, reclaim
6739 * will apply pressure in proportion to that amount.
6740 *
6741 * If there is unutilized protection, the cgroup will be fully
6742 * shielded from reclaim, but we do return a smaller value for
6743 * protection than what the group could enjoy in theory. This
6744 * is okay. With the overcommit distribution above, effective
6745 * protection is always dependent on how memory is actually
6746 * consumed among the siblings anyway.
6747 */
6748 ep = protected;
6749
6750 /*
6751 * If the children aren't claiming (all of) the protection
6752 * afforded to them by the parent, distribute the remainder in
6753 * proportion to the (unprotected) memory of each cgroup. That
6754 * way, cgroups that aren't explicitly prioritized wrt each
6755 * other compete freely over the allowance, but they are
6756 * collectively protected from neighboring trees.
6757 *
6758 * We're using unprotected memory for the weight so that if
6759 * some cgroups DO claim explicit protection, we don't protect
6760 * the same bytes twice.
6761 *
6762 * Check both usage and parent_usage against the respective
6763 * protected values. One should imply the other, but they
6764 * aren't read atomically - make sure the division is sane.
6765 */
6766 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6767 return ep;
6768 if (parent_effective > siblings_protected &&
6769 parent_usage > siblings_protected &&
6770 usage > protected) {
6771 unsigned long unclaimed;
6772
6773 unclaimed = parent_effective - siblings_protected;
6774 unclaimed *= usage - protected;
6775 unclaimed /= parent_usage - siblings_protected;
6776
6777 ep += unclaimed;
6778 }
6779
6780 return ep;
6781 }
6782
6783 /**
6784 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6785 * @root: the top ancestor of the sub-tree being checked
6786 * @memcg: the memory cgroup to check
6787 *
6788 * WARNING: This function is not stateless! It can only be used as part
6789 * of a top-down tree iteration, not for isolated queries.
6790 */
6791 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6792 struct mem_cgroup *memcg)
6793 {
6794 unsigned long usage, parent_usage;
6795 struct mem_cgroup *parent;
6796
6797 if (mem_cgroup_disabled())
6798 return;
6799
6800 if (!root)
6801 root = root_mem_cgroup;
6802
6803 /*
6804 * Effective values of the reclaim targets are ignored so they
6805 * can be stale. Have a look at mem_cgroup_protection for more
6806 * details.
6807 * TODO: calculation should be more robust so that we do not need
6808 * that special casing.
6809 */
6810 if (memcg == root)
6811 return;
6812
6813 usage = page_counter_read(&memcg->memory);
6814 if (!usage)
6815 return;
6816
6817 parent = parent_mem_cgroup(memcg);
6818 /* No parent means a non-hierarchical mode on v1 memcg */
6819 if (!parent)
6820 return;
6821
6822 trace_android_rvh_memcgv2_calc_decayed_watermark(memcg);
6823
6824 if (parent == root) {
6825 memcg->memory.emin = READ_ONCE(memcg->memory.min);
6826 memcg->memory.elow = READ_ONCE(memcg->memory.low);
6827 return;
6828 }
6829
6830 parent_usage = page_counter_read(&parent->memory);
6831
6832 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6833 READ_ONCE(memcg->memory.min),
6834 READ_ONCE(parent->memory.emin),
6835 atomic_long_read(&parent->memory.children_min_usage)));
6836
6837 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6838 READ_ONCE(memcg->memory.low),
6839 READ_ONCE(parent->memory.elow),
6840 atomic_long_read(&parent->memory.children_low_usage)));
6841 }
6842
6843 static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp)
6844 {
6845 unsigned int nr_pages = thp_nr_pages(page);
6846 int ret;
6847
6848 ret = try_charge(memcg, gfp, nr_pages);
6849 if (ret)
6850 goto out;
6851
6852 css_get(&memcg->css);
6853 commit_charge(page, memcg);
6854
6855 local_irq_disable();
6856 mem_cgroup_charge_statistics(memcg, page, nr_pages);
6857 memcg_check_events(memcg, page);
6858 local_irq_enable();
6859 out:
6860 return ret;
6861 }
6862
6863 /**
6864 * __mem_cgroup_charge - charge a newly allocated page to a cgroup
6865 * @page: page to charge
6866 * @mm: mm context of the victim
6867 * @gfp_mask: reclaim mode
6868 *
6869 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6870 * pages according to @gfp_mask if necessary. If @mm is NULL, try to
6871 * charge to the active memcg.
6872 *
6873 * Do not use this for pages allocated for swapin.
6874 *
6875 * Returns 0 on success. Otherwise, an error code is returned.
6876 */
6877 int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
6878 gfp_t gfp_mask)
6879 {
6880 struct mem_cgroup *memcg;
6881 int ret;
6882
6883 memcg = get_mem_cgroup_from_mm(mm);
6884 ret = charge_memcg(page, memcg, gfp_mask);
6885 css_put(&memcg->css);
6886
6887 return ret;
6888 }
6889
6890 /**
6891 * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
6892 * @page: page to charge
6893 * @mm: mm context of the victim
6894 * @gfp: reclaim mode
6895 * @entry: swap entry for which the page is allocated
6896 *
6897 * This function charges a page allocated for swapin. Please call this before
6898 * adding the page to the swapcache.
6899 *
6900 * Returns 0 on success. Otherwise, an error code is returned.
6901 */
6902 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
6903 gfp_t gfp, swp_entry_t entry)
6904 {
6905 struct mem_cgroup *memcg;
6906 unsigned short id;
6907 int ret;
6908
6909 if (mem_cgroup_disabled())
6910 return 0;
6911
6912 id = lookup_swap_cgroup_id(entry);
6913 rcu_read_lock();
6914 memcg = mem_cgroup_from_id(id);
6915 if (!memcg || !css_tryget_online(&memcg->css))
6916 memcg = get_mem_cgroup_from_mm(mm);
6917 rcu_read_unlock();
6918
6919 ret = charge_memcg(page, memcg, gfp);
6920
6921 css_put(&memcg->css);
6922 return ret;
6923 }
6924
6925 /*
6926 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6927 * @entry: swap entry for which the page is charged
6928 *
6929 * Call this function after successfully adding the charged page to swapcache.
6930 *
6931 * Note: This function assumes the page for which the swap slot is being
6932 * uncharged is an order-0 page.
6933 */
6934 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
6935 {
6936 /*
6937 * Cgroup1's unified memory+swap counter has been charged with the
6938 * new swapcache page, finish the transfer by uncharging the swap
6939 * slot. The swap slot would also get uncharged when it dies, but
6940 * it can stick around indefinitely and we'd count the page twice
6941 * the entire time.
6942 *
6943 * Cgroup2 has separate resource counters for memory and swap,
6944 * so this is a non-issue here. Memory and swap charge lifetimes
6945 * correspond 1:1 to page and swap slot lifetimes: we charge the
6946 * page to memory here, and uncharge swap when the slot is freed.
6947 */
6948 if (!mem_cgroup_disabled() && do_memsw_account()) {
6949 /*
6950 * The swap entry might not get freed for a long time,
6951 * let's not wait for it. The page already received a
6952 * memory+swap charge, drop the swap entry duplicate.
6953 */
6954 mem_cgroup_uncharge_swap(entry, 1);
6955 }
6956 }
6957
6958 struct uncharge_gather {
6959 struct mem_cgroup *memcg;
6960 unsigned long nr_memory;
6961 unsigned long pgpgout;
6962 unsigned long nr_kmem;
6963 struct page *dummy_page;
6964 };
6965
6966 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6967 {
6968 memset(ug, 0, sizeof(*ug));
6969 }
6970
6971 static void uncharge_batch(const struct uncharge_gather *ug)
6972 {
6973 unsigned long flags;
6974
6975 if (ug->nr_memory) {
6976 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
6977 if (do_memsw_account())
6978 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
6979 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6980 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6981 memcg_oom_recover(ug->memcg);
6982 }
6983
6984 local_irq_save(flags);
6985 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6986 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
6987 memcg_check_events(ug->memcg, ug->dummy_page);
6988 local_irq_restore(flags);
6989
6990 /* drop reference from uncharge_page */
6991 css_put(&ug->memcg->css);
6992 }
6993
6994 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6995 {
6996 unsigned long nr_pages;
6997 struct mem_cgroup *memcg;
6998 struct obj_cgroup *objcg;
6999 bool use_objcg = PageMemcgKmem(page);
7000
7001 VM_BUG_ON_PAGE(PageLRU(page), page);
7002
7003 /*
7004 * Nobody should be changing or seriously looking at the
7005 * page's memcg or objcg at this point; we have fully
7006 * exclusive access to the page.
7007 */
7008 if (use_objcg) {
7009 objcg = __page_objcg(page);
7010 /*
7011 * This get matches the put at the end of the function and
7012 * kmem pages do not hold memcg references anymore.
7013 */
7014 memcg = get_mem_cgroup_from_objcg(objcg);
7015 } else {
7016 memcg = __page_memcg(page);
7017 }
7018
7019 if (!memcg)
7020 return;
7021
7022 if (ug->memcg != memcg) {
7023 if (ug->memcg) {
7024 uncharge_batch(ug);
7025 uncharge_gather_clear(ug);
7026 }
7027 ug->memcg = memcg;
7028 ug->dummy_page = page;
7029
7030 /* pairs with css_put in uncharge_batch */
7031 css_get(&memcg->css);
7032 }
7033
7034 nr_pages = compound_nr(page);
7035
7036 if (use_objcg) {
7037 ug->nr_memory += nr_pages;
7038 ug->nr_kmem += nr_pages;
7039
7040 page->memcg_data = 0;
7041 obj_cgroup_put(objcg);
7042 } else {
7043 /* LRU pages aren't accounted at the root level */
7044 if (!mem_cgroup_is_root(memcg))
7045 ug->nr_memory += nr_pages;
7046 ug->pgpgout++;
7047
7048 page->memcg_data = 0;
7049 }
7050
7051 css_put(&memcg->css);
7052 }
7053
7054 /**
7055 * __mem_cgroup_uncharge - uncharge a page
7056 * @page: page to uncharge
7057 *
7058 * Uncharge a page previously charged with __mem_cgroup_charge().
7059 */
7060 void __mem_cgroup_uncharge(struct page *page)
7061 {
7062 struct uncharge_gather ug;
7063
7064 /* Don't touch page->lru of any random page, pre-check: */
7065 if (!page_memcg(page))
7066 return;
7067
7068 uncharge_gather_clear(&ug);
7069 uncharge_page(page, &ug);
7070 uncharge_batch(&ug);
7071 }
7072
7073 /**
7074 * __mem_cgroup_uncharge_list - uncharge a list of pages
7075 * @page_list: list of pages to uncharge
7076 *
7077 * Uncharge a list of pages previously charged with
7078 * __mem_cgroup_charge().
7079 */
7080 void __mem_cgroup_uncharge_list(struct list_head *page_list)
7081 {
7082 struct uncharge_gather ug;
7083 struct page *page;
7084
7085 uncharge_gather_clear(&ug);
7086 list_for_each_entry(page, page_list, lru)
7087 uncharge_page(page, &ug);
7088 if (ug.memcg)
7089 uncharge_batch(&ug);
7090 }
7091
7092 /**
7093 * mem_cgroup_migrate - charge a page's replacement
7094 * @oldpage: currently circulating page
7095 * @newpage: replacement page
7096 *
7097 * Charge @newpage as a replacement page for @oldpage. @oldpage will
7098 * be uncharged upon free.
7099 *
7100 * Both pages must be locked, @newpage->mapping must be set up.
7101 */
7102 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
7103 {
7104 struct mem_cgroup *memcg;
7105 unsigned int nr_pages;
7106 unsigned long flags;
7107
7108 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
7109 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
7110 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
7111 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
7112 newpage);
7113
7114 if (mem_cgroup_disabled())
7115 return;
7116
7117 /* Page cache replacement: new page already charged? */
7118 if (page_memcg(newpage))
7119 return;
7120
7121 memcg = page_memcg(oldpage);
7122 VM_WARN_ON_ONCE_PAGE(!memcg, oldpage);
7123 if (!memcg)
7124 return;
7125
7126 /* Force-charge the new page. The old one will be freed soon */
7127 nr_pages = thp_nr_pages(newpage);
7128
7129 if (!mem_cgroup_is_root(memcg)) {
7130 page_counter_charge(&memcg->memory, nr_pages);
7131 if (do_memsw_account())
7132 page_counter_charge(&memcg->memsw, nr_pages);
7133 }
7134
7135 css_get(&memcg->css);
7136 commit_charge(newpage, memcg);
7137
7138 local_irq_save(flags);
7139 mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
7140 memcg_check_events(memcg, newpage);
7141 local_irq_restore(flags);
7142 }
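/*
 * Illustrative sketch of the expected calling pattern (assumed from the
 * locking rules documented above, not a verbatim caller): both pages are
 * locked and the replacement's mapping is set up before the charge moves:
 *
 *	lock_page(oldpage);
 *	lock_page(newpage);
 *	newpage->mapping = oldpage->mapping;
 *	newpage->index = oldpage->index;
 *	mem_cgroup_migrate(oldpage, newpage);
 *	// oldpage keeps its charge and is uncharged when finally freed
 */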
7143
7144 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7145 EXPORT_SYMBOL(memcg_sockets_enabled_key);
7146
7147 void mem_cgroup_sk_alloc(struct sock *sk)
7148 {
7149 struct mem_cgroup *memcg;
7150
7151 if (!mem_cgroup_sockets_enabled)
7152 return;
7153
7154 /* Do not associate the sock with the memcg of an unrelated interrupted task. */
7155 if (in_interrupt())
7156 return;
7157
7158 rcu_read_lock();
7159 memcg = mem_cgroup_from_task(current);
7160 if (memcg == root_mem_cgroup)
7161 goto out;
7162 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7163 goto out;
7164 if (css_tryget(&memcg->css))
7165 sk->sk_memcg = memcg;
7166 out:
7167 rcu_read_unlock();
7168 }
7169
7170 void mem_cgroup_sk_free(struct sock *sk)
7171 {
7172 if (sk->sk_memcg)
7173 css_put(&sk->sk_memcg->css);
7174 }
7175
7176 /**
7177 * mem_cgroup_charge_skmem - charge socket memory
7178 * @memcg: memcg to charge
7179 * @nr_pages: number of pages to charge
7180 * @gfp_mask: reclaim mode
7181 *
7182 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7183 * @memcg's configured limit, %false if it doesn't.
7184 */
7185 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7186 gfp_t gfp_mask)
7187 {
7188 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7189 struct page_counter *fail;
7190
7191 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7192 memcg->tcpmem_pressure = 0;
7193 return true;
7194 }
7195 memcg->tcpmem_pressure = 1;
7196 if (gfp_mask & __GFP_NOFAIL) {
7197 page_counter_charge(&memcg->tcpmem, nr_pages);
7198 return true;
7199 }
7200 return false;
7201 }
7202
7203 if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7204 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7205 return true;
7206 }
7207
7208 return false;
7209 }
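/*
 * Illustrative sketch of how a networking caller might use this; the
 * caller shape and gfp flags below are assumptions for illustration (the
 * real users live in the socket memory accounting code):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages,
 *				     GFP_NOWAIT | __GFP_NOWARN)) {
 *		// over limit: fail the allocation and apply backpressure,
 *		// or retry with __GFP_NOFAIL for an unavoidable charge
 *	}
 */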
7210
7211 /**
7212 * mem_cgroup_uncharge_skmem - uncharge socket memory
7213 * @memcg: memcg to uncharge
7214 * @nr_pages: number of pages to uncharge
7215 */
7216 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7217 {
7218 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7219 page_counter_uncharge(&memcg->tcpmem, nr_pages);
7220 return;
7221 }
7222
7223 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7224
7225 refill_stock(memcg, nr_pages);
7226 }
7227
7228 static int __init cgroup_memory(char *s)
7229 {
7230 char *token;
7231
7232 while ((token = strsep(&s, ",")) != NULL) {
7233 if (!*token)
7234 continue;
7235 if (!strcmp(token, "nosocket"))
7236 cgroup_memory_nosocket = true;
7237 if (!strcmp(token, "nokmem"))
7238 cgroup_memory_nokmem = true;
7239 }
7240 return 1;
7241 }
7242 __setup("cgroup.memory=", cgroup_memory);
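/*
 * Example: socket and kernel memory accounting can be disabled together
 * on the kernel command line with
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * Unrecognized tokens are silently ignored by the parser above.
 */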
7243
7244 /*
7245 * subsys_initcall() for memory controller.
7246 *
7247 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7248 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7249 * basically everything that doesn't depend on a specific mem_cgroup structure
7250 * should be initialized from here.
7251 */
7252 static int __init mem_cgroup_init(void)
7253 {
7254 int cpu, node;
7255
7256 /*
7257 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7258 * used for per-memcg-per-cpu caching of per-node statistics. For this to
7259 * work correctly, the overfill threshold must not exceed
7260 * S32_MAX / PAGE_SIZE.
7261 */
7262 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7263
7264 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7265 memcg_hotplug_cpu_dead);
7266
7267 for_each_possible_cpu(cpu)
7268 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7269 drain_local_stock);
7270
7271 for_each_node(node) {
7272 struct mem_cgroup_tree_per_node *rtpn;
7273
7274 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7275 node_online(node) ? node : NUMA_NO_NODE);
7276
7277 rtpn->rb_root = RB_ROOT;
7278 rtpn->rb_rightmost = NULL;
7279 spin_lock_init(&rtpn->lock);
7280 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7281 }
7282
7283 return 0;
7284 }
7285 subsys_initcall(mem_cgroup_init);
7286
7287 #ifdef CONFIG_MEMCG_SWAP
7288 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7289 {
7290 while (!refcount_inc_not_zero(&memcg->id.ref)) {
7291 /*
7292 * The root cgroup cannot be destroyed, so its refcount must
7293 * always be >= 1.
7294 */
7295 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7296 VM_BUG_ON(1);
7297 break;
7298 }
7299 memcg = parent_mem_cgroup(memcg);
7300 if (!memcg)
7301 memcg = root_mem_cgroup;
7302 }
7303 return memcg;
7304 }
7305
7306 /**
7307 * mem_cgroup_swapout - transfer a memsw charge to swap
7308 * @page: page whose memsw charge to transfer
7309 * @entry: swap entry to move the charge to
7310 *
7311 * Transfer the memsw charge of @page to @entry.
7312 */
7313 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7314 {
7315 struct mem_cgroup *memcg, *swap_memcg;
7316 unsigned int nr_entries;
7317 unsigned short oldid;
7318
7319 VM_BUG_ON_PAGE(PageLRU(page), page);
7320 VM_BUG_ON_PAGE(page_count(page), page);
7321
7322 if (mem_cgroup_disabled())
7323 return;
7324
7325 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7326 return;
7327
7328 memcg = page_memcg(page);
7329
7330 VM_WARN_ON_ONCE_PAGE(!memcg, page);
7331 if (!memcg)
7332 return;
7333
7334 /*
7335 * In case the memcg owning these pages has been offlined and doesn't
7336 * have an ID allocated to it anymore, charge the closest online
7337 * ancestor for the swap instead and transfer the memory+swap charge.
7338 */
7339 swap_memcg = mem_cgroup_id_get_online(memcg);
7340 nr_entries = thp_nr_pages(page);
7341 /* Get references for the tail pages, too */
7342 if (nr_entries > 1)
7343 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7344 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7345 nr_entries);
7346 VM_BUG_ON_PAGE(oldid, page);
7347 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7348
7349 page->memcg_data = 0;
7350
7351 if (!mem_cgroup_is_root(memcg))
7352 page_counter_uncharge(&memcg->memory, nr_entries);
7353
7354 if (!cgroup_memory_noswap && memcg != swap_memcg) {
7355 if (!mem_cgroup_is_root(swap_memcg))
7356 page_counter_charge(&swap_memcg->memsw, nr_entries);
7357 page_counter_uncharge(&memcg->memsw, nr_entries);
7358 }
7359
7360 /*
7361 * Interrupts should be disabled here because the caller holds the
7362 * i_pages lock which is taken with interrupts-off. It is
7363 * important here to have the interrupts disabled because it is the
7364 * only synchronisation we have for updating the per-CPU variables.
7365 */
7366 VM_BUG_ON(!irqs_disabled());
7367 mem_cgroup_charge_statistics(memcg, page, -nr_entries);
7368 memcg_check_events(memcg, page);
7369
7370 css_put(&memcg->css);
7371 }
7372
7373 /**
7374 * __mem_cgroup_try_charge_swap - try charging swap space for a page
7375 * @page: page being added to swap
7376 * @entry: swap entry to charge
7377 *
7378 * Try to charge @page's memcg for the swap space at @entry.
7379 *
7380 * Returns 0 on success, -ENOMEM on failure.
7381 */
7382 int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7383 {
7384 unsigned int nr_pages = thp_nr_pages(page);
7385 struct page_counter *counter;
7386 struct mem_cgroup *memcg;
7387 unsigned short oldid;
7388
7389 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7390 return 0;
7391
7392 memcg = page_memcg(page);
7393
7394 VM_WARN_ON_ONCE_PAGE(!memcg, page);
7395 if (!memcg)
7396 return 0;
7397
7398 if (!entry.val) {
7399 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7400 return 0;
7401 }
7402
7403 memcg = mem_cgroup_id_get_online(memcg);
7404
7405 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7406 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7407 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7408 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7409 mem_cgroup_id_put(memcg);
7410 return -ENOMEM;
7411 }
7412
7413 /* Get references for the tail pages, too */
7414 if (nr_pages > 1)
7415 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7416 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7417 VM_BUG_ON_PAGE(oldid, page);
7418 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7419
7420 return 0;
7421 }
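/*
 * Illustrative sketch (the flow is simplified; the real swap-allocation
 * path may combine these steps): a caller that has obtained a swap entry
 * charges it via the mem_cgroup_try_charge_swap() wrapper before
 * installing it, backing out on failure:
 *
 *	if (mem_cgroup_try_charge_swap(page, entry)) {
 *		// -ENOMEM: swap.max (or memsw) limit hit
 *		put_swap_page(page, entry);
 *		return 0;
 *	}
 *	// proceed to add the page to the swap cache
 */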
7422
7423 /**
7424 * __mem_cgroup_uncharge_swap - uncharge swap space
7425 * @entry: swap entry to uncharge
7426 * @nr_pages: the amount of swap space to uncharge
7427 */
7428 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7429 {
7430 struct mem_cgroup *memcg;
7431 unsigned short id;
7432
7433 id = swap_cgroup_record(entry, 0, nr_pages);
7434 rcu_read_lock();
7435 memcg = mem_cgroup_from_id(id);
7436 if (memcg) {
7437 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7438 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7439 page_counter_uncharge(&memcg->swap, nr_pages);
7440 else
7441 page_counter_uncharge(&memcg->memsw, nr_pages);
7442 }
7443 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7444 mem_cgroup_id_put_many(memcg, nr_pages);
7445 }
7446 rcu_read_unlock();
7447 }
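/*
 * Illustrative sketch, assuming the mem_cgroup_uncharge_swap() wrapper in
 * <linux/memcontrol.h>: when swap slots are finally released, the caller
 * uncharges the matching number of entries, e.g.
 *
 *	mem_cgroup_uncharge_swap(entry, nr_pages);
 *
 * pairing with the charge taken in __mem_cgroup_try_charge_swap() above.
 */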
7448
7449 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7450 {
7451 long nr_swap_pages = get_nr_swap_pages();
7452
7453 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7454 return nr_swap_pages;
7455 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7456 nr_swap_pages = min_t(long, nr_swap_pages,
7457 READ_ONCE(memcg->swap.max) -
7458 page_counter_read(&memcg->swap));
7459 return nr_swap_pages;
7460 }
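/*
 * Worked example (numbers are hypothetical): with 1000 free swap pages
 * globally, a memcg with swap.max = 100 and 40 pages of swap charged, and
 * a parent with swap.max = 50 and 10 charged, the walk above yields
 *
 *	min(1000, 100 - 40, 50 - 10) = 40
 *
 * i.e. the tightest remaining swap headroom anywhere on the ancestry path.
 */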
7461
7462 bool mem_cgroup_swap_full(struct page *page)
7463 {
7464 struct mem_cgroup *memcg;
7465
7466 VM_BUG_ON_PAGE(!PageLocked(page), page);
7467
7468 if (vm_swap_full())
7469 return true;
7470 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7471 return false;
7472
7473 memcg = page_memcg(page);
7474 if (!memcg)
7475 return false;
7476
7477 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7478 unsigned long usage = page_counter_read(&memcg->swap);
7479
7480 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7481 usage * 2 >= READ_ONCE(memcg->swap.max))
7482 return true;
7483 }
7484
7485 return false;
7486 }
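/*
 * Worked example (hypothetical numbers): with swap.max = 200 pages and
 * 100 pages of swap charged, usage * 2 == 200 >= max, so the cgroup is
 * treated as "swap full" once half of its allowed swap is used, mirroring
 * the global half-used heuristic of vm_swap_full() checked above.
 */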
7487
7488 static int __init setup_swap_account(char *s)
7489 {
7490 if (!strcmp(s, "1"))
7491 cgroup_memory_noswap = false;
7492 else if (!strcmp(s, "0"))
7493 cgroup_memory_noswap = true;
7494 return 1;
7495 }
7496 __setup("swapaccount=", setup_swap_account);
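/*
 * Example: swap accounting can be turned off at boot with
 *
 *	swapaccount=0
 *
 * and explicitly enabled with swapaccount=1; any other value leaves the
 * built-in default untouched.
 */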
7497
7498 static u64 swap_current_read(struct cgroup_subsys_state *css,
7499 struct cftype *cft)
7500 {
7501 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7502
7503 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7504 }
7505
7506 static int swap_high_show(struct seq_file *m, void *v)
7507 {
7508 return seq_puts_memcg_tunable(m,
7509 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7510 }
7511
7512 static ssize_t swap_high_write(struct kernfs_open_file *of,
7513 char *buf, size_t nbytes, loff_t off)
7514 {
7515 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7516 unsigned long high;
7517 int err;
7518
7519 buf = strstrip(buf);
7520 err = page_counter_memparse(buf, "max", &high);
7521 if (err)
7522 return err;
7523
7524 page_counter_set_high(&memcg->swap, high);
7525
7526 return nbytes;
7527 }
7528
7529 static int swap_max_show(struct seq_file *m, void *v)
7530 {
7531 return seq_puts_memcg_tunable(m,
7532 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7533 }
7534
7535 static ssize_t swap_max_write(struct kernfs_open_file *of,
7536 char *buf, size_t nbytes, loff_t off)
7537 {
7538 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7539 unsigned long max;
7540 int err;
7541
7542 buf = strstrip(buf);
7543 err = page_counter_memparse(buf, "max", &max);
7544 if (err)
7545 return err;
7546
7547 xchg(&memcg->swap.max, max);
7548
7549 return nbytes;
7550 }
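/*
 * Example interface usage (cgroup v2; the mount path is illustrative):
 *
 *	echo 512M > /sys/fs/cgroup/<group>/memory.swap.max
 *	echo max  > /sys/fs/cgroup/<group>/memory.swap.max
 *
 * "max" is parsed by page_counter_memparse() as "no limit".
 */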
7551
7552 static int swap_events_show(struct seq_file *m, void *v)
7553 {
7554 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7555
7556 seq_printf(m, "high %lu\n",
7557 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7558 seq_printf(m, "max %lu\n",
7559 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7560 seq_printf(m, "fail %lu\n",
7561 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7562
7563 return 0;
7564 }
7565
7566 static struct cftype swap_files[] = {
7567 {
7568 .name = "swap.current",
7569 .flags = CFTYPE_NOT_ON_ROOT,
7570 .read_u64 = swap_current_read,
7571 },
7572 {
7573 .name = "swap.high",
7574 .flags = CFTYPE_NOT_ON_ROOT,
7575 .seq_show = swap_high_show,
7576 .write = swap_high_write,
7577 },
7578 {
7579 .name = "swap.max",
7580 .flags = CFTYPE_NOT_ON_ROOT,
7581 .seq_show = swap_max_show,
7582 .write = swap_max_write,
7583 },
7584 {
7585 .name = "swap.events",
7586 .flags = CFTYPE_NOT_ON_ROOT,
7587 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7588 .seq_show = swap_events_show,
7589 },
7590 { } /* terminate */
7591 };
7592
7593 static struct cftype memsw_files[] = {
7594 {
7595 .name = "memsw.usage_in_bytes",
7596 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7597 .read_u64 = mem_cgroup_read_u64,
7598 },
7599 {
7600 .name = "memsw.max_usage_in_bytes",
7601 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7602 .write = mem_cgroup_reset,
7603 .read_u64 = mem_cgroup_read_u64,
7604 },
7605 {
7606 .name = "memsw.limit_in_bytes",
7607 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7608 .write = mem_cgroup_write,
7609 .read_u64 = mem_cgroup_read_u64,
7610 },
7611 {
7612 .name = "memsw.failcnt",
7613 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7614 .write = mem_cgroup_reset,
7615 .read_u64 = mem_cgroup_read_u64,
7616 },
7617 { }, /* terminate */
7618 };
7619
7620 /*
7621 * If mem_cgroup_swap_init() were a subsys_initcall() instead of a
7622 * core_initcall(), cgroup_memory_noswap could remain false even when memcg
7623 * is disabled via the "cgroup_disable=memory" boot parameter, which could
7624 * lead to a premature OOPS in mem_cgroup_get_nr_swap_pages() in corner
7625 * cases.
7626 */
7627 static int __init mem_cgroup_swap_init(void)
7628 {
7629 /* No memory control -> no swap control */
7630 if (mem_cgroup_disabled())
7631 cgroup_memory_noswap = true;
7632
7633 if (cgroup_memory_noswap)
7634 return 0;
7635
7636 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7637 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7638
7639 return 0;
7640 }
7641 core_initcall(mem_cgroup_swap_init);
7642
7643 #endif /* CONFIG_MEMCG_SWAP */
7644