// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "swap.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>
#include <trace/hooks/mm.h>
#include <trace/hooks/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;
EXPORT_SYMBOL_GPL(root_mem_cgroup);

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* BPF memory accounting disabled? */
static bool cgroup_memory_nobpf __ro_after_init;

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set if
	 * you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
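
/*
 * To make the encoding above concrete: MEMFILE_PRIVATE() packs the res_type
 * into the high bits of cft->private and a per-file attribute into the low
 * 16 bits; the two accessors unpack them. The standalone userspace sketch
 * below (excluded from the build; RES_LIMIT_DEMO is a made-up attribute
 * value) demonstrates a round trip:
 */
#if 0
#include <assert.h>
#include <stdio.h>

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

enum res_type { _MEM, _MEMSWAP, _KMEM, _TCP };
#define RES_LIMIT_DEMO 2	/* made-up attribute value */

int main(void)
{
	int priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT_DEMO);

	assert(MEMFILE_TYPE(priv) == _MEMSWAP);		/* type round-trips */
	assert(MEMFILE_ATTR(priv) == RES_LIMIT_DEMO);	/* attr round-trips */
	printf("priv=0x%x type=%d attr=%d\n", priv,
	       MEMFILE_TYPE(priv), MEMFILE_ATTR(priv));
	return 0;
}
#endif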

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
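
/*
 * A hypothetical usage sketch (not part of this file) of the iterators
 * above: walking a subtree and bailing out early, which is exactly the
 * case where mem_cgroup_iter_break() must be used to drop the reference
 * held on the current iteration position.
 */
#if 0
static bool subtree_has_oom_lock(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, root) {
		if (iter->oom_lock) {
			/* early exit: release the css reference on iter */
			mem_cgroup_iter_break(root, iter);
			return true;
		}
	}
	return false;	/* full round-trip: no reference left to drop */
}
#endif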

static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

/*
 * trace_android_vh_use_vm_swappiness is called in include/linux/swap.h by
 * including include/trace/hooks/vmscan.h, which would result in a build
 * error. So we create the function _trace_android_vh_use_vm_swappiness.
 */
void _trace_android_vh_use_vm_swappiness(bool *use_vm_swappiness)
{
	trace_android_vh_use_vm_swappiness(use_vm_swappiness);
}

#ifdef CONFIG_MEMCG_KMEM
static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we do release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}
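
/*
 * To replay the remainder arithmetic described in obj_cgroup_release()
 * above: a standalone userspace model (excluded from the build) with
 * PAGE_SIZE = 4096 and a 92-byte object, showing how the two flushed
 * stocks leave exactly one page in nr_charged_bytes.
 */
#if 0
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096U

int main(void)
{
	unsigned int nr_charged_bytes = 0;	/* objcg->nr_charged_bytes */
	unsigned int cpu1_stock, cpu0_stock;	/* per-CPU byte stocks */

	/* 2) CPU1 charges a whole page for a 92-byte allocation */
	cpu1_stock = PAGE_SIZE - 92;
	/* 3) CPU1's stock is flushed; the remainder moves to the objcg */
	nr_charged_bytes += cpu1_stock;
	/* 4) CPU0 frees the 92-byte object into its own stock */
	cpu0_stock = 92;
	/* 5) CPU0's stock is flushed as well */
	nr_charged_bytes += cpu0_stock;

	/* the leftovers add up to exactly one uncharged page */
	assert(nr_charged_bytes == PAGE_SIZE);
	printf("nr_charged_bytes = %u\n", nr_charged_bytes);
	return 0;
}
#endif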

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&objcg_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&objcg_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);
#endif

/**
 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, the css of the memcg associated
 * with @folio is returned. The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else {
			p = &(*p)->rb_right;
		}
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}
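
/*
 * The rightmost-tracking trick in __mem_cgroup_insert_exceeded() gives O(1)
 * access to the largest soft-limit excess: if the descent never goes left,
 * the new node is the rightmost one. A standalone userspace sketch of the
 * same idea (excluded from the build), using a plain unbalanced BST instead
 * of the kernel rbtree:
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long excess;
	struct node *left, *right;
};

static struct node *rightmost;	/* models mctz->rb_rightmost */

static void insert(struct node **root, struct node *n)
{
	struct node **p = root;
	int is_rightmost = 1;	/* stays set while we only descend right */

	while (*p) {
		if (n->excess < (*p)->excess) {
			p = &(*p)->left;
			is_rightmost = 0;	/* went left at least once */
		} else {
			p = &(*p)->right;
		}
	}
	*p = n;
	if (is_rightmost)
		rightmost = n;
}

int main(void)
{
	struct node *root = NULL;
	unsigned long vals[] = { 30, 10, 50, 40 };

	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));

		n->excess = vals[i];
		insert(&root, n);
	}
	printf("largest excess: %lu\n", rightmost->excess);	/* 50 */
	return 0;
}
#endif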

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	if (lru_gen_enabled()) {
		if (soft_limit_excess(memcg))
			lru_gen_soft_reclaim(memcg, nid);
		return;
	}

	mctz = soft_limit_tree.rb_tree_per_node[nid];
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counters are not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = memcg->nodeinfo[nid];
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = memcg->nodeinfo[nid];
		mctz = soft_limit_tree.rb_tree_per_node[nid];
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
	PGPGIN,
	PGPGOUT,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGFAULT,
	PGMAJFAULT,
	PGREFILL,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	ZSWPIN,
	ZSWPOUT,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;

static void init_memcg_events(void)
{
	int i;

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx] - 1;
}
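
/*
 * init_memcg_events() stores each index shifted up by one so that the
 * zero-initialized lookup table doubles as an "event not tracked" marker;
 * memcg_events_index() shifts back down and yields -1 for untracked events.
 * A standalone userspace sketch of the same sentinel trick (excluded from
 * the build, with made-up event names):
 */
#if 0
#include <stdio.h>

enum vm_event { EV_PGPGIN, EV_PGPGOUT, EV_PGFAULT, NR_EVENTS };

static const unsigned int tracked[] = { EV_PGPGIN, EV_PGFAULT };
static int event_index[NR_EVENTS];	/* zero-initialized: "not tracked" */

static void init_events(void)
{
	for (unsigned int i = 0; i < sizeof(tracked) / sizeof(tracked[0]); i++)
		event_index[tracked[i]] = i + 1;	/* store index + 1 */
}

static int events_index(enum vm_event idx)
{
	return event_index[idx] - 1;	/* -1 means "not tracked" */
}

int main(void)
{
	init_events();
	printf("EV_PGFAULT -> %d\n", events_index(EV_PGFAULT));	/* 1 */
	printf("EV_PGPGOUT -> %d\n", events_index(EV_PGPGOUT));	/* -1 */
	return 0;
}
#endif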

struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned int			stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu	*parent;
	struct memcg_vmstats		*vmstats;

	/* The above should fit in a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long			state_prev[MEMCG_NR_STAT];
	unsigned long		events_prev[NR_MEMCG_EVENTS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long		nr_page_events;
	unsigned long		targets[MEM_CGROUP_NTARGETS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long			state_local[MEMCG_NR_STAT];
	unsigned long		events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long			state_pending[MEMCG_NR_STAT];
	unsigned long		events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic64_t		stats_updates;
};

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats updates or reads are performance sensitive,
 * and adding stats flushing in such codepaths is not desirable. So, to
 * optimize flushing, the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds so the
 *    rstat update tree does not grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are
 *    more than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This lets the
 *    stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but
 *    only for 2 seconds due to (1).
 */
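
/*
 * The reader-side rule in (2) is a single comparison of the accumulated
 * update count against MEMCG_CHARGE_BATCH * nr_cpus. A standalone userspace
 * model of that decision (excluded from the build; the constants are
 * stand-ins, not the kernel's values):
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define DEMO_CHARGE_BATCH	64UL	/* stand-in for MEMCG_CHARGE_BATCH */
#define DEMO_NR_CPUS		8UL

static unsigned long stats_updates;	/* models vmstats->stats_updates */

static bool needs_flush(void)
{
	return stats_updates > DEMO_CHARGE_BATCH * DEMO_NR_CPUS;
}

int main(void)
{
	stats_updates = 500;
	printf("needs flush: %d\n", needs_flush());	/* 0: below 512 */
	stats_updates = 600;
	printf("needs flush: %d\n", needs_flush());	/* 1: threshold crossed */
	return 0;
}
#endif
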
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)

/*
 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it
 * cannot rely on this as part of an acquired spinlock_t lock. These functions
 * are never used in hardirq context on PREEMPT_RT and therefore disabling
 * preemption is sufficient.
 */
static void memcg_stats_lock(void)
{
	preempt_disable_nested();
	VM_WARN_ON_IRQS_ENABLED();
}

static void __memcg_stats_lock(void)
{
	preempt_disable_nested();
}

static void memcg_stats_unlock(void)
{
	preempt_enable_nested();
}


static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
	return atomic64_read(&vmstats->stats_updates) >
		MEMCG_CHARGE_BATCH * num_online_cpus();
}

static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
	struct memcg_vmstats_percpu *statc;
	int cpu = smp_processor_id();

	if (!val)
		return;

	cgroup_rstat_updated(memcg->css.cgroup, cpu);
	statc = this_cpu_ptr(memcg->vmstats_percpu);
	for (; statc; statc = statc->parent) {
		statc->stats_updates += abs(val);
		if (statc->stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		/*
		 * If @memcg is already flushable, increasing stats_updates is
		 * redundant. Avoid the overhead of the atomic update.
		 */
		if (!memcg_vmstats_needs_flush(statc->vmstats))
			atomic64_add(statc->stats_updates,
				     &statc->vmstats->stats_updates);
		statc->stats_updates = 0;
	}
}

static void do_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_is_root(memcg))
		WRITE_ONCE(flush_last_time, jiffies_64);

	cgroup_rstat_flush(memcg->css.cgroup);
}

/*
 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 * @memcg: root of the subtree to flush
 *
 * Flushing is serialized by the underlying global rstat lock. There is also a
 * minimum amount of work to be done even if there are no stat updates to flush.
 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 * avoids unnecessary work and contention on the underlying lock.
 */
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	if (memcg_vmstats_needs_flush(memcg->vmstats))
		do_flush_stats(memcg);
}

void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
	/* Only flush if the periodic flusher is one full cycle late */
	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
		mem_cgroup_flush_stats(memcg);
}

static void flush_memcg_stats_dwork(struct work_struct *w)
{
	/*
	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
	 * in latency-sensitive paths is as cheap as possible.
	 */
	do_flush_stats(root_mem_cgroup);
	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = READ_ONCE(memcg->vmstats->state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	if (mem_cgroup_disabled())
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
	memcg_rstat_updated(memcg, val);
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x = READ_ONCE(memcg->vmstats->state_local[idx]);

#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/*
	 * The callers from rmap rely on disabled preemption because they never
	 * update their counter from in-interrupt context. For those counters
	 * we check that the update is never performed from an interrupt
	 * context, while other callers need to have interrupts disabled.
	 */
	__memcg_stats_lock();
	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
		switch (idx) {
		case NR_ANON_MAPPED:
		case NR_FILE_MAPPED:
		case NR_ANON_THPS:
		case NR_SHMEM_PMDMAPPED:
		case NR_FILE_PMDMAPPED:
			WARN_ON_ONCE(!in_task());
			break;
		default:
			VM_WARN_ON_IRQS_ENABLED();
		}
	}

	/* Update memcg */
	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);

	memcg_rstat_updated(memcg, val);
	memcg_stats_unlock();
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}
EXPORT_SYMBOL_GPL(__mod_lruvec_state);

void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
			     int val)
{
	struct page *head = compound_head(page); /* rmap on tail pages */
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = page_memcg(head);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__mod_lruvec_page_state);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	int index = memcg_events_index(idx);

	if (mem_cgroup_disabled() || index < 0)
		return;

	memcg_stats_lock();
	__this_cpu_add(memcg->vmstats_percpu->events[index], count);
	memcg_rstat_updated(memcg, count);
	memcg_stats_unlock();
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	int index = memcg_events_index(event);

	if (index < 0)
		return 0;
	return READ_ONCE(memcg->vmstats->events[index]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	int index = memcg_events_index(event);

	if (index < 0)
		return 0;

	return READ_ONCE(memcg->vmstats->events_local[index]);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}
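
/*
 * The "(long)(next - val) < 0" test above is the wraparound-safe comparison
 * borrowed from time_after() in jiffies.h: the signed difference stays
 * correct even when the unsigned event counter overflows. A standalone
 * userspace demonstration (excluded from the build):
 */
#if 0
#include <limits.h>
#include <stdio.h>

static int passed(unsigned long val, unsigned long next)
{
	return (long)(next - val) < 0;	/* true once val goes past next */
}

int main(void)
{
	/* target set just past the wrap point: wraps around to 117 */
	unsigned long next = ULONG_MAX - 10 + 128;

	printf("%d\n", passed(ULONG_MAX - 10, next));	/* 0: not reached yet */
	printf("%d\n", passed(117, next));		/* 0: exactly at target */
	printf("%d\n", passed(200, next));		/* 1: past it, despite wrap */
	return 0;
}
#endif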

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, int nid)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		return;

	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, nid);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (!in_task())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

/**
 * get_mem_cgroup_from_mm: Obtain a reference on a given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available
 * 3) root memcg
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	/*
	 * Page cache insertions can happen without an
	 * actual mm context, e.g. during disk probing
	 * on boot, loopback IO, acct() writes etc.
	 *
	 * No need to css_get on root memcg as the reference
	 * counting is disabled on the root level in the
	 * cgroup core. See CSS_NO_REF.
	 */
	if (unlikely(!mm)) {
		memcg = active_memcg();
		if (unlikely(memcg)) {
			/* remote memcg must hold a ref */
			css_get(&memcg->css);
			return memcg;
		}
		mm = current->mm;
		if (unlikely(!mm))
			return root_mem_cgroup;
	}

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

static __always_inline bool memcg_kmem_bypass(void)
{
	/* Allow remote memcg charging from any context. */
	if (unlikely(active_memcg()))
		return false;

	/* Memcg to charge can't be determined. */
	if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
		return true;

	return false;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = root->nodeinfo[reclaim->pgdat->node_id];
		iter = &mz->iter;

		/*
		 * On start, join the current reclaim iteration cycle.
		 * Exit when a concurrent walker completes it.
		 */
		if (!prev)
			reclaim->generation = iter->generation;
		else if (reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	} else if (prev) {
		pos = prev;
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		if (css == &root->css || css_tryget(css)) {
			memcg = mem_cgroup_from_css(css);
			break;
		}
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					   struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from the cgroup root separately.
	 */
	if (!mem_cgroup_is_root(last))
		__invalidate_reclaim_iterators(root_mem_cgroup,
					       dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop. Otherwise, it will iterate
 * over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(mem_cgroup_is_root(memcg));

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);

	if (!memcg)
		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
	else
		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif

/**
 * folio_lruvec_lock - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held.
 */
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irq(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 * @flags: Pointer to irqsave flags.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irqsave(&lruvec->lru_lock, *flags);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

void do_traversal_all_lruvec(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat) {
		struct mem_cgroup *memcg = NULL;

		memcg = mem_cgroup_iter(NULL, NULL, NULL);
		do {
			struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

			trace_android_vh_do_traversal_lruvec(lruvec);

			memcg = mem_cgroup_iter(NULL, memcg, NULL);
		} while (memcg);
	}
}
EXPORT_SYMBOL_GPL(do_traversal_all_lruvec);

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}
EXPORT_SYMBOL_GPL(mem_cgroup_update_lru_size);

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
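
/*
 * mem_cgroup_margin() returns the headroom under the tighter of the memory
 * and memory+swap limits. A standalone userspace sketch of the arithmetic
 * (excluded from the build, with made-up counter values in pages):
 */
#if 0
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long margin(unsigned long mem_count, unsigned long mem_max,
			    unsigned long memsw_count, unsigned long memsw_max)
{
	unsigned long m = 0;

	if (mem_count < mem_max)
		m = mem_max - mem_count;

	/* legacy memory+swap accounting: the memsw limit can be tighter */
	if (memsw_count < memsw_max)
		m = min_ul(m, memsw_max - memsw_count);
	else
		m = 0;

	return m;
}

int main(void)
{
	/* 100 pages of memory headroom, but only 40 under memsw */
	printf("margin = %lu pages\n", margin(900, 1000, 1960, 2000));	/* 40 */
	return 0;
}
#endif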

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * a moving cgroup. This is for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon",			NR_ANON_MAPPED			},
	{ "file",			NR_FILE_PAGES			},
	{ "kernel",			MEMCG_KMEM			},
	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
	{ "pagetables",			NR_PAGETABLE			},
	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
	{ "percpu",			MEMCG_PERCPU_B			},
	{ "sock",			MEMCG_SOCK			},
	{ "vmalloc",			MEMCG_VMALLOC			},
	{ "shmem",			NR_SHMEM			},
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	{ "zswap",			MEMCG_ZSWAP_B			},
	{ "zswapped",			MEMCG_ZSWAPPED			},
#endif
	{ "file_mapped",		NR_FILE_MAPPED			},
	{ "file_dirty",			NR_FILE_DIRTY			},
	{ "file_writeback",		NR_WRITEBACK			},
#ifdef CONFIG_SWAP
	{ "swapcached",			NR_SWAPCACHE			},
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp",			NR_ANON_THPS			},
	{ "file_thp",			NR_FILE_THPS			},
	{ "shmem_thp",			NR_SHMEM_THPS			},
#endif
	{ "inactive_anon",		NR_INACTIVE_ANON		},
	{ "active_anon",		NR_ACTIVE_ANON			},
	{ "inactive_file",		NR_INACTIVE_FILE		},
	{ "active_file",		NR_ACTIVE_FILE			},
	{ "unevictable",		NR_UNEVICTABLE			},
	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},

	/* The memory events */
	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
};

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case MEMCG_ZSWAP_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}
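
/*
 * memcg_page_state_unit() exists because the internal counters mix units:
 * most items count pages, kernel stacks count KB, and the byte-suffixed
 * (_B) items and the workingset event counters need no scaling. A
 * standalone userspace sketch of the same translation (excluded from the
 * build, with made-up item names):
 */
#if 0
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_SZ_1K	1024UL

enum demo_item { DEMO_ANON_PAGES, DEMO_KERNEL_STACK_KB, DEMO_PERCPU_BYTES };

static unsigned long state_unit(enum demo_item item)
{
	switch (item) {
	case DEMO_PERCPU_BYTES:
		return 1;			/* already in bytes */
	case DEMO_KERNEL_STACK_KB:
		return DEMO_SZ_1K;		/* counted in KB */
	default:
		return DEMO_PAGE_SIZE;		/* counted in pages */
	}
}

int main(void)
{
	printf("anon %lu\n", 3 * state_unit(DEMO_ANON_PAGES));		/* 12288 */
	printf("stack %lu\n", 16 * state_unit(DEMO_KERNEL_STACK_KB));	/* 16384 */
	printf("percpu %lu\n", 512 * state_unit(DEMO_PERCPU_BYTES));	/* 512 */
	return 0;
}
#endif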

static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
						    int item)
{
	return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
}

static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */
	seq_buf_printf(s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT) +
		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
	seq_buf_printf(s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT) +
		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));

	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
		if (memcg_vm_event_stat[i] == PGPGIN ||
		    memcg_vm_event_stat[i] == PGPGOUT)
			continue;

		seq_buf_printf(s, "%s %lu\n",
			       vm_event_name(memcg_vm_event_stat[i]),
			       memcg_events(memcg, memcg_vm_event_stat[i]));
	}

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(s));
}

static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);

static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg_stat_format(memcg, s);
	else
		memcg1_stat_format(memcg, s);
	WARN_ON_ONCE(seq_buf_has_overflowed(s));
}

/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	/* Use a static buffer, since the caller is holding oom_lock. */
	static char buf[PAGE_SIZE];
	struct seq_buf s;

	lockdep_assert_held(&oom_lock);

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	seq_buf_init(&s, buf, sizeof(buf));
	memory_stat_format(memcg, &s);
	seq_buf_do_printk(&s, KERN_INFO);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (do_memsw_account()) {
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	} else {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	}
	return max;
}
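
/*
 * mem_cgroup_get_max() adds to the memory limit only the swap that is
 * actually reachable: under legacy memsw accounting the swap headroom is
 * memsw.max - memory.max, otherwise it is swap.max, and either way it is
 * capped by total_swap_pages. A standalone userspace sketch of the
 * arithmetic (excluded from the build; the swappiness gating is omitted
 * and all values are made up, in pages):
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long get_max(bool memsw_account, unsigned long mem_max,
			     unsigned long memsw_max, unsigned long swap_max,
			     unsigned long total_swap_pages)
{
	unsigned long max = mem_max;

	if (memsw_account)	/* legacy: swap room = memsw limit - memory limit */
		max += min_ul(memsw_max - mem_max, total_swap_pages);
	else			/* v2: dedicated swap limit */
		max += min_ul(swap_max, total_swap_pages);

	return max;
}

int main(void)
{
	/* memsw allows 1000 extra swap pages, but only 600 exist */
	printf("%lu\n", get_max(true, 4000, 5000, 0, 600));	/* 4600 */
	/* v2: swap.max = 2000, 600 pages of swap exist */
	printf("%lu\n", get_max(false, 4000, 0, 2000, 600));	/* 4600 */
	return 0;
}
#endif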
1770
mem_cgroup_size(struct mem_cgroup * memcg)1771 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1772 {
1773 return page_counter_read(&memcg->memory);
1774 }
1775
mem_cgroup_out_of_memory(struct mem_cgroup * memcg,gfp_t gfp_mask,int order)1776 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1777 int order)
1778 {
1779 struct oom_control oc = {
1780 .zonelist = NULL,
1781 .nodemask = NULL,
1782 .memcg = memcg,
1783 .gfp_mask = gfp_mask,
1784 .order = order,
1785 };
1786 bool ret = true;
1787
1788 if (mutex_lock_killable(&oom_lock))
1789 return true;
1790
1791 if (mem_cgroup_margin(memcg) >= (1 << order))
1792 goto unlock;
1793
1794 /*
1795 * A few threads which were not waiting at mutex_lock_killable() can
1796 * fail to bail out. Therefore, check again after holding oom_lock.
1797 */
1798 ret = task_is_dying() || out_of_memory(&oc);
1799
1800 unlock:
1801 mutex_unlock(&oom_lock);
1802 return ret;
1803 }
1804
1805 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1806 pg_data_t *pgdat,
1807 gfp_t gfp_mask,
1808 unsigned long *total_scanned)
1809 {
1810 struct mem_cgroup *victim = NULL;
1811 int total = 0;
1812 int loop = 0;
1813 unsigned long excess;
1814 unsigned long nr_scanned;
1815 struct mem_cgroup_reclaim_cookie reclaim = {
1816 .pgdat = pgdat,
1817 };
1818
1819 excess = soft_limit_excess(root_memcg);
1820
1821 while (1) {
1822 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1823 if (!victim) {
1824 loop++;
1825 if (loop >= 2) {
1826 /*
1827 * If we have not been able to reclaim
1828 * anything, it might be because there are
1829 * no reclaimable pages under this hierarchy.
1830 */
1831 if (!total)
1832 break;
1833 /*
1834 * We want to do more targeted reclaim.
1835 * excess >> 2 is neither so large that we
1836 * reclaim too much, nor so small that we keep
1837 * coming back to reclaim from this cgroup.
1838 */
1839 if (total >= (excess >> 2) ||
1840 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1841 break;
1842 }
1843 continue;
1844 }
1845 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1846 pgdat, &nr_scanned);
1847 *total_scanned += nr_scanned;
1848 if (!soft_limit_excess(root_memcg))
1849 break;
1850 }
1851 mem_cgroup_iter_break(root_memcg, victim);
1852 return total;
1853 }
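/*
 * Illustrative numbers (an assumption, not from the source): if the
 * hierarchy is 400 pages over its soft limit, the excess >> 2 check
 * above lets the loop stop after roughly 100 reclaimed pages once two
 * full passes have completed, rather than insisting on reclaiming the
 * entire excess in one go.
 */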
1854
1855 #ifdef CONFIG_LOCKDEP
1856 static struct lockdep_map memcg_oom_lock_dep_map = {
1857 .name = "memcg_oom_lock",
1858 };
1859 #endif
1860
1861 static DEFINE_SPINLOCK(memcg_oom_lock);
1862
1863 /*
1864 * Check whether the OOM killer is already running under our hierarchy.
1865 * If someone is running, return false.
1866 */
1867 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1868 {
1869 struct mem_cgroup *iter, *failed = NULL;
1870
1871 spin_lock(&memcg_oom_lock);
1872
1873 for_each_mem_cgroup_tree(iter, memcg) {
1874 if (iter->oom_lock) {
1875 /*
1876 * This subtree of our hierarchy is already locked,
1877 * so we cannot grant the lock.
1878 */
1879 failed = iter;
1880 mem_cgroup_iter_break(memcg, iter);
1881 break;
1882 } else
1883 iter->oom_lock = true;
1884 }
1885
1886 if (failed) {
1887 /*
1888 * OK, we failed to lock the whole subtree, so we have
1889 * to clean up what we set up, up to the failing subtree.
1890 */
1891 for_each_mem_cgroup_tree(iter, memcg) {
1892 if (iter == failed) {
1893 mem_cgroup_iter_break(memcg, iter);
1894 break;
1895 }
1896 iter->oom_lock = false;
1897 }
1898 } else
1899 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1900
1901 spin_unlock(&memcg_oom_lock);
1902
1903 return !failed;
1904 }
1905
1906 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1907 {
1908 struct mem_cgroup *iter;
1909
1910 spin_lock(&memcg_oom_lock);
1911 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1912 for_each_mem_cgroup_tree(iter, memcg)
1913 iter->oom_lock = false;
1914 spin_unlock(&memcg_oom_lock);
1915 }
1916
1917 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1918 {
1919 struct mem_cgroup *iter;
1920
1921 spin_lock(&memcg_oom_lock);
1922 for_each_mem_cgroup_tree(iter, memcg)
1923 iter->under_oom++;
1924 spin_unlock(&memcg_oom_lock);
1925 }
1926
1927 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1928 {
1929 struct mem_cgroup *iter;
1930
1931 /*
1932 * Be careful about under_oom underflows because a child memcg
1933 * could have been added after mem_cgroup_mark_under_oom.
1934 */
1935 spin_lock(&memcg_oom_lock);
1936 for_each_mem_cgroup_tree(iter, memcg)
1937 if (iter->under_oom > 0)
1938 iter->under_oom--;
1939 spin_unlock(&memcg_oom_lock);
1940 }
1941
1942 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1943
1944 struct oom_wait_info {
1945 struct mem_cgroup *memcg;
1946 wait_queue_entry_t wait;
1947 };
1948
1949 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1950 unsigned mode, int sync, void *arg)
1951 {
1952 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1953 struct mem_cgroup *oom_wait_memcg;
1954 struct oom_wait_info *oom_wait_info;
1955
1956 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1957 oom_wait_memcg = oom_wait_info->memcg;
1958
1959 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1960 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1961 return 0;
1962 return autoremove_wake_function(wait, mode, sync, arg);
1963 }
1964
1965 static void memcg_oom_recover(struct mem_cgroup *memcg)
1966 {
1967 /*
1968 * For the following lockless ->under_oom test, the only required
1969 * guarantee is that it must see the state asserted by an OOM when
1970 * this function is called as a result of userland actions
1971 * triggered by the notification of the OOM. This is trivially
1972 * achieved by invoking mem_cgroup_mark_under_oom() before
1973 * triggering notification.
1974 */
1975 if (memcg && memcg->under_oom)
1976 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1977 }
1978
1979 /*
1980 * Returns true if it successfully killed one or more processes, though in
1981 * some corner cases it can return true even without killing any process.
1982 */
1983 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1984 {
1985 bool locked, ret;
1986
1987 if (order > PAGE_ALLOC_COSTLY_ORDER)
1988 return false;
1989
1990 memcg_memory_event(memcg, MEMCG_OOM);
1991
1992 /*
1993 * We are in the middle of the charge context here, so we
1994 * don't want to block when potentially sitting on a callstack
1995 * that holds all kinds of filesystem and mm locks.
1996 *
1997 * cgroup1 allows disabling the OOM killer and waiting for outside
1998 * handling until the charge can succeed; remember the context and put
1999 * the task to sleep at the end of the page fault when all locks are
2000 * released.
2001 *
2002 * On the other hand, in-kernel OOM killer allows for an async victim
2003 * memory reclaim (oom_reaper) and that means that we are not solely
2004 * relying on the oom victim to make a forward progress and we can
2005 * invoke the oom killer here.
2006 *
2007 * Please note that mem_cgroup_out_of_memory might fail to find a
2008 * victim and then we have to bail out from the charge path.
2009 */
2010 if (READ_ONCE(memcg->oom_kill_disable)) {
2011 if (current->in_user_fault) {
2012 css_get(&memcg->css);
2013 current->memcg_in_oom = memcg;
2014 current->memcg_oom_gfp_mask = mask;
2015 current->memcg_oom_order = order;
2016 }
2017 return false;
2018 }
2019
2020 mem_cgroup_mark_under_oom(memcg);
2021
2022 locked = mem_cgroup_oom_trylock(memcg);
2023
2024 if (locked)
2025 mem_cgroup_oom_notify(memcg);
2026
2027 mem_cgroup_unmark_under_oom(memcg);
2028 ret = mem_cgroup_out_of_memory(memcg, mask, order);
2029
2030 if (locked)
2031 mem_cgroup_oom_unlock(memcg);
2032
2033 return ret;
2034 }
2035
2036 /**
2037 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2038 * @handle: actually kill/wait or just clean up the OOM state
2039 *
2040 * This has to be called at the end of a page fault if the memcg OOM
2041 * handler was enabled.
2042 *
2043 * Memcg supports userspace OOM handling where failed allocations must
2044 * sleep on a waitqueue until the userspace task resolves the
2045 * situation. Sleeping directly in the charge context with all kinds
2046 * of locks held is not a good idea, instead we remember an OOM state
2047 * in the task and mem_cgroup_oom_synchronize() has to be called at
2048 * the end of the page fault to complete the OOM handling.
2049 *
2050 * Returns %true if an ongoing memcg OOM situation was detected and
2051 * completed, %false otherwise.
2052 */
2053 bool mem_cgroup_oom_synchronize(bool handle)
2054 {
2055 struct mem_cgroup *memcg = current->memcg_in_oom;
2056 struct oom_wait_info owait;
2057 bool locked;
2058
2059 /* OOM is global, do not handle */
2060 if (!memcg)
2061 return false;
2062
2063 if (!handle)
2064 goto cleanup;
2065
2066 owait.memcg = memcg;
2067 owait.wait.flags = 0;
2068 owait.wait.func = memcg_oom_wake_function;
2069 owait.wait.private = current;
2070 INIT_LIST_HEAD(&owait.wait.entry);
2071
2072 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2073 mem_cgroup_mark_under_oom(memcg);
2074
2075 locked = mem_cgroup_oom_trylock(memcg);
2076
2077 if (locked)
2078 mem_cgroup_oom_notify(memcg);
2079
2080 schedule();
2081 mem_cgroup_unmark_under_oom(memcg);
2082 finish_wait(&memcg_oom_waitq, &owait.wait);
2083
2084 if (locked)
2085 mem_cgroup_oom_unlock(memcg);
2086 cleanup:
2087 current->memcg_in_oom = NULL;
2088 css_put(&memcg->css);
2089 return true;
2090 }
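/*
 * Illustrative call site (a sketch; the actual caller lives in
 * mm/oom_kill.c): pagefault_out_of_memory() completes memcg OOM
 * handling once a fault has failed with VM_FAULT_OOM:
 *
 *	if (mem_cgroup_oom_synchronize(true))
 *		return;	(memcg OOM was handled; the fault is retried)
 */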
2091
2092 /**
2093 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2094 * @victim: task to be killed by the OOM killer
2095 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2096 *
2097 * Returns a pointer to a memory cgroup, which has to be cleaned up
2098 * by killing all OOM-killable tasks belonging to it.
2099 *
2100 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2101 */
2102 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2103 struct mem_cgroup *oom_domain)
2104 {
2105 struct mem_cgroup *oom_group = NULL;
2106 struct mem_cgroup *memcg;
2107
2108 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2109 return NULL;
2110
2111 if (!oom_domain)
2112 oom_domain = root_mem_cgroup;
2113
2114 rcu_read_lock();
2115
2116 memcg = mem_cgroup_from_task(victim);
2117 if (mem_cgroup_is_root(memcg))
2118 goto out;
2119
2120 /*
2121 * If the victim task has been asynchronously moved to a different
2122 * memory cgroup, we might end up killing tasks outside oom_domain.
2123 * In this case it's better to ignore memory.group.oom.
2124 */
2125 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2126 goto out;
2127
2128 /*
2129 * Traverse the memory cgroup hierarchy from the victim task's
2130 * cgroup up to the OOMing cgroup (or root) to find the
2131 * highest-level memory cgroup with oom.group set.
2132 */
2133 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2134 if (READ_ONCE(memcg->oom_group))
2135 oom_group = memcg;
2136
2137 if (memcg == oom_domain)
2138 break;
2139 }
2140
2141 if (oom_group)
2142 css_get(&oom_group->css);
2143 out:
2144 rcu_read_unlock();
2145
2146 return oom_group;
2147 }
2148
2149 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2150 {
2151 pr_info("Tasks in ");
2152 pr_cont_cgroup_path(memcg->css.cgroup);
2153 pr_cont(" are going to be killed due to memory.oom.group set\n");
2154 }
2155
2156 /**
2157 * folio_memcg_lock - Bind a folio to its memcg.
2158 * @folio: The folio.
2159 *
2160 * This function prevents unlocked LRU folios from being moved to
2161 * another cgroup.
2162 *
2163 * It ensures lifetime of the bound memcg. The caller is responsible
2164 * for the lifetime of the folio.
2165 */
2166 void folio_memcg_lock(struct folio *folio)
2167 {
2168 struct mem_cgroup *memcg;
2169 unsigned long flags;
2170
2171 /*
2172 * The RCU lock is held throughout the transaction. The fast
2173 * path can get away without acquiring the memcg->move_lock
2174 * because page moving starts with an RCU grace period.
2175 */
2176 rcu_read_lock();
2177
2178 if (mem_cgroup_disabled())
2179 return;
2180 again:
2181 memcg = folio_memcg(folio);
2182 if (unlikely(!memcg))
2183 return;
2184
2185 #ifdef CONFIG_PROVE_LOCKING
2186 local_irq_save(flags);
2187 might_lock(&memcg->move_lock);
2188 local_irq_restore(flags);
2189 #endif
2190
2191 if (atomic_read(&memcg->moving_account) <= 0)
2192 return;
2193
2194 spin_lock_irqsave(&memcg->move_lock, flags);
2195 if (memcg != folio_memcg(folio)) {
2196 spin_unlock_irqrestore(&memcg->move_lock, flags);
2197 goto again;
2198 }
2199
2200 /*
2201 * When charge migration first begins, we can have multiple
2202 * critical sections holding the fast-path RCU lock and one
2203 * holding the slowpath move_lock. Track the task that holds the
2204 * move_lock for folio_memcg_unlock().
2205 */
2206 memcg->move_lock_task = current;
2207 memcg->move_lock_flags = flags;
2208 }
2209
2210 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2211 {
2212 if (memcg && memcg->move_lock_task == current) {
2213 unsigned long flags = memcg->move_lock_flags;
2214
2215 memcg->move_lock_task = NULL;
2216 memcg->move_lock_flags = 0;
2217
2218 spin_unlock_irqrestore(&memcg->move_lock, flags);
2219 }
2220
2221 rcu_read_unlock();
2222 }
2223
2224 /**
2225 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2226 * @folio: The folio.
2227 *
2228 * This releases the binding created by folio_memcg_lock(). This does
2229 * not change the accounting of this folio to its memcg, but it does
2230 * permit others to change it.
2231 */
2232 void folio_memcg_unlock(struct folio *folio)
2233 {
2234 __folio_memcg_unlock(folio_memcg(folio));
2235 }
2236
2237 struct memcg_stock_pcp {
2238 local_lock_t stock_lock;
2239 struct mem_cgroup *cached; /* this is never the root cgroup */
2240 unsigned int nr_pages;
2241
2242 #ifdef CONFIG_MEMCG_KMEM
2243 struct obj_cgroup *cached_objcg;
2244 struct pglist_data *cached_pgdat;
2245 unsigned int nr_bytes;
2246 int nr_slab_reclaimable_b;
2247 int nr_slab_unreclaimable_b;
2248 #endif
2249
2250 struct work_struct work;
2251 unsigned long flags;
2252 #define FLUSHING_CACHED_CHARGE 0
2253 };
2254 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2255 .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2256 };
2257 static DEFINE_MUTEX(percpu_charge_mutex);
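/*
 * Summary sketch (descriptive, derived from the functions below): the
 * charge fast path calls consume_stock() to take pages from the local
 * CPU's cache without touching the page counters; try_charge_memcg()
 * charges a full MEMCG_CHARGE_BATCH and parks the surplus here via
 * refill_stock(); drain_stock()/drain_all_stock() hand the cached
 * charges back to the page counters when limits must be enforced
 * accurately.
 */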
2258
2259 #ifdef CONFIG_MEMCG_KMEM
2260 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2261 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2262 struct mem_cgroup *root_memcg);
2263 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2264
2265 #else
2266 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2267 {
2268 return NULL;
2269 }
2270 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2271 struct mem_cgroup *root_memcg)
2272 {
2273 return false;
2274 }
2275 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2276 {
2277 }
2278 #endif
2279
2280 /**
2281 * consume_stock: Try to consume stocked charge on this cpu.
2282 * @memcg: memcg to consume from.
2283 * @nr_pages: how many pages to charge.
2284 *
2285 * The charges will only happen if @memcg matches the current cpu's memcg
2286 * stock, and at least @nr_pages are available in that stock. Failure to
2287 * service an allocation will refill the stock.
2288 *
2289 * returns true if successful, false otherwise.
2290 */
2291 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2292 {
2293 struct memcg_stock_pcp *stock;
2294 unsigned long flags;
2295 bool ret = false;
2296
2297 if (nr_pages > MEMCG_CHARGE_BATCH)
2298 return ret;
2299
2300 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2301
2302 stock = this_cpu_ptr(&memcg_stock);
2303 if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2304 stock->nr_pages -= nr_pages;
2305 ret = true;
2306 }
2307
2308 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2309
2310 return ret;
2311 }
2312
2313 /*
2314 * Return the charges stocked in the percpu cache and reset the cached information.
2315 */
2316 static void drain_stock(struct memcg_stock_pcp *stock)
2317 {
2318 struct mem_cgroup *old = READ_ONCE(stock->cached);
2319
2320 if (!old)
2321 return;
2322
2323 if (stock->nr_pages) {
2324 page_counter_uncharge(&old->memory, stock->nr_pages);
2325 if (do_memsw_account())
2326 page_counter_uncharge(&old->memsw, stock->nr_pages);
2327 stock->nr_pages = 0;
2328 }
2329
2330 css_put(&old->css);
2331 WRITE_ONCE(stock->cached, NULL);
2332 }
2333
2334 static void drain_local_stock(struct work_struct *dummy)
2335 {
2336 struct memcg_stock_pcp *stock;
2337 struct obj_cgroup *old = NULL;
2338 unsigned long flags;
2339
2340 /*
2341 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2342 * drain_stock races is that we always operate on local CPU stock
2343 * here with IRQs disabled.
2344 */
2345 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2346
2347 stock = this_cpu_ptr(&memcg_stock);
2348 old = drain_obj_stock(stock);
2349 drain_stock(stock);
2350 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2351
2352 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2353 if (old)
2354 obj_cgroup_put(old);
2355 }
2356
2357 /*
2358 * Cache charges (nr_pages) in the local per-CPU area.
2359 * They will be consumed by the consume_stock() function later.
2360 */
2361 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2362 {
2363 struct memcg_stock_pcp *stock;
2364
2365 stock = this_cpu_ptr(&memcg_stock);
2366 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
2367 drain_stock(stock);
2368 css_get(&memcg->css);
2369 WRITE_ONCE(stock->cached, memcg);
2370 }
2371 stock->nr_pages += nr_pages;
2372
2373 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2374 drain_stock(stock);
2375 }
2376
2377 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2378 {
2379 unsigned long flags;
2380
2381 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2382 __refill_stock(memcg, nr_pages);
2383 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2384 }
2385
2386 /*
2387 * Drain all per-CPU charge caches for the given root_memcg and the
2388 * subtree of the hierarchy under it.
2389 */
2390 static void drain_all_stock(struct mem_cgroup *root_memcg)
2391 {
2392 int cpu, curcpu;
2393
2394 /* If someone's already draining, avoid starting more workers. */
2395 if (!mutex_trylock(&percpu_charge_mutex))
2396 return;
2397 /*
2398 * Notify other cpus that system-wide "drain" is running
2399 * We do not care about races with the cpu hotplug because cpu down
2400 * as well as workers from this path always operate on the local
2401 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2402 */
2403 migrate_disable();
2404 curcpu = smp_processor_id();
2405 for_each_online_cpu(cpu) {
2406 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2407 struct mem_cgroup *memcg;
2408 bool flush = false;
2409
2410 rcu_read_lock();
2411 memcg = READ_ONCE(stock->cached);
2412 if (memcg && stock->nr_pages &&
2413 mem_cgroup_is_descendant(memcg, root_memcg))
2414 flush = true;
2415 else if (obj_stock_flush_required(stock, root_memcg))
2416 flush = true;
2417 rcu_read_unlock();
2418
2419 if (flush &&
2420 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2421 if (cpu == curcpu)
2422 drain_local_stock(&stock->work);
2423 else if (!cpu_is_isolated(cpu))
2424 schedule_work_on(cpu, &stock->work);
2425 }
2426 }
2427 migrate_enable();
2428 mutex_unlock(&percpu_charge_mutex);
2429 }
2430
2431 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2432 {
2433 struct memcg_stock_pcp *stock;
2434
2435 stock = &per_cpu(memcg_stock, cpu);
2436 drain_stock(stock);
2437
2438 return 0;
2439 }
2440
2441 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2442 unsigned int nr_pages,
2443 gfp_t gfp_mask)
2444 {
2445 unsigned long nr_reclaimed = 0;
2446
2447 do {
2448 unsigned long pflags;
2449
2450 if (page_counter_read(&memcg->memory) <=
2451 READ_ONCE(memcg->memory.high))
2452 continue;
2453
2454 memcg_memory_event(memcg, MEMCG_HIGH);
2455
2456 psi_memstall_enter(&pflags);
2457 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2458 gfp_mask,
2459 MEMCG_RECLAIM_MAY_SWAP);
2460 psi_memstall_leave(&pflags);
2461 } while ((memcg = parent_mem_cgroup(memcg)) &&
2462 !mem_cgroup_is_root(memcg));
2463
2464 return nr_reclaimed;
2465 }
2466
2467 static void high_work_func(struct work_struct *work)
2468 {
2469 struct mem_cgroup *memcg;
2470
2471 memcg = container_of(work, struct mem_cgroup, high_work);
2472 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2473 }
2474
2475 /*
2476 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2477 * enough to still cause a significant slowdown in most cases, while still
2478 * allowing diagnostics and tracing to proceed without becoming stuck.
2479 */
2480 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2481
2482 /*
2483 * When calculating the delay, we use these on either side of the exponentiation
2484 * to maintain precision and scale to a reasonable number of jiffies (see the
2485 * table below).
2486 *
2487 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2488 * overage ratio to a delay.
2489 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2490 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2491 * to produce a reasonable delay curve.
2492 *
2493 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2494 * reasonable delay curve compared to precision-adjusted overage, not
2495 * penalising heavily at first, but still making sure that growth beyond the
2496 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2497 * example, with a high of 100 megabytes:
2498 *
2499 * +-------+------------------------+
2500 * | usage | time to allocate in ms |
2501 * +-------+------------------------+
2502 * | 100M | 0 |
2503 * | 101M | 6 |
2504 * | 102M | 25 |
2505 * | 103M | 57 |
2506 * | 104M | 102 |
2507 * | 105M | 159 |
2508 * | 106M | 230 |
2509 * | 107M | 313 |
2510 * | 108M | 409 |
2511 * | 109M | 518 |
2512 * | 110M | 639 |
2513 * | 111M | 774 |
2514 * | 112M | 921 |
2515 * | 113M | 1081 |
2516 * | 114M | 1254 |
2517 * | 115M | 1439 |
2518 * | 116M | 1638 |
2519 * | 117M | 1849 |
2520 * | 118M | 2000 |
2521 * | 119M | 2000 |
2522 * | 120M | 2000 |
2523 * +-------+------------------------+
2524 */
2525 #define MEMCG_DELAY_PRECISION_SHIFT 20
2526 #define MEMCG_DELAY_SCALING_SHIFT 14
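/*
 * Worked example (assuming HZ == 1000 and nr_pages == MEMCG_CHARGE_BATCH):
 * with memory.high = 100M and usage = 110M, calculate_overage() below
 * yields
 *
 *	overage = ((110M - 100M) << 20) / 100M ~= 0.1 * 2^20 = 104857
 *
 * and calculate_high_delay() turns that into
 *
 *	(104857 * 104857 * 1000) >> 20 >> 14 ~= 640 jiffies ~= 640ms,
 *
 * matching the 110M row (639ms) of the table above.
 */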
2527
2528 static u64 calculate_overage(unsigned long usage, unsigned long high)
2529 {
2530 u64 overage;
2531
2532 if (usage <= high)
2533 return 0;
2534
2535 /*
2536 * Prevent division by 0 in the overage calculation by acting as if
2537 * it were a threshold of 1 page.
2538 */
2539 high = max(high, 1UL);
2540
2541 overage = usage - high;
2542 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2543 return div64_u64(overage, high);
2544 }
2545
2546 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2547 {
2548 u64 overage, max_overage = 0;
2549
2550 do {
2551 overage = calculate_overage(page_counter_read(&memcg->memory),
2552 READ_ONCE(memcg->memory.high));
2553 max_overage = max(overage, max_overage);
2554 } while ((memcg = parent_mem_cgroup(memcg)) &&
2555 !mem_cgroup_is_root(memcg));
2556
2557 return max_overage;
2558 }
2559
2560 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2561 {
2562 u64 overage, max_overage = 0;
2563
2564 do {
2565 overage = calculate_overage(page_counter_read(&memcg->swap),
2566 READ_ONCE(memcg->swap.high));
2567 if (overage)
2568 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2569 max_overage = max(overage, max_overage);
2570 } while ((memcg = parent_mem_cgroup(memcg)) &&
2571 !mem_cgroup_is_root(memcg));
2572
2573 return max_overage;
2574 }
2575
2576 /*
2577 * Get the number of jiffies that we should penalise a mischievous cgroup which
2578 * is exceeding its memory.high by checking both it and its ancestors.
2579 */
2580 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2581 unsigned int nr_pages,
2582 u64 max_overage)
2583 {
2584 unsigned long penalty_jiffies;
2585
2586 if (!max_overage)
2587 return 0;
2588
2589 /*
2590 * We use overage compared to memory.high to calculate the number of
2591 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2592 * fairly lenient on small overages, and increasingly harsh when the
2593 * memcg in question makes it clear that it has no intention of stopping
2594 * its crazy behaviour, so we exponentially increase the delay based on
2595 * overage amount.
2596 */
2597 penalty_jiffies = max_overage * max_overage * HZ;
2598 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2599 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2600
2601 /*
2602 * Factor in the task's own contribution to the overage, such that four
2603 * N-sized allocations are throttled approximately the same as one
2604 * 4N-sized allocation.
2605 *
2606 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2607 * larger the current charge batch is than that.
2608 */
2609 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2610 }
2611
2612 /*
2613 * Scheduled by try_charge() to be executed from the userland return path
2614 * and reclaims memory over the high limit.
2615 */
2616 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2617 {
2618 unsigned long penalty_jiffies;
2619 unsigned long pflags;
2620 unsigned long nr_reclaimed;
2621 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2622 int nr_retries = MAX_RECLAIM_RETRIES;
2623 struct mem_cgroup *memcg;
2624 bool in_retry = false;
2625
2626 if (likely(!nr_pages))
2627 return;
2628
2629 memcg = get_mem_cgroup_from_mm(current->mm);
2630 current->memcg_nr_pages_over_high = 0;
2631
2632 retry_reclaim:
2633 /*
2634 * The allocating task should reclaim at least the batch size, but for
2635 * subsequent retries we only want to do what's necessary to prevent oom
2636 * or breaching resource isolation.
2637 *
2638 * This is distinct from memory.max or page allocator behaviour because
2639 * memory.high is currently batched, whereas memory.max and the page
2640 * allocator run every time an allocation is made.
2641 */
2642 nr_reclaimed = reclaim_high(memcg,
2643 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2644 gfp_mask);
2645
2646 /*
2647 * memory.high is breached and reclaim is unable to keep up. Throttle
2648 * allocators proactively to slow down excessive growth.
2649 */
2650 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2651 mem_find_max_overage(memcg));
2652
2653 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2654 swap_find_max_overage(memcg));
2655
2656 /*
2657 * Clamp the max delay per usermode return so as to still keep the
2658 * application moving forwards and also permit diagnostics, albeit
2659 * extremely slowly.
2660 */
2661 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2662
2663 /*
2664 * Don't sleep if the amount of jiffies this memcg owes us is so low
2665 * that it's not even worth doing, in an attempt to be nice to those who
2666 * go only a small amount over their memory.high value and maybe haven't
2667 * been aggressively reclaimed enough yet.
2668 */
2669 if (penalty_jiffies <= HZ / 100)
2670 goto out;
2671
2672 /*
2673 * If reclaim is making forward progress but we're still over
2674 * memory.high, we want to encourage that rather than doing allocator
2675 * throttling.
2676 */
2677 if (nr_reclaimed || nr_retries--) {
2678 in_retry = true;
2679 goto retry_reclaim;
2680 }
2681
2682 /*
2683 * If we exit early, we're guaranteed to die (since
2684 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2685 * need to account for any ill-begotten jiffies to pay them off later.
2686 */
2687 psi_memstall_enter(&pflags);
2688 schedule_timeout_killable(penalty_jiffies);
2689 psi_memstall_leave(&pflags);
2690
2691 out:
2692 css_put(&memcg->css);
2693 }
2694
2695 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2696 unsigned int nr_pages)
2697 {
2698 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2699 int nr_retries = MAX_RECLAIM_RETRIES;
2700 struct mem_cgroup *mem_over_limit;
2701 struct page_counter *counter;
2702 unsigned long nr_reclaimed;
2703 bool passed_oom = false;
2704 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2705 bool drained = false;
2706 bool raised_max_event = false;
2707 unsigned long pflags;
2708
2709 retry:
2710 if (consume_stock(memcg, nr_pages))
2711 return 0;
2712
2713 if (!do_memsw_account() ||
2714 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2715 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2716 goto done_restock;
2717 if (do_memsw_account())
2718 page_counter_uncharge(&memcg->memsw, batch);
2719 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2720 } else {
2721 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2722 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2723 }
2724
2725 if (batch > nr_pages) {
2726 batch = nr_pages;
2727 goto retry;
2728 }
2729
2730 /*
2731 * Prevent unbounded recursion when reclaim operations need to
2732 * allocate memory. This might exceed the limits temporarily,
2733 * but we prefer facilitating memory reclaim and getting back
2734 * under the limit over triggering OOM kills in these cases.
2735 */
2736 if (unlikely(current->flags & PF_MEMALLOC))
2737 goto force;
2738
2739 if (unlikely(task_in_memcg_oom(current)))
2740 goto nomem;
2741
2742 if (!gfpflags_allow_blocking(gfp_mask))
2743 goto nomem;
2744
2745 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2746 raised_max_event = true;
2747
2748 psi_memstall_enter(&pflags);
2749 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2750 gfp_mask, reclaim_options);
2751 psi_memstall_leave(&pflags);
2752
2753 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2754 goto retry;
2755
2756 if (!drained) {
2757 drain_all_stock(mem_over_limit);
2758 drained = true;
2759 goto retry;
2760 }
2761
2762 if (gfp_mask & __GFP_NORETRY)
2763 goto nomem;
2764 /*
2765 * Even though the limit is exceeded at this point, reclaim
2766 * may have been able to free some pages. Retry the charge
2767 * before killing the task.
2768 *
2769 * Only for regular pages, though: huge pages are rather
2770 * unlikely to succeed so close to the limit, and we fall back
2771 * to regular pages anyway in case of failure.
2772 */
2773 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2774 goto retry;
2775 /*
2776 * During a task move, charge accounts can be double-counted. So it's
2777 * better to wait until the end of the task move if one is in progress.
2778 */
2779 if (mem_cgroup_wait_acct_move(mem_over_limit))
2780 goto retry;
2781
2782 if (nr_retries--)
2783 goto retry;
2784
2785 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2786 goto nomem;
2787
2788 /* Avoid endless loop for tasks bypassed by the oom killer */
2789 if (passed_oom && task_is_dying())
2790 goto nomem;
2791
2792 /*
2793 * Keep retrying as long as the memcg OOM killer is able to make
2794 * forward progress, or bypass the charge if the OOM killer
2795 * couldn't make any progress.
2796 */
2797 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2798 get_order(nr_pages * PAGE_SIZE))) {
2799 passed_oom = true;
2800 nr_retries = MAX_RECLAIM_RETRIES;
2801 goto retry;
2802 }
2803 nomem:
2804 /*
2805 * Memcg doesn't have a dedicated reserve for atomic
2806 * allocations. But like the global atomic pool, we need to
2807 * put the burden of reclaim on regular allocation requests
2808 * and let these go through as privileged allocations.
2809 */
2810 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2811 return -ENOMEM;
2812 force:
2813 /*
2814 * If the allocation has to be enforced, don't forget to raise
2815 * a MEMCG_MAX event.
2816 */
2817 if (!raised_max_event)
2818 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2819
2820 /*
2821 * The allocation either can't fail or will lead to more memory
2822 * being freed very soon. Allow memory usage to go over the limit
2823 * temporarily by force charging it.
2824 */
2825 page_counter_charge(&memcg->memory, nr_pages);
2826 if (do_memsw_account())
2827 page_counter_charge(&memcg->memsw, nr_pages);
2828
2829 return 0;
2830
2831 done_restock:
2832 if (batch > nr_pages)
2833 refill_stock(memcg, batch - nr_pages);
2834
2835 /*
2836 * If the hierarchy is above the normal consumption range, schedule
2837 * reclaim on returning to userland. We could perform reclaim here
2838 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2839 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2840 * not recorded as it most likely matches current's and won't
2841 * change in the meantime. As high limit is checked again before
2842 * reclaim, the cost of mismatch is negligible.
2843 */
2844 do {
2845 bool mem_high, swap_high;
2846
2847 mem_high = page_counter_read(&memcg->memory) >
2848 READ_ONCE(memcg->memory.high);
2849 swap_high = page_counter_read(&memcg->swap) >
2850 READ_ONCE(memcg->swap.high);
2851
2852 /* Don't bother a random interrupted task */
2853 if (!in_task()) {
2854 if (mem_high) {
2855 schedule_work(&memcg->high_work);
2856 break;
2857 }
2858 continue;
2859 }
2860
2861 if (mem_high || swap_high) {
2862 /*
2863 * The allocating tasks in this cgroup will need to do
2864 * reclaim or be throttled to prevent further growth
2865 * of the memory or swap footprints.
2866 *
2867 * Target some best-effort fairness between the tasks,
2868 * and distribute reclaim work and delay penalties
2869 * based on how much each task is actually allocating.
2870 */
2871 current->memcg_nr_pages_over_high += batch;
2872 set_notify_resume(current);
2873 break;
2874 }
2875 } while ((memcg = parent_mem_cgroup(memcg)));
2876
2877 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2878 !(current->flags & PF_MEMALLOC) &&
2879 gfpflags_allow_blocking(gfp_mask)) {
2880 mem_cgroup_handle_over_high(gfp_mask);
2881 }
2882 return 0;
2883 }
2884
2885 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2886 unsigned int nr_pages)
2887 {
2888 if (mem_cgroup_is_root(memcg))
2889 return 0;
2890
2891 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2892 }
2893
2894 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2895 {
2896 if (mem_cgroup_is_root(memcg))
2897 return;
2898
2899 page_counter_uncharge(&memcg->memory, nr_pages);
2900 if (do_memsw_account())
2901 page_counter_uncharge(&memcg->memsw, nr_pages);
2902 }
2903
2904 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2905 {
2906 VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2907 /*
2908 * Any of the following ensures the page's memcg stability:
2909 *
2910 * - the page lock
2911 * - LRU isolation
2912 * - folio_memcg_lock()
2913 * - exclusive reference
2914 * - mem_cgroup_trylock_pages()
2915 */
2916 folio->memcg_data = (unsigned long)memcg;
2917 }
2918
2919 #ifdef CONFIG_MEMCG_KMEM
2920 /*
2921 * The allocated objcg pointers array is not accounted directly.
2922 * Moreover, it should not come from a DMA buffer and is not readily
2923 * reclaimable. So those GFP bits should be masked off.
2924 */
2925 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
2926 __GFP_ACCOUNT | __GFP_NOFAIL)
2927
2928 /*
2929 * mod_objcg_mlstate() may be called with irqs enabled, so
2930 * mod_memcg_lruvec_state() should be used.
2931 */
2932 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2933 struct pglist_data *pgdat,
2934 enum node_stat_item idx, int nr)
2935 {
2936 struct mem_cgroup *memcg;
2937 struct lruvec *lruvec;
2938
2939 rcu_read_lock();
2940 memcg = obj_cgroup_memcg(objcg);
2941 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2942 mod_memcg_lruvec_state(lruvec, idx, nr);
2943 rcu_read_unlock();
2944 }
2945
2946 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2947 gfp_t gfp, bool new_slab)
2948 {
2949 unsigned int objects = objs_per_slab(s, slab);
2950 unsigned long memcg_data;
2951 void *vec;
2952
2953 gfp &= ~OBJCGS_CLEAR_MASK;
2954 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2955 slab_nid(slab));
2956 if (!vec)
2957 return -ENOMEM;
2958
2959 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2960 if (new_slab) {
2961 /*
2962 * If the slab is brand new and nobody can yet access its
2963 * memcg_data, no synchronization is required and memcg_data can
2964 * be simply assigned.
2965 */
2966 slab->memcg_data = memcg_data;
2967 } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2968 /*
2969 * If the slab is already in use, somebody can allocate and
2970 * assign obj_cgroups in parallel. In this case the existing
2971 * objcg vector should be reused.
2972 */
2973 kfree(vec);
2974 return 0;
2975 }
2976
2977 kmemleak_not_leak(vec);
2978 return 0;
2979 }
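/*
 * Context note (hedged): this is reached from the slab allocator when a
 * slab needs its obj_cgroup vector, e.g. via account_slab() in
 * mm/slab.h, so that each object in the slab can be charged to a
 * (possibly different) objcg individually.
 */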
2980
2981 static __always_inline
2982 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2983 {
2984 /*
2985 * Slab objects are accounted individually, not per-page.
2986 * Memcg membership data for each individual object is saved in
2987 * slab->memcg_data.
2988 */
2989 if (folio_test_slab(folio)) {
2990 struct obj_cgroup **objcgs;
2991 struct slab *slab;
2992 unsigned int off;
2993
2994 slab = folio_slab(folio);
2995 objcgs = slab_objcgs(slab);
2996 if (!objcgs)
2997 return NULL;
2998
2999 off = obj_to_index(slab->slab_cache, slab, p);
3000 if (objcgs[off])
3001 return obj_cgroup_memcg(objcgs[off]);
3002
3003 return NULL;
3004 }
3005
3006 /*
3007 * folio_memcg_check() is used here, because in theory we can encounter
3008 * a folio where the slab flag has been cleared already, but
3009 * slab->memcg_data has not been freed yet.
3010 * folio_memcg_check() will guarantee that a proper memory
3011 * cgroup pointer or NULL will be returned.
3012 */
3013 return folio_memcg_check(folio);
3014 }
3015
3016 /*
3017 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3018 *
3019 * A passed kernel object can be a slab object, vmalloc object or a generic
3020 * kernel page, so different mechanisms for getting the memory cgroup pointer
3021 * should be used.
3022 *
3023 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
3024 * cannot know for sure how the kernel object is implemented.
3025 * mem_cgroup_from_obj() can be safely used in such cases.
3026 *
3027 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3028 * cgroup_mutex, etc.
3029 */
3030 struct mem_cgroup *mem_cgroup_from_obj(void *p)
3031 {
3032 struct folio *folio;
3033
3034 if (mem_cgroup_disabled())
3035 return NULL;
3036
3037 if (unlikely(is_vmalloc_addr(p)))
3038 folio = page_folio(vmalloc_to_page(p));
3039 else
3040 folio = virt_to_folio(p);
3041
3042 return mem_cgroup_from_obj_folio(folio, p);
3043 }
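/*
 * Usage sketch (illustrative only): a caller pinning the memcg with RCU
 * might look like
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_obj(ptr);
 *	if (memcg)
 *		(inspect or account against memcg here)
 *	rcu_read_unlock();
 */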
3044
3045 /*
3046 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3047 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
3048 * allocated using vmalloc().
3049 *
3050 * A passed kernel object must be a slab object or a generic kernel page.
3051 *
3052 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3053 * cgroup_mutex, etc.
3054 */
3055 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3056 {
3057 if (mem_cgroup_disabled())
3058 return NULL;
3059
3060 return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3061 }
3062
3063 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3064 {
3065 struct obj_cgroup *objcg = NULL;
3066
3067 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3068 objcg = rcu_dereference(memcg->objcg);
3069 if (objcg && obj_cgroup_tryget(objcg))
3070 break;
3071 objcg = NULL;
3072 }
3073 return objcg;
3074 }
3075
3076 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
3077 {
3078 struct obj_cgroup *objcg = NULL;
3079 struct mem_cgroup *memcg;
3080
3081 if (memcg_kmem_bypass())
3082 return NULL;
3083
3084 rcu_read_lock();
3085 if (unlikely(active_memcg()))
3086 memcg = active_memcg();
3087 else
3088 memcg = mem_cgroup_from_task(current);
3089 objcg = __get_obj_cgroup_from_memcg(memcg);
3090 rcu_read_unlock();
3091 return objcg;
3092 }
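/*
 * Typical charge pattern (a sketch; `size' and the error handling are
 * assumptions, not taken from this file):
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *		obj_cgroup_put(objcg);
 *		return -ENOMEM;
 *	}
 *
 * On free, the owner calls obj_cgroup_uncharge(objcg, size) followed by
 * obj_cgroup_put(objcg).
 */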
3093
3094 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3095 {
3096 struct obj_cgroup *objcg;
3097
3098 if (!memcg_kmem_online())
3099 return NULL;
3100
3101 if (folio_memcg_kmem(folio)) {
3102 objcg = __folio_objcg(folio);
3103 obj_cgroup_get(objcg);
3104 } else {
3105 struct mem_cgroup *memcg;
3106
3107 rcu_read_lock();
3108 memcg = __folio_memcg(folio);
3109 if (memcg)
3110 objcg = __get_obj_cgroup_from_memcg(memcg);
3111 else
3112 objcg = NULL;
3113 rcu_read_unlock();
3114 }
3115 return objcg;
3116 }
3117
3118 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3119 {
3120 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3121 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3122 if (nr_pages > 0)
3123 page_counter_charge(&memcg->kmem, nr_pages);
3124 else
3125 page_counter_uncharge(&memcg->kmem, -nr_pages);
3126 }
3127 }
3128
3129
3130 /*
3131 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3132 * @objcg: object cgroup to uncharge
3133 * @nr_pages: number of pages to uncharge
3134 */
3135 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3136 unsigned int nr_pages)
3137 {
3138 struct mem_cgroup *memcg;
3139
3140 memcg = get_mem_cgroup_from_objcg(objcg);
3141
3142 memcg_account_kmem(memcg, -nr_pages);
3143 refill_stock(memcg, nr_pages);
3144
3145 css_put(&memcg->css);
3146 }
3147
3148 /*
3149 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3150 * @objcg: object cgroup to charge
3151 * @gfp: reclaim mode
3152 * @nr_pages: number of pages to charge
3153 *
3154 * Returns 0 on success, an error code on failure.
3155 */
3156 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3157 unsigned int nr_pages)
3158 {
3159 struct mem_cgroup *memcg;
3160 int ret;
3161
3162 memcg = get_mem_cgroup_from_objcg(objcg);
3163
3164 ret = try_charge_memcg(memcg, gfp, nr_pages);
3165 if (ret)
3166 goto out;
3167
3168 memcg_account_kmem(memcg, nr_pages);
3169 out:
3170 css_put(&memcg->css);
3171
3172 return ret;
3173 }
3174
3175 /**
3176 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3177 * @page: page to charge
3178 * @gfp: reclaim mode
3179 * @order: allocation order
3180 *
3181 * Returns 0 on success, an error code on failure.
3182 */
3183 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3184 {
3185 struct obj_cgroup *objcg;
3186 int ret = 0;
3187
3188 objcg = get_obj_cgroup_from_current();
3189 if (objcg) {
3190 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3191 if (!ret) {
3192 page->memcg_data = (unsigned long)objcg |
3193 MEMCG_DATA_KMEM;
3194 return 0;
3195 }
3196 obj_cgroup_put(objcg);
3197 }
3198 return ret;
3199 }
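/*
 * Context note (hedged): this is normally reached from the page
 * allocator for allocations that carry __GFP_ACCOUNT, e.g.
 *
 *	page = alloc_pages(GFP_KERNEL_ACCOUNT, order);
 *
 * where GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT.
 */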
3200
3201 /**
3202 * __memcg_kmem_uncharge_page: uncharge a kmem page
3203 * @page: page to uncharge
3204 * @order: allocation order
3205 */
3206 void __memcg_kmem_uncharge_page(struct page *page, int order)
3207 {
3208 struct folio *folio = page_folio(page);
3209 struct obj_cgroup *objcg;
3210 unsigned int nr_pages = 1 << order;
3211
3212 if (!folio_memcg_kmem(folio))
3213 return;
3214
3215 objcg = __folio_objcg(folio);
3216 obj_cgroup_uncharge_pages(objcg, nr_pages);
3217 folio->memcg_data = 0;
3218 obj_cgroup_put(objcg);
3219 }
3220
3221 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3222 enum node_stat_item idx, int nr)
3223 {
3224 struct memcg_stock_pcp *stock;
3225 struct obj_cgroup *old = NULL;
3226 unsigned long flags;
3227 int *bytes;
3228
3229 local_lock_irqsave(&memcg_stock.stock_lock, flags);
3230 stock = this_cpu_ptr(&memcg_stock);
3231
3232 /*
3233 * Save vmstat data in stock and skip vmstat array update unless
3234 * accumulating over a page of vmstat data or when pgdat or idx
3235 * changes.
3236 */
3237 if (READ_ONCE(stock->cached_objcg) != objcg) {
3238 old = drain_obj_stock(stock);
3239 obj_cgroup_get(objcg);
3240 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3241 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3242 WRITE_ONCE(stock->cached_objcg, objcg);
3243 stock->cached_pgdat = pgdat;
3244 } else if (stock->cached_pgdat != pgdat) {
3245 /* Flush the existing cached vmstat data */
3246 struct pglist_data *oldpg = stock->cached_pgdat;
3247
3248 if (stock->nr_slab_reclaimable_b) {
3249 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3250 stock->nr_slab_reclaimable_b);
3251 stock->nr_slab_reclaimable_b = 0;
3252 }
3253 if (stock->nr_slab_unreclaimable_b) {
3254 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3255 stock->nr_slab_unreclaimable_b);
3256 stock->nr_slab_unreclaimable_b = 0;
3257 }
3258 stock->cached_pgdat = pgdat;
3259 }
3260
3261 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3262 : &stock->nr_slab_unreclaimable_b;
3263 /*
3264 * Even for large object >= PAGE_SIZE, the vmstat data will still be
3265 * cached locally at least once before pushing it out.
3266 */
3267 if (!*bytes) {
3268 *bytes = nr;
3269 nr = 0;
3270 } else {
3271 *bytes += nr;
3272 if (abs(*bytes) > PAGE_SIZE) {
3273 nr = *bytes;
3274 *bytes = 0;
3275 } else {
3276 nr = 0;
3277 }
3278 }
3279 if (nr)
3280 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3281
3282 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3283 if (old)
3284 obj_cgroup_put(old);
3285 }
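/*
 * Illustrative sequence (assuming PAGE_SIZE == 4096): three updates of
 * +2000 bytes to NR_SLAB_RECLAIMABLE_B on the same pgdat set *bytes to
 * 2000, then 4000, then 6000; only the third crosses PAGE_SIZE, at
 * which point all 6000 bytes are pushed out via mod_objcg_mlstate()
 * and *bytes resets to 0.
 */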
3286
3287 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3288 {
3289 struct memcg_stock_pcp *stock;
3290 unsigned long flags;
3291 bool ret = false;
3292
3293 local_lock_irqsave(&memcg_stock.stock_lock, flags);
3294
3295 stock = this_cpu_ptr(&memcg_stock);
3296 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3297 stock->nr_bytes -= nr_bytes;
3298 ret = true;
3299 }
3300
3301 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3302
3303 return ret;
3304 }
3305
3306 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3307 {
3308 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3309
3310 if (!old)
3311 return NULL;
3312
3313 if (stock->nr_bytes) {
3314 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3315 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3316
3317 if (nr_pages) {
3318 struct mem_cgroup *memcg;
3319
3320 memcg = get_mem_cgroup_from_objcg(old);
3321
3322 memcg_account_kmem(memcg, -nr_pages);
3323 __refill_stock(memcg, nr_pages);
3324
3325 css_put(&memcg->css);
3326 }
3327
3328 /*
3329 * The leftover is flushed to the centralized per-memcg value.
3330 * On the next attempt to refill obj stock it will be moved
3331 * to a per-cpu stock (probably on another CPU), see
3332 * refill_obj_stock().
3333 *
3334 * How often it's flushed is a trade-off between the memory
3335 * limit enforcement accuracy and potential CPU contention,
3336 * so it might be changed in the future.
3337 */
3338 atomic_add(nr_bytes, &old->nr_charged_bytes);
3339 stock->nr_bytes = 0;
3340 }
3341
3342 /*
3343 * Flush the vmstat data in current stock
3344 */
3345 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3346 if (stock->nr_slab_reclaimable_b) {
3347 mod_objcg_mlstate(old, stock->cached_pgdat,
3348 NR_SLAB_RECLAIMABLE_B,
3349 stock->nr_slab_reclaimable_b);
3350 stock->nr_slab_reclaimable_b = 0;
3351 }
3352 if (stock->nr_slab_unreclaimable_b) {
3353 mod_objcg_mlstate(old, stock->cached_pgdat,
3354 NR_SLAB_UNRECLAIMABLE_B,
3355 stock->nr_slab_unreclaimable_b);
3356 stock->nr_slab_unreclaimable_b = 0;
3357 }
3358 stock->cached_pgdat = NULL;
3359 }
3360
3361 WRITE_ONCE(stock->cached_objcg, NULL);
3362 /*
3363 * The 'old' objcg needs to be released by the caller via
3364 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3365 */
3366 return old;
3367 }
3368
3369 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3370 struct mem_cgroup *root_memcg)
3371 {
3372 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3373 struct mem_cgroup *memcg;
3374
3375 if (objcg) {
3376 memcg = obj_cgroup_memcg(objcg);
3377 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3378 return true;
3379 }
3380
3381 return false;
3382 }
3383
3384 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3385 bool allow_uncharge)
3386 {
3387 struct memcg_stock_pcp *stock;
3388 struct obj_cgroup *old = NULL;
3389 unsigned long flags;
3390 unsigned int nr_pages = 0;
3391
3392 local_lock_irqsave(&memcg_stock.stock_lock, flags);
3393
3394 stock = this_cpu_ptr(&memcg_stock);
3395 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3396 old = drain_obj_stock(stock);
3397 obj_cgroup_get(objcg);
3398 WRITE_ONCE(stock->cached_objcg, objcg);
3399 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3400 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3401 allow_uncharge = true; /* Allow uncharge when objcg changes */
3402 }
3403 stock->nr_bytes += nr_bytes;
3404
3405 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3406 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3407 stock->nr_bytes &= (PAGE_SIZE - 1);
3408 }
3409
3410 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3411 if (old)
3412 obj_cgroup_put(old);
3413
3414 if (nr_pages)
3415 obj_cgroup_uncharge_pages(objcg, nr_pages);
3416 }
3417
3418 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3419 {
3420 unsigned int nr_pages, nr_bytes;
3421 int ret;
3422
3423 if (consume_obj_stock(objcg, size))
3424 return 0;
3425
3426 /*
3427 * In theory, objcg->nr_charged_bytes can have enough
3428 * pre-charged bytes to satisfy the allocation. However,
3429 * flushing objcg->nr_charged_bytes requires two atomic
3430 * operations, and objcg->nr_charged_bytes can't be big.
3431 * The shared objcg->nr_charged_bytes can also become a
3432 * performance bottleneck if all tasks of the same memcg are
3433 * trying to update it. So it's better to ignore it and try to
3434 * grab some new pages. The stock's nr_bytes will be flushed to
3435 * objcg->nr_charged_bytes later on when objcg changes.
3436 *
3437 * The stock's nr_bytes may contain enough pre-charged bytes
3438 * to allow one less page to be charged, but we can't rely
3439 * on the pre-charged bytes not being changed outside of
3440 * consume_obj_stock() or refill_obj_stock(). So ignore those
3441 * pre-charged bytes as well when charging pages. To avoid a
3442 * page uncharge right after a page charge, we set the
3443 * allow_uncharge flag to false when calling refill_obj_stock()
3444 * to temporarily allow the pre-charged bytes to exceed the page
3445 * size limit. The maximum reachable value of the pre-charged
3446 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3447 * race.
3448 */
3449 nr_pages = size >> PAGE_SHIFT;
3450 nr_bytes = size & (PAGE_SIZE - 1);
3451
3452 if (nr_bytes)
3453 nr_pages += 1;
3454
3455 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3456 if (!ret && nr_bytes)
3457 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3458
3459 return ret;
3460 }
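/*
 * Worked example (assuming PAGE_SIZE == 4096): a charge of size = 9000
 * bytes gives nr_pages = 2 and nr_bytes = 808, so three whole pages are
 * charged and the unused 4096 - 808 = 3288 bytes are handed back to the
 * local stock via refill_obj_stock() with uncharging disallowed.
 */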
3461
3462 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3463 {
3464 refill_obj_stock(objcg, size, true);
3465 }
3466
3467 #endif /* CONFIG_MEMCG_KMEM */
3468
3469 /*
3470 * Because page_memcg(head) is not set on tails, set it now.
3471 */
3472 void split_page_memcg(struct page *head, unsigned int nr)
3473 {
3474 struct folio *folio = page_folio(head);
3475 struct mem_cgroup *memcg = folio_memcg(folio);
3476 int i;
3477
3478 if (mem_cgroup_disabled() || !memcg)
3479 return;
3480
3481 for (i = 1; i < nr; i++)
3482 folio_page(folio, i)->memcg_data = folio->memcg_data;
3483
3484 if (folio_memcg_kmem(folio))
3485 obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3486 else
3487 css_get_many(&memcg->css, nr - 1);
3488 }
3489
3490 void folio_copy_memcg(struct folio *src)
3491 {
3492 int i;
3493 unsigned long flags;
3494 int delta = 0;
3495 int nr_pages = folio_nr_pages(src);
3496 struct mem_cgroup *memcg = folio_memcg(src);
3497
3498 if (folio_can_split(src))
3499 return;
3500
3501 if (WARN_ON_ONCE(!src->_dst_pp))
3502 return;
3503
3504 if (mem_cgroup_disabled())
3505 return;
3506
3507 if (WARN_ON_ONCE(!memcg))
3508 return;
3509
3510 VM_WARN_ON_ONCE_FOLIO(!folio_test_large(src), src);
3511 VM_WARN_ON_ONCE_FOLIO(folio_ref_count(src), src);
3512
3513 for (i = 0; i < nr_pages; i++) {
3514 struct page *dst = folio_dst_page(src, i);
3515
3516 if (!dst)
3517 continue;
3518
3519 commit_charge(page_folio(dst), memcg);
3520 delta++;
3521 }
3522
3523 if (!mem_cgroup_is_root(memcg)) {
3524 page_counter_charge(&memcg->memory, delta);
3525 if (do_memsw_account())
3526 page_counter_charge(&memcg->memsw, delta);
3527 }
3528
3529 css_get_many(&memcg->css, delta);
3530
3531 local_irq_save(flags);
3532 mem_cgroup_charge_statistics(memcg, delta);
3533 memcg_check_events(memcg, folio_nid(src));
3534 local_irq_restore(flags);
3535 }
3536
3537 #ifdef CONFIG_SWAP
3538 /**
3539 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3540 * @entry: swap entry to be moved
3541 * @from: mem_cgroup which the entry is moved from
3542 * @to: mem_cgroup which the entry is moved to
3543 *
3544 * It succeeds only when the swap_cgroup's record for this entry is the same
3545 * as the mem_cgroup's id of @from.
3546 *
3547 * Returns 0 on success, -EINVAL on failure.
3548 *
3549 * The caller must have charged @to, IOW, called page_counter_charge() on
3550 * both res and memsw, and called css_get().
3551 */
3552 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3553 struct mem_cgroup *from, struct mem_cgroup *to)
3554 {
3555 unsigned short old_id, new_id;
3556
3557 old_id = mem_cgroup_id(from);
3558 new_id = mem_cgroup_id(to);
3559
3560 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3561 mod_memcg_state(from, MEMCG_SWAP, -1);
3562 mod_memcg_state(to, MEMCG_SWAP, 1);
3563 return 0;
3564 }
3565 return -EINVAL;
3566 }
3567 #else
3568 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3569 struct mem_cgroup *from, struct mem_cgroup *to)
3570 {
3571 return -EINVAL;
3572 }
3573 #endif
3574
3575 static DEFINE_MUTEX(memcg_max_mutex);
3576
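/*
 * Update memory.max (memsw == false) or memsw.max (memsw == true) to @max,
 * preserving the invariant memory.max <= memsw.max. If usage is above the
 * new limit, drain the per-cpu stocks once and then reclaim until the new
 * limit can be set, giving up with -EBUSY when reclaim stops making
 * progress.
 */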
3577 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3578 unsigned long max, bool memsw)
3579 {
3580 bool enlarge = false;
3581 bool drained = false;
3582 int ret;
3583 bool limits_invariant;
3584 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3585
3586 do {
3587 if (signal_pending(current)) {
3588 ret = -EINTR;
3589 break;
3590 }
3591
3592 mutex_lock(&memcg_max_mutex);
3593 /*
3594 * Make sure that the new limit (memsw or memory limit) doesn't
3595 * break our basic invariant rule memory.max <= memsw.max.
3596 */
3597 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3598 max <= memcg->memsw.max;
3599 if (!limits_invariant) {
3600 mutex_unlock(&memcg_max_mutex);
3601 ret = -EINVAL;
3602 break;
3603 }
3604 if (max > counter->max)
3605 enlarge = true;
3606 ret = page_counter_set_max(counter, max);
3607 mutex_unlock(&memcg_max_mutex);
3608
3609 if (!ret)
3610 break;
3611
3612 if (!drained) {
3613 drain_all_stock(memcg);
3614 drained = true;
3615 continue;
3616 }
3617
3618 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3619 memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3620 ret = -EBUSY;
3621 break;
3622 }
3623 } while (true);
3624
3625 if (!ret && enlarge)
3626 memcg_oom_recover(memcg);
3627
3628 return ret;
3629 }
3630
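/*
 * Reclaim from the cgroups exceeding their soft limit the most on @pgdat,
 * worst offender first. Only used for order-0 global reclaim; it is
 * bypassed entirely when the multi-gen LRU is enabled.
 */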
3631 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3632 gfp_t gfp_mask,
3633 unsigned long *total_scanned)
3634 {
3635 unsigned long nr_reclaimed = 0;
3636 struct mem_cgroup_per_node *mz, *next_mz = NULL;
3637 unsigned long reclaimed;
3638 int loop = 0;
3639 struct mem_cgroup_tree_per_node *mctz;
3640 unsigned long excess;
3641
3642 if (lru_gen_enabled())
3643 return 0;
3644
3645 if (order > 0)
3646 return 0;
3647
3648 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3649
3650 /*
3651 * Do not even bother to check the largest node if the root
3652 * is empty. Do it lockless to prevent lock bouncing. Races
3653 * are acceptable as soft limit is best effort anyway.
3654 */
3655 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3656 return 0;
3657
3658 /*
3659 * This loop can run for a while, especially if mem_cgroups continuously
3660 * keep exceeding their soft limit and putting the system under
3661 * pressure.
3662 */
3663 do {
3664 if (next_mz)
3665 mz = next_mz;
3666 else
3667 mz = mem_cgroup_largest_soft_limit_node(mctz);
3668 if (!mz)
3669 break;
3670
3671 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3672 gfp_mask, total_scanned);
3673 nr_reclaimed += reclaimed;
3674 spin_lock_irq(&mctz->lock);
3675
3676 /*
3677 * If we failed to reclaim anything from this memory cgroup
3678 * it is time to move on to the next cgroup
3679 */
3680 next_mz = NULL;
3681 if (!reclaimed)
3682 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3683
3684 excess = soft_limit_excess(mz->memcg);
3685 /*
3686 * One school of thought says that we should not add
3687 * back the node to the tree if reclaim returns 0.
3688 * But our reclaim could return 0 simply because, due
3689 * to priority, we are exposing a smaller subset of
3690 * memory to reclaim from. Consider this as a longer
3691 * term TODO.
3692 */
3693 /* If excess == 0, no tree ops */
3694 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3695 spin_unlock_irq(&mctz->lock);
3696 css_put(&mz->memcg->css);
3697 loop++;
3698 /*
3699 * Could not reclaim anything and there are no more
3700 * mem cgroups to try or we seem to be looping without
3701 * reclaiming anything.
3702 */
3703 if (!nr_reclaimed &&
3704 (next_mz == NULL ||
3705 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3706 break;
3707 } while (!nr_reclaimed);
3708 if (next_mz)
3709 css_put(&next_mz->memcg->css);
3710 return nr_reclaimed;
3711 }
3712
3713 /*
3714 * Reclaims as many pages from the given memcg as possible.
3715 *
3716 * Caller is responsible for holding css reference for memcg.
3717 */
3718 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3719 {
3720 int nr_retries = MAX_RECLAIM_RETRIES;
3721
3722 /* we call try-to-free pages to make this cgroup empty */
3723 lru_add_drain_all();
3724
3725 drain_all_stock(memcg);
3726
3727 /* try to free all pages in this cgroup */
3728 while (nr_retries && page_counter_read(&memcg->memory)) {
3729 if (signal_pending(current))
3730 return -EINTR;
3731
3732 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3733 MEMCG_RECLAIM_MAY_SWAP))
3734 nr_retries--;
3735 }
3736
3737 return 0;
3738 }
3739
3740 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3741 char *buf, size_t nbytes,
3742 loff_t off)
3743 {
3744 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3745
3746 if (mem_cgroup_is_root(memcg))
3747 return -EINVAL;
3748 return mem_cgroup_force_empty(memcg) ?: nbytes;
3749 }
3750
3751 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3752 struct cftype *cft)
3753 {
3754 return 1;
3755 }
3756
3757 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3758 struct cftype *cft, u64 val)
3759 {
3760 if (val == 1)
3761 return 0;
3762
3763 pr_warn_once("Non-hierarchical mode is deprecated. "
3764 "Please report your usecase to linux-mm@kvack.org if you "
3765 "depend on this functionality.\n");
3766
3767 return -EINVAL;
3768 }
3769
3770 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3771 {
3772 unsigned long val;
3773
3774 if (mem_cgroup_is_root(memcg)) {
3775 /*
3776 * Approximate root's usage from global state. This isn't
3777 * perfect, but the root usage was always an approximation.
3778 */
3779 val = global_node_page_state(NR_FILE_PAGES) +
3780 global_node_page_state(NR_ANON_MAPPED);
3781 if (swap)
3782 val += total_swap_pages - get_nr_swap_pages();
3783 } else {
3784 if (!swap)
3785 val = page_counter_read(&memcg->memory);
3786 else
3787 val = page_counter_read(&memcg->memsw);
3788 }
3789 return val;
3790 }
3791
3792 enum {
3793 RES_USAGE,
3794 RES_LIMIT,
3795 RES_MAX_USAGE,
3796 RES_FAILCNT,
3797 RES_SOFT_LIMIT,
3798 };
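/*
 * The RES_* values above are the attribute half of cft->private; the other
 * half is a MEMFILE type (_MEM, _MEMSWAP, _KMEM or _TCP). Both are packed
 * together with MEMFILE_PRIVATE() in mem_cgroup_legacy_files[] below.
 */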
3799
3800 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3801 struct cftype *cft)
3802 {
3803 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3804 struct page_counter *counter;
3805
3806 switch (MEMFILE_TYPE(cft->private)) {
3807 case _MEM:
3808 counter = &memcg->memory;
3809 break;
3810 case _MEMSWAP:
3811 counter = &memcg->memsw;
3812 break;
3813 case _KMEM:
3814 counter = &memcg->kmem;
3815 break;
3816 case _TCP:
3817 counter = &memcg->tcpmem;
3818 break;
3819 default:
3820 BUG();
3821 }
3822
3823 switch (MEMFILE_ATTR(cft->private)) {
3824 case RES_USAGE:
3825 if (counter == &memcg->memory)
3826 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3827 if (counter == &memcg->memsw)
3828 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3829 return (u64)page_counter_read(counter) * PAGE_SIZE;
3830 case RES_LIMIT:
3831 return (u64)counter->max * PAGE_SIZE;
3832 case RES_MAX_USAGE:
3833 return (u64)counter->watermark * PAGE_SIZE;
3834 case RES_FAILCNT:
3835 return counter->failcnt;
3836 case RES_SOFT_LIMIT:
3837 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3838 default:
3839 BUG();
3840 }
3841 }
3842
3843 /*
3844 * This function doesn't do anything useful. Its only job is to provide a read
3845 * handler for a file so that cgroup_file_mode() will add read permissions.
3846 */
3847 static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
3848 __always_unused void *v)
3849 {
3850 return -EINVAL;
3851 }
3852
3853 #ifdef CONFIG_MEMCG_KMEM
3854 static int memcg_online_kmem(struct mem_cgroup *memcg)
3855 {
3856 struct obj_cgroup *objcg;
3857
3858 if (mem_cgroup_kmem_disabled())
3859 return 0;
3860
3861 if (unlikely(mem_cgroup_is_root(memcg)))
3862 return 0;
3863
3864 objcg = obj_cgroup_alloc();
3865 if (!objcg)
3866 return -ENOMEM;
3867
3868 objcg->memcg = memcg;
3869 rcu_assign_pointer(memcg->objcg, objcg);
3870
3871 static_branch_enable(&memcg_kmem_online_key);
3872
3873 memcg->kmemcg_id = memcg->id.id;
3874
3875 return 0;
3876 }
3877
3878 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3879 {
3880 struct mem_cgroup *parent;
3881
3882 if (mem_cgroup_kmem_disabled())
3883 return;
3884
3885 if (unlikely(mem_cgroup_is_root(memcg)))
3886 return;
3887
3888 parent = parent_mem_cgroup(memcg);
3889 if (!parent)
3890 parent = root_mem_cgroup;
3891
3892 memcg_reparent_objcgs(memcg, parent);
3893
3894 /*
3895 * After we have finished memcg_reparent_objcgs(), all list_lrus
3896 * corresponding to this cgroup are guaranteed to remain empty.
3897 * The ordering is imposed by list_lru_node->lock taken by
3898 * memcg_reparent_list_lrus().
3899 */
3900 memcg_reparent_list_lrus(memcg, parent);
3901 }
3902 #else
3903 static int memcg_online_kmem(struct mem_cgroup *memcg)
3904 {
3905 return 0;
3906 }
3907 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3908 {
3909 }
3910 #endif /* CONFIG_MEMCG_KMEM */
3911
3912 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3913 {
3914 int ret;
3915
3916 mutex_lock(&memcg_max_mutex);
3917
3918 ret = page_counter_set_max(&memcg->tcpmem, max);
3919 if (ret)
3920 goto out;
3921
3922 if (!memcg->tcpmem_active) {
3923 /*
3924 * The active flag needs to be written after the static_key
3925 * update. This is what guarantees that the socket activation
3926 * function is the last one to run. See mem_cgroup_sk_alloc()
3927 * for details, and note that we don't mark any socket as
3928 * belonging to this memcg until that flag is up.
3929 *
3930 * We need to do this, because static_keys will span multiple
3931 * sites, but we can't control their order. If we mark a socket
3932 * as accounted, but the accounting functions are not patched in
3933 * yet, we'll lose accounting.
3934 *
3935 * We never race with the readers in mem_cgroup_sk_alloc(),
3936 * because when this value change, the code to process it is not
3937 * patched in yet.
3938 */
3939 static_branch_inc(&memcg_sockets_enabled_key);
3940 memcg->tcpmem_active = true;
3941 }
3942 out:
3943 mutex_unlock(&memcg_max_mutex);
3944 return ret;
3945 }
3946
3947 /*
3948 * The user of this function is...
3949 * RES_LIMIT.
3950 */
3951 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3952 char *buf, size_t nbytes, loff_t off)
3953 {
3954 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3955 unsigned long nr_pages;
3956 int ret;
3957
3958 buf = strstrip(buf);
3959 ret = page_counter_memparse(buf, "-1", &nr_pages);
3960 if (ret)
3961 return ret;
3962
3963 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3964 case RES_LIMIT:
3965 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3966 ret = -EINVAL;
3967 break;
3968 }
3969 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3970 case _MEM:
3971 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3972 break;
3973 case _MEMSWAP:
3974 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3975 break;
3976 case _KMEM:
3977 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3978 "Writing any value to this file has no effect. "
3979 "Please report your usecase to linux-mm@kvack.org if you "
3980 "depend on this functionality.\n");
3981 ret = 0;
3982 break;
3983 case _TCP:
3984 ret = memcg_update_tcp_max(memcg, nr_pages);
3985 break;
3986 }
3987 break;
3988 case RES_SOFT_LIMIT:
3989 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3990 ret = -EOPNOTSUPP;
3991 } else {
3992 WRITE_ONCE(memcg->soft_limit, nr_pages);
3993 ret = 0;
3994 }
3995 break;
3996 }
3997 return ret ?: nbytes;
3998 }
3999
4000 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4001 size_t nbytes, loff_t off)
4002 {
4003 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4004 struct page_counter *counter;
4005
4006 switch (MEMFILE_TYPE(of_cft(of)->private)) {
4007 case _MEM:
4008 counter = &memcg->memory;
4009 break;
4010 case _MEMSWAP:
4011 counter = &memcg->memsw;
4012 break;
4013 case _KMEM:
4014 counter = &memcg->kmem;
4015 break;
4016 case _TCP:
4017 counter = &memcg->tcpmem;
4018 break;
4019 default:
4020 BUG();
4021 }
4022
4023 switch (MEMFILE_ATTR(of_cft(of)->private)) {
4024 case RES_MAX_USAGE:
4025 page_counter_reset_watermark(counter);
4026 break;
4027 case RES_FAILCNT:
4028 counter->failcnt = 0;
4029 break;
4030 default:
4031 BUG();
4032 }
4033
4034 return nbytes;
4035 }
4036
4037 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
4038 struct cftype *cft)
4039 {
4040 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
4041 }
4042
4043 #ifdef CONFIG_MMU
4044 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4045 struct cftype *cft, u64 val)
4046 {
4047 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4048
4049 pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
4050 "Please report your usecase to linux-mm@kvack.org if you "
4051 "depend on this functionality.\n");
4052
4053 if (val & ~MOVE_MASK)
4054 return -EINVAL;
4055
4056 /*
4057 * No kind of locking is needed in here, because ->can_attach() will
4058 * check this value once in the beginning of the process, and then carry
4059 * on with stale data. This means that changes to this value will only
4060 * affect task migrations starting after the change.
4061 */
4062 memcg->move_charge_at_immigrate = val;
4063 return 0;
4064 }
4065 #else
4066 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4067 struct cftype *cft, u64 val)
4068 {
4069 return -ENOSYS;
4070 }
4071 #endif
4072
4073 #ifdef CONFIG_NUMA
4074
4075 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
4076 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
4077 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
4078
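/*
 * The two helpers below sum the sizes of the LRU lists selected by
 * @lru_mask. With @tree == true the hierarchical (subtree) counters are
 * used; otherwise only the cgroup's own local counters are summed.
 */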
4079 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4080 int nid, unsigned int lru_mask, bool tree)
4081 {
4082 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4083 unsigned long nr = 0;
4084 enum lru_list lru;
4085
4086 VM_BUG_ON((unsigned)nid >= nr_node_ids);
4087
4088 for_each_lru(lru) {
4089 if (!(BIT(lru) & lru_mask))
4090 continue;
4091 if (tree)
4092 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4093 else
4094 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
4095 }
4096 return nr;
4097 }
4098
4099 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
4100 unsigned int lru_mask,
4101 bool tree)
4102 {
4103 unsigned long nr = 0;
4104 enum lru_list lru;
4105
4106 for_each_lru(lru) {
4107 if (!(BIT(lru) & lru_mask))
4108 continue;
4109 if (tree)
4110 nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4111 else
4112 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
4113 }
4114 return nr;
4115 }
4116
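/*
 * memory.numa_stat: for each stat class, print the cgroup-local total
 * followed by a per-node breakdown ("total=... N0=... N1=..."), then the
 * same data again with a hierarchical_ prefix for subtree totals.
 */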
4117 static int memcg_numa_stat_show(struct seq_file *m, void *v)
4118 {
4119 struct numa_stat {
4120 const char *name;
4121 unsigned int lru_mask;
4122 };
4123
4124 static const struct numa_stat stats[] = {
4125 { "total", LRU_ALL },
4126 { "file", LRU_ALL_FILE },
4127 { "anon", LRU_ALL_ANON },
4128 { "unevictable", BIT(LRU_UNEVICTABLE) },
4129 };
4130 const struct numa_stat *stat;
4131 int nid;
4132 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4133
4134 mem_cgroup_flush_stats(memcg);
4135
4136 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4137 seq_printf(m, "%s=%lu", stat->name,
4138 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4139 false));
4140 for_each_node_state(nid, N_MEMORY)
4141 seq_printf(m, " N%d=%lu", nid,
4142 mem_cgroup_node_nr_lru_pages(memcg, nid,
4143 stat->lru_mask, false));
4144 seq_putc(m, '\n');
4145 }
4146
4147 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4149 seq_printf(m, "hierarchical_%s=%lu", stat->name,
4150 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4151 true));
4152 for_each_node_state(nid, N_MEMORY)
4153 seq_printf(m, " N%d=%lu", nid,
4154 mem_cgroup_node_nr_lru_pages(memcg, nid,
4155 stat->lru_mask, true));
4156 seq_putc(m, '\n');
4157 }
4158
4159 return 0;
4160 }
4161 #endif /* CONFIG_NUMA */
4162
4163 static const unsigned int memcg1_stats[] = {
4164 NR_FILE_PAGES,
4165 NR_ANON_MAPPED,
4166 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4167 NR_ANON_THPS,
4168 #endif
4169 NR_SHMEM,
4170 NR_FILE_MAPPED,
4171 NR_FILE_DIRTY,
4172 NR_WRITEBACK,
4173 WORKINGSET_REFAULT_ANON,
4174 WORKINGSET_REFAULT_FILE,
4175 MEMCG_SWAP,
4176 };
4177
4178 static const char *const memcg1_stat_names[] = {
4179 "cache",
4180 "rss",
4181 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4182 "rss_huge",
4183 #endif
4184 "shmem",
4185 "mapped_file",
4186 "dirty",
4187 "writeback",
4188 "workingset_refault_anon",
4189 "workingset_refault_file",
4190 "swap",
4191 };
4192
4193 /* Universal VM events cgroup1 shows, original sort order */
4194 static const unsigned int memcg1_events[] = {
4195 PGPGIN,
4196 PGPGOUT,
4197 PGFAULT,
4198 PGMAJFAULT,
4199 };
4200
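/*
 * Format the cgroup1 memory.stat payload: local counters and events first,
 * then the effective hierarchical limits and total_* subtree counters, and
 * finally anon/file cost debug data when CONFIG_DEBUG_VM is enabled.
 */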
4201 static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
4202 {
4203 unsigned long memory, memsw;
4204 struct mem_cgroup *mi;
4205 unsigned int i;
4206
4207 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4208
4209 mem_cgroup_flush_stats(memcg);
4210
4211 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4212 unsigned long nr;
4213
4214 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4215 continue;
4216 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4217 seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i],
4218 nr * memcg_page_state_unit(memcg1_stats[i]));
4219 }
4220
4221 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4222 seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4223 memcg_events_local(memcg, memcg1_events[i]));
4224
4225 for (i = 0; i < NR_LRU_LISTS; i++)
4226 seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4227 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4228 PAGE_SIZE);
4229
4230 /* Hierarchical information */
4231 memory = memsw = PAGE_COUNTER_MAX;
4232 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4233 memory = min(memory, READ_ONCE(mi->memory.max));
4234 memsw = min(memsw, READ_ONCE(mi->memsw.max));
4235 }
4236 seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4237 (u64)memory * PAGE_SIZE);
4238 if (do_memsw_account())
4239 seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4240 (u64)memsw * PAGE_SIZE);
4241
4242 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4243 unsigned long nr;
4244
4245 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4246 continue;
4247 nr = memcg_page_state(memcg, memcg1_stats[i]);
4248 seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
4249 (u64)nr * memcg_page_state_unit(memcg1_stats[i]));
4250 }
4251
4252 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4253 seq_buf_printf(s, "total_%s %llu\n",
4254 vm_event_name(memcg1_events[i]),
4255 (u64)memcg_events(memcg, memcg1_events[i]));
4256
4257 for (i = 0; i < NR_LRU_LISTS; i++)
4258 seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4259 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4260 PAGE_SIZE);
4261
4262 #ifdef CONFIG_DEBUG_VM
4263 {
4264 pg_data_t *pgdat;
4265 struct mem_cgroup_per_node *mz;
4266 unsigned long anon_cost = 0;
4267 unsigned long file_cost = 0;
4268
4269 for_each_online_pgdat(pgdat) {
4270 mz = memcg->nodeinfo[pgdat->node_id];
4271
4272 anon_cost += mz->lruvec.anon_cost;
4273 file_cost += mz->lruvec.file_cost;
4274 }
4275 seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4276 seq_buf_printf(s, "file_cost %lu\n", file_cost);
4277 }
4278 #endif
4279 }
4280
4281 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4282 struct cftype *cft)
4283 {
4284 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4285
4286 return mem_cgroup_swappiness(memcg);
4287 }
4288
4289 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4290 struct cftype *cft, u64 val)
4291 {
4292 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4293
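/*
 * Swappiness is bounded to [0, 200]; writing to the root cgroup updates
 * the global vm_swappiness instead.
 */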
4294 if (val > 200)
4295 return -EINVAL;
4296
4297 if (!mem_cgroup_is_root(memcg))
4298 WRITE_ONCE(memcg->swappiness, val);
4299 else
4300 WRITE_ONCE(vm_swappiness, val);
4301
4302 return 0;
4303 }
4304
4305 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4306 {
4307 struct mem_cgroup_threshold_ary *t;
4308 unsigned long usage;
4309 int i;
4310
4311 rcu_read_lock();
4312 if (!swap)
4313 t = rcu_dereference(memcg->thresholds.primary);
4314 else
4315 t = rcu_dereference(memcg->memsw_thresholds.primary);
4316
4317 if (!t)
4318 goto unlock;
4319
4320 usage = mem_cgroup_usage(memcg, swap);
4321
4322 /*
4323 * current_threshold points to the threshold just below or equal to usage.
4324 * If that no longer holds, a threshold was crossed after the
4325 * last call of __mem_cgroup_threshold().
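*
* For example, with thresholds {4M, 8M, 16M} and usage rising from 5M to
* 17M, the 8M and 16M eventfds are signalled below and current_threshold
* ends up pointing at the 16M entry.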
4326 */
4327 i = t->current_threshold;
4328
4329 /*
4330 * Iterate backward over array of thresholds starting from
4331 * current_threshold and check if a threshold is crossed.
4332 * If none of the thresholds below usage is crossed, we read
4333 * only one element of the array here.
4334 */
4335 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4336 eventfd_signal(t->entries[i].eventfd, 1);
4337
4338 /* i = current_threshold + 1 */
4339 i++;
4340
4341 /*
4342 * Iterate forward over array of thresholds starting from
4343 * current_threshold+1 and check if a threshold is crossed.
4344 * If none of the thresholds above usage is crossed, we read
4345 * only one element of the array here.
4346 */
4347 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4348 eventfd_signal(t->entries[i].eventfd, 1);
4349
4350 /* Update current_threshold */
4351 t->current_threshold = i - 1;
4352 unlock:
4353 rcu_read_unlock();
4354 }
4355
4356 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4357 {
4358 while (memcg) {
4359 __mem_cgroup_threshold(memcg, false);
4360 if (do_memsw_account())
4361 __mem_cgroup_threshold(memcg, true);
4362
4363 memcg = parent_mem_cgroup(memcg);
4364 }
4365 }
4366
4367 static int compare_thresholds(const void *a, const void *b)
4368 {
4369 const struct mem_cgroup_threshold *_a = a;
4370 const struct mem_cgroup_threshold *_b = b;
4371
4372 if (_a->threshold > _b->threshold)
4373 return 1;
4374
4375 if (_a->threshold < _b->threshold)
4376 return -1;
4377
4378 return 0;
4379 }
4380
4381 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4382 {
4383 struct mem_cgroup_eventfd_list *ev;
4384
4385 spin_lock(&memcg_oom_lock);
4386
4387 list_for_each_entry(ev, &memcg->oom_notify, list)
4388 eventfd_signal(ev->eventfd, 1);
4389
4390 spin_unlock(&memcg_oom_lock);
4391 return 0;
4392 }
4393
4394 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4395 {
4396 struct mem_cgroup *iter;
4397
4398 for_each_mem_cgroup_tree(iter, memcg)
4399 mem_cgroup_oom_notify_cb(iter);
4400 }
4401
4402 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4403 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4404 {
4405 struct mem_cgroup_thresholds *thresholds;
4406 struct mem_cgroup_threshold_ary *new;
4407 unsigned long threshold;
4408 unsigned long usage;
4409 int i, size, ret;
4410
4411 ret = page_counter_memparse(args, "-1", &threshold);
4412 if (ret)
4413 return ret;
4414
4415 mutex_lock(&memcg->thresholds_lock);
4416
4417 if (type == _MEM) {
4418 thresholds = &memcg->thresholds;
4419 usage = mem_cgroup_usage(memcg, false);
4420 } else if (type == _MEMSWAP) {
4421 thresholds = &memcg->memsw_thresholds;
4422 usage = mem_cgroup_usage(memcg, true);
4423 } else
4424 BUG();
4425
4426 /* Check if a threshold crossed before adding a new one */
4427 if (thresholds->primary)
4428 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4429
4430 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4431
4432 /* Allocate memory for new array of thresholds */
4433 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4434 if (!new) {
4435 ret = -ENOMEM;
4436 goto unlock;
4437 }
4438 new->size = size;
4439
4440 /* Copy thresholds (if any) to new array */
4441 if (thresholds->primary)
4442 memcpy(new->entries, thresholds->primary->entries,
4443 flex_array_size(new, entries, size - 1));
4444
4445 /* Add new threshold */
4446 new->entries[size - 1].eventfd = eventfd;
4447 new->entries[size - 1].threshold = threshold;
4448
4449 /* Sort thresholds. Registering a new threshold isn't time-critical */
4450 sort(new->entries, size, sizeof(*new->entries),
4451 compare_thresholds, NULL);
4452
4453 /* Find current threshold */
4454 new->current_threshold = -1;
4455 for (i = 0; i < size; i++) {
4456 if (new->entries[i].threshold <= usage) {
4457 /*
4458 * new->current_threshold will not be used until
4459 * rcu_assign_pointer(), so it's safe to increment
4460 * it here.
4461 */
4462 ++new->current_threshold;
4463 } else
4464 break;
4465 }
4466
4467 /* Free old spare buffer and save old primary buffer as spare */
4468 kfree(thresholds->spare);
4469 thresholds->spare = thresholds->primary;
4470
4471 rcu_assign_pointer(thresholds->primary, new);
4472
4473 /* To be sure that nobody uses thresholds */
4474 synchronize_rcu();
4475
4476 unlock:
4477 mutex_unlock(&memcg->thresholds_lock);
4478
4479 return ret;
4480 }
4481
4482 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4483 struct eventfd_ctx *eventfd, const char *args)
4484 {
4485 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4486 }
4487
4488 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4489 struct eventfd_ctx *eventfd, const char *args)
4490 {
4491 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4492 }
4493
4494 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4495 struct eventfd_ctx *eventfd, enum res_type type)
4496 {
4497 struct mem_cgroup_thresholds *thresholds;
4498 struct mem_cgroup_threshold_ary *new;
4499 unsigned long usage;
4500 int i, j, size, entries;
4501
4502 mutex_lock(&memcg->thresholds_lock);
4503
4504 if (type == _MEM) {
4505 thresholds = &memcg->thresholds;
4506 usage = mem_cgroup_usage(memcg, false);
4507 } else if (type == _MEMSWAP) {
4508 thresholds = &memcg->memsw_thresholds;
4509 usage = mem_cgroup_usage(memcg, true);
4510 } else
4511 BUG();
4512
4513 if (!thresholds->primary)
4514 goto unlock;
4515
4516 /* Check if a threshold crossed before removing */
4517 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4518
4519 /* Calculate the new number of thresholds */
4520 size = entries = 0;
4521 for (i = 0; i < thresholds->primary->size; i++) {
4522 if (thresholds->primary->entries[i].eventfd != eventfd)
4523 size++;
4524 else
4525 entries++;
4526 }
4527
4528 new = thresholds->spare;
4529
4530 /* If no items related to eventfd have been cleared, nothing to do */
4531 if (!entries)
4532 goto unlock;
4533
4534 /* Set thresholds array to NULL if we don't have thresholds */
4535 if (!size) {
4536 kfree(new);
4537 new = NULL;
4538 goto swap_buffers;
4539 }
4540
4541 new->size = size;
4542
4543 /* Copy thresholds and find current threshold */
4544 new->current_threshold = -1;
4545 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4546 if (thresholds->primary->entries[i].eventfd == eventfd)
4547 continue;
4548
4549 new->entries[j] = thresholds->primary->entries[i];
4550 if (new->entries[j].threshold <= usage) {
4551 /*
4552 * new->current_threshold will not be used
4553 * until rcu_assign_pointer(), so it's safe to increment
4554 * it here.
4555 */
4556 ++new->current_threshold;
4557 }
4558 j++;
4559 }
4560
4561 swap_buffers:
4562 /* Swap primary and spare array */
4563 thresholds->spare = thresholds->primary;
4564
4565 rcu_assign_pointer(thresholds->primary, new);
4566
4567 /* To be sure that nobody uses thresholds */
4568 synchronize_rcu();
4569
4570 /* If all events are unregistered, free the spare array */
4571 if (!new) {
4572 kfree(thresholds->spare);
4573 thresholds->spare = NULL;
4574 }
4575 unlock:
4576 mutex_unlock(&memcg->thresholds_lock);
4577 }
4578
4579 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4580 struct eventfd_ctx *eventfd)
4581 {
4582 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4583 }
4584
4585 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4586 struct eventfd_ctx *eventfd)
4587 {
4588 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4589 }
4590
4591 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4592 struct eventfd_ctx *eventfd, const char *args)
4593 {
4594 struct mem_cgroup_eventfd_list *event;
4595
4596 event = kmalloc(sizeof(*event), GFP_KERNEL);
4597 if (!event)
4598 return -ENOMEM;
4599
4600 spin_lock(&memcg_oom_lock);
4601
4602 event->eventfd = eventfd;
4603 list_add(&event->list, &memcg->oom_notify);
4604
4605 /* already in OOM? */
4606 if (memcg->under_oom)
4607 eventfd_signal(eventfd, 1);
4608 spin_unlock(&memcg_oom_lock);
4609
4610 return 0;
4611 }
4612
4613 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4614 struct eventfd_ctx *eventfd)
4615 {
4616 struct mem_cgroup_eventfd_list *ev, *tmp;
4617
4618 spin_lock(&memcg_oom_lock);
4619
4620 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4621 if (ev->eventfd == eventfd) {
4622 list_del(&ev->list);
4623 kfree(ev);
4624 }
4625 }
4626
4627 spin_unlock(&memcg_oom_lock);
4628 }
4629
4630 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4631 {
4632 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4633
4634 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
4635 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4636 seq_printf(sf, "oom_kill %lu\n",
4637 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4638 return 0;
4639 }
4640
4641 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4642 struct cftype *cft, u64 val)
4643 {
4644 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4645
4646 /* cannot set to root cgroup and only 0 and 1 are allowed */
4647 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4648 return -EINVAL;
4649
4650 WRITE_ONCE(memcg->oom_kill_disable, val);
4651 if (!val)
4652 memcg_oom_recover(memcg);
4653
4654 return 0;
4655 }
4656
4657 #ifdef CONFIG_CGROUP_WRITEBACK
4658
4659 #include <trace/events/writeback.h>
4660
4661 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4662 {
4663 return wb_domain_init(&memcg->cgwb_domain, gfp);
4664 }
4665
4666 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4667 {
4668 wb_domain_exit(&memcg->cgwb_domain);
4669 }
4670
4671 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4672 {
4673 wb_domain_size_changed(&memcg->cgwb_domain);
4674 }
4675
4676 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4677 {
4678 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4679
4680 if (!memcg->css.parent)
4681 return NULL;
4682
4683 return &memcg->cgwb_domain;
4684 }
4685
4686 /**
4687 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4688 * @wb: bdi_writeback in question
4689 * @pfilepages: out parameter for number of file pages
4690 * @pheadroom: out parameter for number of allocatable pages according to memcg
4691 * @pdirty: out parameter for number of dirty pages
4692 * @pwriteback: out parameter for number of pages under writeback
4693 *
4694 * Determine the numbers of file, headroom, dirty, and writeback pages in
4695 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4696 * is a bit more involved.
4697 *
4698 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4699 * headroom is calculated as the lowest headroom of itself and the
4700 * ancestors. Note that this doesn't consider the actual amount of
4701 * available memory in the system. The caller should further cap
4702 * *@pheadroom accordingly.
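*
* For example, a level with min(max, high) of 100 pages and 80 pages used
* contributes 100 - 80 = 20 pages of headroom; an ancestor with only 10
* pages of headroom caps *@pheadroom at 10.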
4703 */
4704 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4705 unsigned long *pheadroom, unsigned long *pdirty,
4706 unsigned long *pwriteback)
4707 {
4708 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4709 struct mem_cgroup *parent;
4710
4711 mem_cgroup_flush_stats(memcg);
4712
4713 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4714 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4715 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4716 memcg_page_state(memcg, NR_ACTIVE_FILE);
4717
4718 *pheadroom = PAGE_COUNTER_MAX;
4719 while ((parent = parent_mem_cgroup(memcg))) {
4720 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4721 READ_ONCE(memcg->memory.high));
4722 unsigned long used = page_counter_read(&memcg->memory);
4723
4724 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4725 memcg = parent;
4726 }
4727 }
4728
4729 /*
4730 * Foreign dirty flushing
4731 *
4732 * There's an inherent mismatch between memcg and writeback. The former
4733 * tracks ownership per-page while the latter per-inode. This was a
4734 * deliberate design decision because honoring per-page ownership in the
4735 * writeback path is complicated, may lead to higher CPU and IO overheads
4736 * and deemed unnecessary given that write-sharing an inode across
4737 * different cgroups isn't a common use-case.
4738 *
4739 * Combined with inode majority-writer ownership switching, this works well
4740 * enough in most cases but there are some pathological cases. For
4741 * example, let's say there are two cgroups A and B which keep writing to
4742 * different but confined parts of the same inode. B owns the inode and
4743 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4744 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4745 * triggering background writeback. A will be slowed down without a way to
4746 * make writeback of the dirty pages happen.
4747 *
4748 * Conditions like the above can lead to a cgroup getting repeatedly and
4749 * severely throttled after making some progress after each
4750 * dirty_expire_interval while the underlying IO device is almost
4751 * completely idle.
4752 *
4753 * Solving this problem completely requires matching the ownership tracking
4754 * granularities between memcg and writeback in either direction. However,
4755 * the more egregious behaviors can be avoided by simply remembering the
4756 * most recent foreign dirtying events and initiating remote flushes on
4757 * them when local writeback isn't enough to keep the memory clean enough.
4758 *
4759 * The following two functions implement such mechanism. When a foreign
4760 * page - a page whose memcg and writeback ownerships don't match - is
4761 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4762 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4763 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4764 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4765 * foreign bdi_writebacks which haven't expired. Both the numbers of
4766 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4767 * limited to MEMCG_CGWB_FRN_CNT.
4768 *
4769 * The mechanism only remembers IDs and doesn't hold any object references.
4770 * As being wrong occasionally doesn't matter, updates and accesses to the
4771 * records are lockless and racy.
4772 */
4773 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4774 struct bdi_writeback *wb)
4775 {
4776 struct mem_cgroup *memcg = folio_memcg(folio);
4777 struct memcg_cgwb_frn *frn;
4778 u64 now = get_jiffies_64();
4779 u64 oldest_at = now;
4780 int oldest = -1;
4781 int i;
4782
4783 trace_track_foreign_dirty(folio, wb);
4784
4785 /*
4786 * Pick the slot to use. If there is already a slot for @wb, keep
4787 * using it. If not, replace the oldest one which isn't being
4788 * written out.
4789 */
4790 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4791 frn = &memcg->cgwb_frn[i];
4792 if (frn->bdi_id == wb->bdi->id &&
4793 frn->memcg_id == wb->memcg_css->id)
4794 break;
4795 if (time_before64(frn->at, oldest_at) &&
4796 atomic_read(&frn->done.cnt) == 1) {
4797 oldest = i;
4798 oldest_at = frn->at;
4799 }
4800 }
4801
4802 if (i < MEMCG_CGWB_FRN_CNT) {
4803 /*
4804 * Re-using an existing one. Update timestamp lazily to
4805 * avoid making the cacheline hot. We want them to be
4806 * reasonably up-to-date and significantly shorter than
4807 * dirty_expire_interval as that's what expires the record.
4808 * Use the shorter of 1s and dirty_expire_interval / 8.
4809 */
4810 unsigned long update_intv =
4811 min_t(unsigned long, HZ,
4812 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4813
4814 if (time_before64(frn->at, now - update_intv))
4815 frn->at = now;
4816 } else if (oldest >= 0) {
4817 /* replace the oldest free one */
4818 frn = &memcg->cgwb_frn[oldest];
4819 frn->bdi_id = wb->bdi->id;
4820 frn->memcg_id = wb->memcg_css->id;
4821 frn->at = now;
4822 }
4823 }
4824
4825 /* issue foreign writeback flushes for recorded foreign dirtying events */
4826 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4827 {
4828 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4829 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4830 u64 now = jiffies_64;
4831 int i;
4832
4833 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4834 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4835
4836 /*
4837 * If the record is older than dirty_expire_interval,
4838 * writeback on it has already started. No need to kick it
4839 * off again. Also, don't start a new one if there's
4840 * already one in flight.
4841 */
4842 if (time_after64(frn->at, now - intv) &&
4843 atomic_read(&frn->done.cnt) == 1) {
4844 frn->at = 0;
4845 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4846 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4847 WB_REASON_FOREIGN_FLUSH,
4848 &frn->done);
4849 }
4850 }
4851 }
4852
4853 #else /* CONFIG_CGROUP_WRITEBACK */
4854
4855 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4856 {
4857 return 0;
4858 }
4859
4860 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4861 {
4862 }
4863
4864 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4865 {
4866 }
4867
4868 #endif /* CONFIG_CGROUP_WRITEBACK */
4869
4870 /*
4871 * DO NOT USE IN NEW FILES.
4872 *
4873 * "cgroup.event_control" implementation.
4874 *
4875 * This is way over-engineered. It tries to support fully configurable
4876 * events for each user. Such a level of flexibility is completely
4877 * unnecessary, especially in light of the planned unified hierarchy.
4878 *
4879 * Please deprecate this and replace with something simpler if at all
4880 * possible.
4881 */
4882
4883 /*
4884 * Unregister event and free resources.
4885 *
4886 * Gets called from workqueue.
4887 */
4888 static void memcg_event_remove(struct work_struct *work)
4889 {
4890 struct mem_cgroup_event *event =
4891 container_of(work, struct mem_cgroup_event, remove);
4892 struct mem_cgroup *memcg = event->memcg;
4893
4894 remove_wait_queue(event->wqh, &event->wait);
4895
4896 event->unregister_event(memcg, event->eventfd);
4897
4898 /* Notify userspace the event is going away. */
4899 eventfd_signal(event->eventfd, 1);
4900
4901 eventfd_ctx_put(event->eventfd);
4902 kfree(event);
4903 css_put(&memcg->css);
4904 }
4905
4906 /*
4907 * Gets called on EPOLLHUP on eventfd when user closes it.
4908 *
4909 * Called with wqh->lock held and interrupts disabled.
4910 */
4911 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4912 int sync, void *key)
4913 {
4914 struct mem_cgroup_event *event =
4915 container_of(wait, struct mem_cgroup_event, wait);
4916 struct mem_cgroup *memcg = event->memcg;
4917 __poll_t flags = key_to_poll(key);
4918
4919 if (flags & EPOLLHUP) {
4920 /*
4921 * If the event has been detached at cgroup removal, we
4922 * can simply return knowing the other side will cleanup
4923 * for us.
4924 *
4925 * We can't race against event freeing since the other
4926 * side will require wqh->lock via remove_wait_queue(),
4927 * which we hold.
4928 */
4929 spin_lock(&memcg->event_list_lock);
4930 if (!list_empty(&event->list)) {
4931 list_del_init(&event->list);
4932 /*
4933 * We are in atomic context, but memcg_event_remove()
4934 * may sleep, so we have to call it from a workqueue.
4935 */
4936 schedule_work(&event->remove);
4937 }
4938 spin_unlock(&memcg->event_list_lock);
4939 }
4940
4941 return 0;
4942 }
4943
4944 static void memcg_event_ptable_queue_proc(struct file *file,
4945 wait_queue_head_t *wqh, poll_table *pt)
4946 {
4947 struct mem_cgroup_event *event =
4948 container_of(pt, struct mem_cgroup_event, pt);
4949
4950 event->wqh = wqh;
4951 add_wait_queue(wqh, &event->wait);
4952 }
4953
4954 /*
4955 * DO NOT USE IN NEW FILES.
4956 *
4957 * Parse input and register new cgroup event handler.
4958 *
4959 * Input must be in format '<event_fd> <control_fd> <args>'.
4960 * Interpretation of args is defined by control file implementation.
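*
* For example, with an eventfd open as $event_fd and memory.usage_in_bytes
* open as $usage_fd, writing "$event_fd $usage_fd 1073741824" to this file
* arms a usage notification at 1G.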
4961 */
4962 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4963 char *buf, size_t nbytes, loff_t off)
4964 {
4965 struct cgroup_subsys_state *css = of_css(of);
4966 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4967 struct mem_cgroup_event *event;
4968 struct cgroup_subsys_state *cfile_css;
4969 unsigned int efd, cfd;
4970 struct fd efile;
4971 struct fd cfile;
4972 struct dentry *cdentry;
4973 const char *name;
4974 char *endp;
4975 int ret;
4976
4977 if (IS_ENABLED(CONFIG_PREEMPT_RT))
4978 return -EOPNOTSUPP;
4979
4980 buf = strstrip(buf);
4981
4982 efd = simple_strtoul(buf, &endp, 10);
4983 if (*endp != ' ')
4984 return -EINVAL;
4985 buf = endp + 1;
4986
4987 cfd = simple_strtoul(buf, &endp, 10);
4988 if (*endp == '\0')
4989 buf = endp;
4990 else if (*endp == ' ')
4991 buf = endp + 1;
4992 else
4993 return -EINVAL;
4994
4995 event = kzalloc(sizeof(*event), GFP_KERNEL);
4996 if (!event)
4997 return -ENOMEM;
4998
4999 event->memcg = memcg;
5000 INIT_LIST_HEAD(&event->list);
5001 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5002 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5003 INIT_WORK(&event->remove, memcg_event_remove);
5004
5005 efile = fdget(efd);
5006 if (!efile.file) {
5007 ret = -EBADF;
5008 goto out_kfree;
5009 }
5010
5011 event->eventfd = eventfd_ctx_fileget(efile.file);
5012 if (IS_ERR(event->eventfd)) {
5013 ret = PTR_ERR(event->eventfd);
5014 goto out_put_efile;
5015 }
5016
5017 cfile = fdget(cfd);
5018 if (!cfile.file) {
5019 ret = -EBADF;
5020 goto out_put_eventfd;
5021 }
5022
5023 /* the process needs read permission on the control file */
5024 /* AV: shouldn't we check that it's been opened for read instead? */
5025 ret = file_permission(cfile.file, MAY_READ);
5026 if (ret < 0)
5027 goto out_put_cfile;
5028
5029 /*
5030 * The control file must be a regular cgroup1 file. As a regular cgroup
5031 * file can't be renamed, it's safe to access its name afterwards.
5032 */
5033 cdentry = cfile.file->f_path.dentry;
5034 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
5035 ret = -EINVAL;
5036 goto out_put_cfile;
5037 }
5038
5039 /*
5040 * Determine the event callbacks and set them in @event. This used
5041 * to be done via struct cftype but cgroup core no longer knows
5042 * about these events. The following is crude but the whole thing
5043 * is for compatibility anyway.
5044 *
5045 * DO NOT ADD NEW FILES.
5046 */
5047 name = cdentry->d_name.name;
5048
5049 if (!strcmp(name, "memory.usage_in_bytes")) {
5050 event->register_event = mem_cgroup_usage_register_event;
5051 event->unregister_event = mem_cgroup_usage_unregister_event;
5052 } else if (!strcmp(name, "memory.oom_control")) {
5053 event->register_event = mem_cgroup_oom_register_event;
5054 event->unregister_event = mem_cgroup_oom_unregister_event;
5055 } else if (!strcmp(name, "memory.pressure_level")) {
5056 event->register_event = vmpressure_register_event;
5057 event->unregister_event = vmpressure_unregister_event;
5058 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5059 event->register_event = memsw_cgroup_usage_register_event;
5060 event->unregister_event = memsw_cgroup_usage_unregister_event;
5061 } else {
5062 ret = -EINVAL;
5063 goto out_put_cfile;
5064 }
5065
5066 /*
5067 * Verify @cfile should belong to @css. Also, remaining events are
5068 * automatically removed on cgroup destruction but the removal is
5069 * asynchronous, so take an extra ref on @css.
5070 */
5071 cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
5072 &memory_cgrp_subsys);
5073 ret = -EINVAL;
5074 if (IS_ERR(cfile_css))
5075 goto out_put_cfile;
5076 if (cfile_css != css) {
5077 css_put(cfile_css);
5078 goto out_put_cfile;
5079 }
5080
5081 ret = event->register_event(memcg, event->eventfd, buf);
5082 if (ret)
5083 goto out_put_css;
5084
5085 vfs_poll(efile.file, &event->pt);
5086
5087 spin_lock_irq(&memcg->event_list_lock);
5088 list_add(&event->list, &memcg->event_list);
5089 spin_unlock_irq(&memcg->event_list_lock);
5090
5091 fdput(cfile);
5092 fdput(efile);
5093
5094 return nbytes;
5095
5096 out_put_css:
5097 css_put(css);
5098 out_put_cfile:
5099 fdput(cfile);
5100 out_put_eventfd:
5101 eventfd_ctx_put(event->eventfd);
5102 out_put_efile:
5103 fdput(efile);
5104 out_kfree:
5105 kfree(event);
5106
5107 return ret;
5108 }
5109
5110 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5111 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5112 {
5113 /*
5114 * Deprecated.
5115 * Please take a look at tools/cgroup/memcg_slabinfo.py.
5116 */
5117 return 0;
5118 }
5119 #endif
5120
5121 static int memory_stat_show(struct seq_file *m, void *v);
5122
5123 static struct cftype mem_cgroup_legacy_files[] = {
5124 {
5125 .name = "usage_in_bytes",
5126 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5127 .read_u64 = mem_cgroup_read_u64,
5128 },
5129 {
5130 .name = "max_usage_in_bytes",
5131 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5132 .write = mem_cgroup_reset,
5133 .read_u64 = mem_cgroup_read_u64,
5134 },
5135 {
5136 .name = "limit_in_bytes",
5137 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5138 .write = mem_cgroup_write,
5139 .read_u64 = mem_cgroup_read_u64,
5140 },
5141 {
5142 .name = "soft_limit_in_bytes",
5143 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5144 .write = mem_cgroup_write,
5145 .read_u64 = mem_cgroup_read_u64,
5146 },
5147 {
5148 .name = "failcnt",
5149 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5150 .write = mem_cgroup_reset,
5151 .read_u64 = mem_cgroup_read_u64,
5152 },
5153 {
5154 .name = "stat",
5155 .seq_show = memory_stat_show,
5156 },
5157 {
5158 .name = "force_empty",
5159 .write = mem_cgroup_force_empty_write,
5160 },
5161 {
5162 .name = "use_hierarchy",
5163 .write_u64 = mem_cgroup_hierarchy_write,
5164 .read_u64 = mem_cgroup_hierarchy_read,
5165 },
5166 {
5167 .name = "cgroup.event_control", /* XXX: for compat */
5168 .write = memcg_write_event_control,
5169 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5170 },
5171 {
5172 .name = "swappiness",
5173 .read_u64 = mem_cgroup_swappiness_read,
5174 .write_u64 = mem_cgroup_swappiness_write,
5175 },
5176 {
5177 .name = "move_charge_at_immigrate",
5178 .read_u64 = mem_cgroup_move_charge_read,
5179 .write_u64 = mem_cgroup_move_charge_write,
5180 },
5181 {
5182 .name = "oom_control",
5183 .seq_show = mem_cgroup_oom_control_read,
5184 .write_u64 = mem_cgroup_oom_control_write,
5185 },
5186 {
5187 .name = "pressure_level",
5188 .seq_show = mem_cgroup_dummy_seq_show,
5189 },
5190 #ifdef CONFIG_NUMA
5191 {
5192 .name = "numa_stat",
5193 .seq_show = memcg_numa_stat_show,
5194 },
5195 #endif
5196 {
5197 .name = "kmem.limit_in_bytes",
5198 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5199 .write = mem_cgroup_write,
5200 .read_u64 = mem_cgroup_read_u64,
5201 },
5202 {
5203 .name = "kmem.usage_in_bytes",
5204 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5205 .read_u64 = mem_cgroup_read_u64,
5206 },
5207 {
5208 .name = "kmem.failcnt",
5209 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5210 .write = mem_cgroup_reset,
5211 .read_u64 = mem_cgroup_read_u64,
5212 },
5213 {
5214 .name = "kmem.max_usage_in_bytes",
5215 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5216 .write = mem_cgroup_reset,
5217 .read_u64 = mem_cgroup_read_u64,
5218 },
5219 #if defined(CONFIG_MEMCG_KMEM) && \
5220 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5221 {
5222 .name = "kmem.slabinfo",
5223 .seq_show = mem_cgroup_slab_show,
5224 },
5225 #endif
5226 {
5227 .name = "kmem.tcp.limit_in_bytes",
5228 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5229 .write = mem_cgroup_write,
5230 .read_u64 = mem_cgroup_read_u64,
5231 },
5232 {
5233 .name = "kmem.tcp.usage_in_bytes",
5234 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5235 .read_u64 = mem_cgroup_read_u64,
5236 },
5237 {
5238 .name = "kmem.tcp.failcnt",
5239 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5240 .write = mem_cgroup_reset,
5241 .read_u64 = mem_cgroup_read_u64,
5242 },
5243 {
5244 .name = "kmem.tcp.max_usage_in_bytes",
5245 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5246 .write = mem_cgroup_reset,
5247 .read_u64 = mem_cgroup_read_u64,
5248 },
5249 { }, /* terminate */
5250 };
5251
5252 /*
5253 * Private memory cgroup IDR
5254 *
5255 * Swap-out records and page cache shadow entries need to store memcg
5256 * references in constrained space, so we maintain an ID space that is
5257 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5258 * memory-controlled cgroups to 64k.
5259 *
5260 * However, there usually are many references to the offline CSS after
5261 * the cgroup has been destroyed, such as page cache or reclaimable
5262 * slab objects, that don't need to hang on to the ID. We want to keep
5263 * those dead CSS from occupying IDs, or we might quickly exhaust the
5264 * relatively small ID space and prevent the creation of new cgroups
5265 * even when there are much fewer than 64k cgroups - possibly none.
5266 *
5267 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5268 * be freed and recycled when it's no longer needed, which is usually
5269 * when the CSS is offlined.
5270 *
5271 * The only exception to that are records of swapped out tmpfs/shmem
5272 * pages that need to be attributed to live ancestors on swapin. But
5273 * those references are manageable from userspace.
5274 */
5275
5276 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5277 static DEFINE_IDR(mem_cgroup_idr);
5278 static DEFINE_SPINLOCK(memcg_idr_lock);
5279
5280 static int mem_cgroup_alloc_id(void)
5281 {
5282 int ret;
5283
5284 idr_preload(GFP_KERNEL);
5285 spin_lock(&memcg_idr_lock);
5286 ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
5287 GFP_NOWAIT);
5288 spin_unlock(&memcg_idr_lock);
5289 idr_preload_end();
5290 return ret;
5291 }
5292
5293 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5294 {
5295 if (memcg->id.id > 0) {
5296 trace_android_vh_mem_cgroup_id_remove(memcg);
5297 spin_lock(&memcg_idr_lock);
5298 idr_remove(&mem_cgroup_idr, memcg->id.id);
5299 spin_unlock(&memcg_idr_lock);
5300
5301 memcg->id.id = 0;
5302 }
5303 }
5304
5305 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5306 unsigned int n)
5307 {
5308 refcount_add(n, &memcg->id.ref);
5309 }
5310
5311 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5312 {
5313 if (refcount_sub_and_test(n, &memcg->id.ref)) {
5314 mem_cgroup_id_remove(memcg);
5315
5316 /* Memcg ID pins CSS */
5317 css_put(&memcg->css);
5318 }
5319 }
5320
5321 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5322 {
5323 mem_cgroup_id_put_many(memcg, 1);
5324 }
5325
5326 /**
5327 * mem_cgroup_from_id - look up a memcg from a memcg id
5328 * @id: the memcg id to look up
5329 *
5330 * Caller must hold rcu_read_lock().
5331 */
5332 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5333 {
5334 WARN_ON_ONCE(!rcu_read_lock_held());
5335 return idr_find(&mem_cgroup_idr, id);
5336 }
5337 EXPORT_SYMBOL_GPL(mem_cgroup_from_id);
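/*
 * Typical lookup pattern (illustrative sketch, mirroring
 * mem_cgroup_swapin_charge_folio() below): the ID only pins the IDR
 * slot, so take a CSS reference before leaving the RCU section:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */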
5338
5339 #ifdef CONFIG_SHRINKER_DEBUG
5340 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5341 {
5342 struct cgroup *cgrp;
5343 struct cgroup_subsys_state *css;
5344 struct mem_cgroup *memcg;
5345
5346 cgrp = cgroup_get_from_id(ino);
5347 if (IS_ERR(cgrp))
5348 return ERR_CAST(cgrp);
5349
5350 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5351 if (css)
5352 memcg = container_of(css, struct mem_cgroup, css);
5353 else
5354 memcg = ERR_PTR(-ENOENT);
5355
5356 cgroup_put(cgrp);
5357
5358 return memcg;
5359 }
5360 #endif
5361
5362 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5363 {
5364 struct mem_cgroup_per_node *pn;
5365
5366 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5367 if (!pn)
5368 return 1;
5369
5370 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5371 GFP_KERNEL_ACCOUNT);
5372 if (!pn->lruvec_stats_percpu) {
5373 kfree(pn);
5374 return 1;
5375 }
5376
5377 lruvec_init(&pn->lruvec);
5378 pn->memcg = memcg;
5379
5380 memcg->nodeinfo[node] = pn;
5381 return 0;
5382 }
5383
5384 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5385 {
5386 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5387
5388 if (!pn)
5389 return;
5390
5391 free_percpu(pn->lruvec_stats_percpu);
5392 kfree(pn);
5393 }
5394
5395 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5396 {
5397 int node;
5398
5399 trace_android_vh_mem_cgroup_free(memcg);
5400 for_each_node(node)
5401 free_mem_cgroup_per_node_info(memcg, node);
5402 kfree(memcg->vmstats);
5403 free_percpu(memcg->vmstats_percpu);
5404 kfree(memcg);
5405 }
5406
5407 static void mem_cgroup_free(struct mem_cgroup *memcg)
5408 {
5409 lru_gen_exit_memcg(memcg);
5410 memcg_wb_domain_exit(memcg);
5411 __mem_cgroup_free(memcg);
5412 }
5413
5414 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
5415 {
5416 struct memcg_vmstats_percpu *statc, *pstatc;
5417 struct mem_cgroup *memcg;
5418 int node, cpu;
5419 int __maybe_unused i;
5420 long error = -ENOMEM;
5421
5422 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5423 if (!memcg)
5424 return ERR_PTR(error);
5425
5426 memcg->id.id = mem_cgroup_alloc_id();
5427 if (memcg->id.id < 0) {
5428 error = memcg->id.id;
5429 goto fail;
5430 }
5431
5432 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5433 if (!memcg->vmstats)
5434 goto fail;
5435
5436 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5437 GFP_KERNEL_ACCOUNT);
5438 if (!memcg->vmstats_percpu)
5439 goto fail;
5440
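/*
 * Link each CPU's stat batch to the parent's; the flushing machinery
 * uses this chain to propagate pending update counts up the hierarchy.
 */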
5441 for_each_possible_cpu(cpu) {
5442 if (parent)
5443 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
5444 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5445 statc->parent = parent ? pstatc : NULL;
5446 statc->vmstats = memcg->vmstats;
5447 }
5448
5449 for_each_node(node)
5450 if (alloc_mem_cgroup_per_node_info(memcg, node))
5451 goto fail;
5452
5453 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5454 goto fail;
5455
5456 INIT_WORK(&memcg->high_work, high_work_func);
5457 INIT_LIST_HEAD(&memcg->oom_notify);
5458 mutex_init(&memcg->thresholds_lock);
5459 spin_lock_init(&memcg->move_lock);
5460 vmpressure_init(&memcg->vmpressure);
5461 INIT_LIST_HEAD(&memcg->event_list);
5462 spin_lock_init(&memcg->event_list_lock);
5463 memcg->socket_pressure = jiffies;
5464 #ifdef CONFIG_MEMCG_KMEM
5465 memcg->kmemcg_id = -1;
5466 INIT_LIST_HEAD(&memcg->objcg_list);
5467 #endif
5468 #ifdef CONFIG_CGROUP_WRITEBACK
5469 INIT_LIST_HEAD(&memcg->cgwb_list);
5470 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5471 memcg->cgwb_frn[i].done =
5472 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5473 #endif
5474 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5475 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5476 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5477 memcg->deferred_split_queue.split_queue_len = 0;
5478 #endif
5479 lru_gen_init_memcg(memcg);
5480 trace_android_vh_mem_cgroup_alloc(memcg);
5481 return memcg;
5482 fail:
5483 mem_cgroup_id_remove(memcg);
5484 __mem_cgroup_free(memcg);
5485 return ERR_PTR(error);
5486 }
5487
5488 static struct cgroup_subsys_state * __ref
5489 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5490 {
5491 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5492 struct mem_cgroup *memcg, *old_memcg;
5493
5494 old_memcg = set_active_memcg(parent);
5495 memcg = mem_cgroup_alloc(parent);
5496 set_active_memcg(old_memcg);
5497 if (IS_ERR(memcg))
5498 return ERR_CAST(memcg);
5499
5500 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5501 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5502 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5503 memcg->zswap_max = PAGE_COUNTER_MAX;
5504 #endif
5505 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5506 if (parent) {
5507 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
5508 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
5509
5510 page_counter_init(&memcg->memory, &parent->memory);
5511 page_counter_init(&memcg->swap, &parent->swap);
5512 page_counter_init(&memcg->kmem, &parent->kmem);
5513 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5514 } else {
5515 init_memcg_events();
5516 page_counter_init(&memcg->memory, NULL);
5517 page_counter_init(&memcg->swap, NULL);
5518 page_counter_init(&memcg->kmem, NULL);
5519 page_counter_init(&memcg->tcpmem, NULL);
5520
5521 root_mem_cgroup = memcg;
5522 return &memcg->css;
5523 }
5524
5525 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5526 static_branch_inc(&memcg_sockets_enabled_key);
5527
5528 #if defined(CONFIG_MEMCG_KMEM)
5529 if (!cgroup_memory_nobpf)
5530 static_branch_inc(&memcg_bpf_enabled_key);
5531 #endif
5532
5533 return &memcg->css;
5534 }
5535
5536 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5537 {
5538 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5539
5540 if (memcg_online_kmem(memcg))
5541 goto remove_id;
5542
5543 /*
5544 * A memcg must be visible for expand_shrinker_info()
5545 * by the time the maps are allocated. So, we allocate maps
5546 * here, when for_each_mem_cgroup() can't skip it.
5547 */
5548 if (alloc_shrinker_info(memcg))
5549 goto offline_kmem;
5550
5551 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
5552 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5553 FLUSH_TIME);
5554 lru_gen_online_memcg(memcg);
5555
5556 /* Online state pins memcg ID, memcg ID pins CSS */
5557 refcount_set(&memcg->id.ref, 1);
5558 css_get(css);
5559
5560 /*
5561 * Ensure mem_cgroup_from_id() works once we're fully online.
5562 *
5563 * We could do this earlier and require callers to filter with
5564 * css_tryget_online(). But right now there are no users that
5565 * need earlier access, and the workingset code relies on the
5566 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5567 * publish it here at the end of onlining. This matches the
5568 * regular ID destruction during offlining.
5569 */
5570 spin_lock(&memcg_idr_lock);
5571 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5572 spin_unlock(&memcg_idr_lock);
5573
5574 trace_android_vh_mem_cgroup_css_online(css, memcg);
5575 return 0;
5576 offline_kmem:
5577 memcg_offline_kmem(memcg);
5578 remove_id:
5579 mem_cgroup_id_remove(memcg);
5580 return -ENOMEM;
5581 }
5582
5583 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5584 {
5585 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5586 struct mem_cgroup_event *event, *tmp;
5587
5588 trace_android_vh_mem_cgroup_css_offline(css, memcg);
5589 /*
5590 * Unregister events and notify userspace.
5591 * Notify userspace about cgroup removing only after rmdir of cgroup
5592 * directory to avoid race between userspace and kernelspace.
5593 */
5594 spin_lock_irq(&memcg->event_list_lock);
5595 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5596 list_del_init(&event->list);
5597 schedule_work(&event->remove);
5598 }
5599 spin_unlock_irq(&memcg->event_list_lock);
5600
5601 page_counter_set_min(&memcg->memory, 0);
5602 page_counter_set_low(&memcg->memory, 0);
5603
5604 memcg_offline_kmem(memcg);
5605 reparent_shrinker_deferred(memcg);
5606 wb_memcg_offline(memcg);
5607 lru_gen_offline_memcg(memcg);
5608
5609 drain_all_stock(memcg);
5610
5611 mem_cgroup_id_put(memcg);
5612 }
5613
5614 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5615 {
5616 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5617
5618 invalidate_reclaim_iterators(memcg);
5619 lru_gen_release_memcg(memcg);
5620 }
5621
5622 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5623 {
5624 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5625 int __maybe_unused i;
5626
5627 #ifdef CONFIG_CGROUP_WRITEBACK
5628 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5629 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5630 #endif
5631 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5632 static_branch_dec(&memcg_sockets_enabled_key);
5633
5634 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5635 static_branch_dec(&memcg_sockets_enabled_key);
5636
5637 #if defined(CONFIG_MEMCG_KMEM)
5638 if (!cgroup_memory_nobpf)
5639 static_branch_dec(&memcg_bpf_enabled_key);
5640 #endif
5641
5642 vmpressure_cleanup(&memcg->vmpressure);
5643 cancel_work_sync(&memcg->high_work);
5644 mem_cgroup_remove_from_trees(memcg);
5645 free_shrinker_info(memcg);
5646 mem_cgroup_free(memcg);
5647 }
5648
5649 /**
5650 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5651 * @css: the target css
5652 *
5653 * Reset the states of the mem_cgroup associated with @css. This is
5654 * invoked when the userland requests disabling on the default hierarchy
5655 * but the memcg is pinned through dependency. The memcg should stop
5656 * applying policies and should revert to the vanilla state as it may be
5657 * made visible again.
5658 *
5659 * The current implementation only resets the essential configurations.
5660 * This needs to be expanded to cover all the visible parts.
5661 */
5662 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5663 {
5664 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5665
5666 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5667 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5668 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5669 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5670 page_counter_set_min(&memcg->memory, 0);
5671 page_counter_set_low(&memcg->memory, 0);
5672 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5673 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5674 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5675 memcg_wb_domain_size_changed(memcg);
5676 }
5677
5678 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5679 {
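/*
 * Per-CPU flush: fold the children's pending contributions and this
 * CPU's deltas into this level's counters, then forward the sum to the
 * parent's pending counters (see the loops below).
 */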
5680 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5681 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5682 struct memcg_vmstats_percpu *statc;
5683 long delta, delta_cpu, v;
5684 int i, nid;
5685
5686 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5687
5688 for (i = 0; i < MEMCG_NR_STAT; i++) {
5689 /*
5690 * Collect the aggregated propagation counts of groups
5691 * below us. We're in a per-cpu loop here and this is
5692 * a global counter, so the first cycle will get them.
5693 */
5694 delta = memcg->vmstats->state_pending[i];
5695 if (delta)
5696 memcg->vmstats->state_pending[i] = 0;
5697
5698 /* Add CPU changes on this level since the last flush */
5699 delta_cpu = 0;
5700 v = READ_ONCE(statc->state[i]);
5701 if (v != statc->state_prev[i]) {
5702 delta_cpu = v - statc->state_prev[i];
5703 delta += delta_cpu;
5704 statc->state_prev[i] = v;
5705 }
5706
5707 /* Aggregate counts on this level and propagate upwards */
5708 if (delta_cpu)
5709 memcg->vmstats->state_local[i] += delta_cpu;
5710
5711 if (delta) {
5712 memcg->vmstats->state[i] += delta;
5713 if (parent)
5714 parent->vmstats->state_pending[i] += delta;
5715 }
5716 }
5717
5718 for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5719 delta = memcg->vmstats->events_pending[i];
5720 if (delta)
5721 memcg->vmstats->events_pending[i] = 0;
5722
5723 delta_cpu = 0;
5724 v = READ_ONCE(statc->events[i]);
5725 if (v != statc->events_prev[i]) {
5726 delta_cpu = v - statc->events_prev[i];
5727 delta += delta_cpu;
5728 statc->events_prev[i] = v;
5729 }
5730
5731 if (delta_cpu)
5732 memcg->vmstats->events_local[i] += delta_cpu;
5733
5734 if (delta) {
5735 memcg->vmstats->events[i] += delta;
5736 if (parent)
5737 parent->vmstats->events_pending[i] += delta;
5738 }
5739 }
5740
5741 for_each_node_state(nid, N_MEMORY) {
5742 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5743 struct mem_cgroup_per_node *ppn = NULL;
5744 struct lruvec_stats_percpu *lstatc;
5745
5746 if (parent)
5747 ppn = parent->nodeinfo[nid];
5748
5749 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5750
5751 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5752 delta = pn->lruvec_stats.state_pending[i];
5753 if (delta)
5754 pn->lruvec_stats.state_pending[i] = 0;
5755
5756 delta_cpu = 0;
5757 v = READ_ONCE(lstatc->state[i]);
5758 if (v != lstatc->state_prev[i]) {
5759 delta_cpu = v - lstatc->state_prev[i];
5760 delta += delta_cpu;
5761 lstatc->state_prev[i] = v;
5762 }
5763
5764 if (delta_cpu)
5765 pn->lruvec_stats.state_local[i] += delta_cpu;
5766
5767 if (delta) {
5768 pn->lruvec_stats.state[i] += delta;
5769 if (ppn)
5770 ppn->lruvec_stats.state_pending[i] += delta;
5771 }
5772 }
5773 }
5774 statc->stats_updates = 0;
5775 /* We are in a per-cpu loop here, only do the atomic write once */
5776 if (atomic64_read(&memcg->vmstats->stats_updates))
5777 atomic64_set(&memcg->vmstats->stats_updates, 0);
5778 }
5779
5780 #ifdef CONFIG_MMU
5781 /* Handlers for move charge at task migration. */
5782 static int mem_cgroup_do_precharge(unsigned long count)
5783 {
5784 int ret;
5785
5786 /* Try a single bulk charge without reclaim first, kswapd may wake */
5787 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5788 if (!ret) {
5789 mc.precharge += count;
5790 return ret;
5791 }
5792
5793 /* Try charges one by one with reclaim, but do not retry */
5794 while (count--) {
5795 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5796 if (ret)
5797 return ret;
5798 mc.precharge++;
5799 cond_resched();
5800 }
5801 return 0;
5802 }
5803
5804 union mc_target {
5805 struct folio *folio;
5806 swp_entry_t ent;
5807 };
5808
5809 enum mc_target_type {
5810 MC_TARGET_NONE = 0,
5811 MC_TARGET_PAGE,
5812 MC_TARGET_SWAP,
5813 MC_TARGET_DEVICE,
5814 };
5815
5816 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5817 unsigned long addr, pte_t ptent)
5818 {
5819 struct page *page = vm_normal_page(vma, addr, ptent);
5820
5821 if (!page)
5822 return NULL;
5823 if (PageAnon(page)) {
5824 if (!(mc.flags & MOVE_ANON))
5825 return NULL;
5826 } else {
5827 if (!(mc.flags & MOVE_FILE))
5828 return NULL;
5829 }
5830 get_page(page);
5831
5832 return page;
5833 }
5834
5835 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5836 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5837 pte_t ptent, swp_entry_t *entry)
5838 {
5839 struct page *page = NULL;
5840 swp_entry_t ent = pte_to_swp_entry(ptent);
5841
5842 if (!(mc.flags & MOVE_ANON))
5843 return NULL;
5844
5845 /*
5846 * Handle device private pages that are not accessible by the CPU, but
5847 * stored as special swap entries in the page table.
5848 */
5849 if (is_device_private_entry(ent)) {
5850 page = pfn_swap_entry_to_page(ent);
5851 if (!get_page_unless_zero(page))
5852 return NULL;
5853 return page;
5854 }
5855
5856 if (non_swap_entry(ent))
5857 return NULL;
5858
5859 /*
5860 * Because swap_cache_get_folio() updates some statistics counters,
5861 * we call find_get_page() with swapper_space directly.
5862 */
5863 page = find_get_page(swap_address_space(ent), swp_offset(ent));
5864 entry->val = ent.val;
5865
5866 return page;
5867 }
5868 #else
5869 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5870 pte_t ptent, swp_entry_t *entry)
5871 {
5872 return NULL;
5873 }
5874 #endif
5875
5876 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5877 unsigned long addr, pte_t ptent)
5878 {
5879 unsigned long index;
5880 struct folio *folio;
5881
5882 if (!vma->vm_file) /* anonymous vma */
5883 return NULL;
5884 if (!(mc.flags & MOVE_FILE))
5885 return NULL;
5886
5887 /* folio is moved even if it's not RSS of this task (page-faulted). */
5888 /* shmem/tmpfs may report page out on swap: account for that too. */
5889 index = linear_page_index(vma, addr);
5890 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5891 if (IS_ERR(folio))
5892 return NULL;
5893 return folio_file_page(folio, index);
5894 }
5895
5896 /**
5897 * mem_cgroup_move_account - move account of the folio
5898 * @folio: The folio.
5899 * @compound: charge the page as compound or small page
5900 * @from: mem_cgroup which the folio is moved from.
5901 * @to: mem_cgroup which the folio is moved to. @from != @to.
5902 *
5903 * The folio must be locked and not on the LRU.
5904 *
5905 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5906 * from old cgroup.
5907 */
5908 static int mem_cgroup_move_account(struct folio *folio,
5909 bool compound,
5910 struct mem_cgroup *from,
5911 struct mem_cgroup *to)
5912 {
5913 struct lruvec *from_vec, *to_vec;
5914 struct pglist_data *pgdat;
5915 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5916 int nid, ret;
5917
5918 VM_BUG_ON(from == to);
5919 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5920 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5921 VM_BUG_ON(compound && !folio_test_large(folio));
5922
5923 ret = -EINVAL;
5924 if (folio_memcg(folio) != from)
5925 goto out;
5926
5927 pgdat = folio_pgdat(folio);
5928 from_vec = mem_cgroup_lruvec(from, pgdat);
5929 to_vec = mem_cgroup_lruvec(to, pgdat);
5930
5931 folio_memcg_lock(folio);
5932
5933 if (folio_test_anon(folio)) {
5934 if (folio_mapped(folio)) {
5935 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5936 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5937 if (folio_test_pmd_mappable(folio)) {
5938 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5939 -nr_pages);
5940 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5941 nr_pages);
5942 }
5943 }
5944 } else {
5945 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5946 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5947
5948 if (folio_test_swapbacked(folio)) {
5949 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5950 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5951 }
5952
5953 if (folio_mapped(folio)) {
5954 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5955 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5956 }
5957
5958 if (folio_test_dirty(folio)) {
5959 struct address_space *mapping = folio_mapping(folio);
5960
5961 if (mapping_can_writeback(mapping)) {
5962 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5963 -nr_pages);
5964 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5965 nr_pages);
5966 }
5967 }
5968 }
5969
5970 #ifdef CONFIG_SWAP
5971 if (folio_test_swapcache(folio)) {
5972 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
5973 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
5974 }
5975 #endif
5976 if (folio_test_writeback(folio)) {
5977 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5978 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5979 }
5980
5981 /*
5982 * All state has been migrated, let's switch to the new memcg.
5983 *
5984 * It is safe to change page's memcg here because the page
5985 * is referenced, charged, isolated, and locked: we can't race
5986 * with (un)charging, migration, LRU putback, or anything else
5987 * that would rely on a stable page's memory cgroup.
5988 *
5989 * Note that folio_memcg_lock is a memcg lock, not a page lock,
5990 * to save space. As soon as we switch page's memory cgroup to a
5991 * new memcg that isn't locked, the above state can change
5992 * concurrently again. Make sure we're truly done with it.
5993 */
5994 smp_mb();
5995
5996 css_get(&to->css);
5997 css_put(&from->css);
5998
5999 folio->memcg_data = (unsigned long)to;
6000
6001 __folio_memcg_unlock(from);
6002
6003 ret = 0;
6004 nid = folio_nid(folio);
6005
6006 local_irq_disable();
6007 mem_cgroup_charge_statistics(to, nr_pages);
6008 memcg_check_events(to, nid);
6009 mem_cgroup_charge_statistics(from, -nr_pages);
6010 memcg_check_events(from, nid);
6011 local_irq_enable();
6012 out:
6013 return ret;
6014 }
6015
6016 /**
6017 * get_mctgt_type - get target type of moving charge
6018 * @vma: the vma to which the pte to be checked belongs
6019 * @addr: the address corresponding to the pte to be checked
6020 * @ptent: the pte to be checked
6021 * @target: the pointer where the target folio or swap entry will be stored (can be NULL)
6022 *
6023 * Context: Called with pte lock held.
6024 * Return:
6025 * * MC_TARGET_NONE - If the pte is not a target for move charge.
6026 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
6027 * move charge. If @target is not NULL, the folio is stored in target->folio
6028 * with extra refcnt taken (Caller should release it).
6029 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6030 * target for charge migration. If @target is not NULL, the entry is
6031 * stored in target->ent.
6032 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but the page is device memory and
6033 * thus not on the lru. For now such a page is charged like a regular page
6034 * would be, as it is just special memory taking the place of a regular page.
6035 * See Documentation/vm/hmm.txt and include/linux/hmm.h
6036 */
6037 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6038 unsigned long addr, pte_t ptent, union mc_target *target)
6039 {
6040 struct page *page = NULL;
6041 struct folio *folio;
6042 enum mc_target_type ret = MC_TARGET_NONE;
6043 swp_entry_t ent = { .val = 0 };
6044
6045 if (pte_present(ptent))
6046 page = mc_handle_present_pte(vma, addr, ptent);
6047 else if (pte_none_mostly(ptent))
6048 /*
6049 * PTE markers should be treated as a none pte here, separated
6050 * from other swap handling below.
6051 */
6052 page = mc_handle_file_pte(vma, addr, ptent);
6053 else if (is_swap_pte(ptent))
6054 page = mc_handle_swap_pte(vma, ptent, &ent);
6055
6056 if (page)
6057 folio = page_folio(page);
6058 if (target && page) {
6059 if (!folio_trylock(folio)) {
6060 folio_put(folio);
6061 return ret;
6062 }
6063 /*
6064 * page_mapped() must be stable during the move. This
6065 * pte is locked, so if it's present, the page cannot
6066 * become unmapped. If it isn't, we have only partial
6067 * control over the mapped state: the page lock will
6068 * prevent new faults against pagecache and swapcache,
6069 * so an unmapped page cannot become mapped. However,
6070 * if the page is already mapped elsewhere, it can
6071 * unmap, and there is nothing we can do about it.
6072 * Alas, skip moving the page in this case.
6073 */
6074 if (!pte_present(ptent) && page_mapped(page)) {
6075 folio_unlock(folio);
6076 folio_put(folio);
6077 return ret;
6078 }
6079 }
6080
6081 if (!page && !ent.val)
6082 return ret;
6083 if (page) {
6084 /*
6085 * Do only loose check w/o serialization.
6086 * mem_cgroup_move_account() checks the page is valid or
6087 * not under LRU exclusion.
6088 */
6089 if (folio_memcg(folio) == mc.from) {
6090 ret = MC_TARGET_PAGE;
6091 if (folio_is_device_private(folio) ||
6092 folio_is_device_coherent(folio))
6093 ret = MC_TARGET_DEVICE;
6094 if (target)
6095 target->folio = folio;
6096 }
6097 if (!ret || !target) {
6098 if (target)
6099 folio_unlock(folio);
6100 folio_put(folio);
6101 }
6102 }
6103 /*
6104 * There is a swap entry and a page doesn't exist or isn't charged.
6105 * But we cannot move a tail-page in a THP.
6106 */
6107 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
6108 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6109 ret = MC_TARGET_SWAP;
6110 if (target)
6111 target->ent = ent;
6112 }
6113 return ret;
6114 }
6115
6116 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6117 /*
6118 * We don't consider PMD mapped swapping or file mapped pages because THP does
6119 * not support them for now.
6120 * Caller should make sure that pmd_trans_huge(pmd) is true.
6121 */
6122 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6123 unsigned long addr, pmd_t pmd, union mc_target *target)
6124 {
6125 struct page *page = NULL;
6126 struct folio *folio;
6127 enum mc_target_type ret = MC_TARGET_NONE;
6128
6129 if (unlikely(is_swap_pmd(pmd))) {
6130 VM_BUG_ON(thp_migration_supported() &&
6131 !is_pmd_migration_entry(pmd));
6132 return ret;
6133 }
6134 page = pmd_page(pmd);
6135 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6136 folio = page_folio(page);
6137 if (!(mc.flags & MOVE_ANON))
6138 return ret;
6139 if (folio_memcg(folio) == mc.from) {
6140 ret = MC_TARGET_PAGE;
6141 if (target) {
6142 folio_get(folio);
6143 if (!folio_trylock(folio)) {
6144 folio_put(folio);
6145 return MC_TARGET_NONE;
6146 }
6147 target->folio = folio;
6148 }
6149 }
6150 return ret;
6151 }
6152 #else
6153 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6154 unsigned long addr, pmd_t pmd, union mc_target *target)
6155 {
6156 return MC_TARGET_NONE;
6157 }
6158 #endif
6159
6160 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6161 unsigned long addr, unsigned long end,
6162 struct mm_walk *walk)
6163 {
6164 struct vm_area_struct *vma = walk->vma;
6165 pte_t *pte;
6166 spinlock_t *ptl;
6167
6168 ptl = pmd_trans_huge_lock(pmd, vma);
6169 if (ptl) {
6170 /*
6171 * Note there cannot be MC_TARGET_DEVICE for now as we do not
6172 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
6173 * this might change.
6174 */
6175 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6176 mc.precharge += HPAGE_PMD_NR;
6177 spin_unlock(ptl);
6178 return 0;
6179 }
6180
6181 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6182 if (!pte)
6183 return 0;
6184 for (; addr != end; pte++, addr += PAGE_SIZE)
6185 if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
6186 mc.precharge++; /* increment precharge temporarily */
6187 pte_unmap_unlock(pte - 1, ptl);
6188 cond_resched();
6189
6190 return 0;
6191 }
6192
6193 static const struct mm_walk_ops precharge_walk_ops = {
6194 .pmd_entry = mem_cgroup_count_precharge_pte_range,
6195 .walk_lock = PGWALK_RDLOCK,
6196 };
6197
6198 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6199 {
6200 unsigned long precharge;
6201
6202 mmap_read_lock(mm);
6203 walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
6204 mmap_read_unlock(mm);
6205
6206 precharge = mc.precharge;
6207 mc.precharge = 0;
6208
6209 return precharge;
6210 }
6211
6212 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6213 {
6214 unsigned long precharge = mem_cgroup_count_precharge(mm);
6215
6216 VM_BUG_ON(mc.moving_task);
6217 mc.moving_task = current;
6218 return mem_cgroup_do_precharge(precharge);
6219 }
6220
6221 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6222 static void __mem_cgroup_clear_mc(void)
6223 {
6224 struct mem_cgroup *from = mc.from;
6225 struct mem_cgroup *to = mc.to;
6226
6227 /* we must uncharge all the leftover precharges from mc.to */
6228 if (mc.precharge) {
6229 cancel_charge(mc.to, mc.precharge);
6230 mc.precharge = 0;
6231 }
6232 /*
6233 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6234 * we must uncharge here.
6235 */
6236 if (mc.moved_charge) {
6237 cancel_charge(mc.from, mc.moved_charge);
6238 mc.moved_charge = 0;
6239 }
6240 /* we must fixup refcnts and charges */
6241 if (mc.moved_swap) {
6242 /* uncharge swap account from the old cgroup */
6243 if (!mem_cgroup_is_root(mc.from))
6244 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6245
6246 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6247
6248 /*
6249 * we charged both to->memory and to->memsw, so we
6250 * should uncharge to->memory.
6251 */
6252 if (!mem_cgroup_is_root(mc.to))
6253 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6254
6255 mc.moved_swap = 0;
6256 }
6257 memcg_oom_recover(from);
6258 memcg_oom_recover(to);
6259 wake_up_all(&mc.waitq);
6260 }
6261
6262 static void mem_cgroup_clear_mc(void)
6263 {
6264 struct mm_struct *mm = mc.mm;
6265
6266 /*
6267 * we must clear moving_task before waking up waiters at the end of
6268 * task migration.
6269 */
6270 mc.moving_task = NULL;
6271 __mem_cgroup_clear_mc();
6272 spin_lock(&mc.lock);
6273 mc.from = NULL;
6274 mc.to = NULL;
6275 mc.mm = NULL;
6276 spin_unlock(&mc.lock);
6277
6278 mmput(mm);
6279 }
6280
6281 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6282 {
6283 struct cgroup_subsys_state *css;
6284 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6285 struct mem_cgroup *from;
6286 struct task_struct *leader, *p;
6287 struct mm_struct *mm;
6288 unsigned long move_flags;
6289 int ret = 0;
6290
6291 /* charge immigration isn't supported on the default hierarchy */
6292 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6293 return 0;
6294
6295 /*
6296 * Multi-process migrations only happen on the default hierarchy
6297 * where charge immigration is not used. Perform charge
6298 * immigration if @tset contains a leader and whine if there are
6299 * multiple.
6300 */
6301 p = NULL;
6302 cgroup_taskset_for_each_leader(leader, css, tset) {
6303 WARN_ON_ONCE(p);
6304 p = leader;
6305 memcg = mem_cgroup_from_css(css);
6306 }
6307 if (!p)
6308 return 0;
6309
6310 /*
6311 * We are now committed to this value whatever it is. Changes in this
6312 * tunable will only affect upcoming migrations, not the current one.
6313 * So we need to save it, and keep it going.
6314 */
6315 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6316 if (!move_flags)
6317 return 0;
6318
6319 from = mem_cgroup_from_task(p);
6320
6321 VM_BUG_ON(from == memcg);
6322
6323 mm = get_task_mm(p);
6324 if (!mm)
6325 return 0;
6326 /* We move charges only when we move an owner of the mm */
6327 if (mm->owner == p) {
6328 VM_BUG_ON(mc.from);
6329 VM_BUG_ON(mc.to);
6330 VM_BUG_ON(mc.precharge);
6331 VM_BUG_ON(mc.moved_charge);
6332 VM_BUG_ON(mc.moved_swap);
6333
6334 spin_lock(&mc.lock);
6335 mc.mm = mm;
6336 mc.from = from;
6337 mc.to = memcg;
6338 mc.flags = move_flags;
6339 spin_unlock(&mc.lock);
6340 /* We set mc.moving_task later */
6341
6342 ret = mem_cgroup_precharge_mc(mm);
6343 if (ret)
6344 mem_cgroup_clear_mc();
6345 } else {
6346 mmput(mm);
6347 }
6348 return ret;
6349 }
6350
6351 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6352 {
6353 if (mc.to)
6354 mem_cgroup_clear_mc();
6355 }
6356
6357 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6358 unsigned long addr, unsigned long end,
6359 struct mm_walk *walk)
6360 {
6361 int ret = 0;
6362 struct vm_area_struct *vma = walk->vma;
6363 pte_t *pte;
6364 spinlock_t *ptl;
6365 enum mc_target_type target_type;
6366 union mc_target target;
6367 struct folio *folio;
6368
6369 ptl = pmd_trans_huge_lock(pmd, vma);
6370 if (ptl) {
6371 if (mc.precharge < HPAGE_PMD_NR) {
6372 spin_unlock(ptl);
6373 return 0;
6374 }
6375 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6376 if (target_type == MC_TARGET_PAGE) {
6377 folio = target.folio;
6378 if (folio_isolate_lru(folio)) {
6379 if (!mem_cgroup_move_account(folio, true,
6380 mc.from, mc.to)) {
6381 mc.precharge -= HPAGE_PMD_NR;
6382 mc.moved_charge += HPAGE_PMD_NR;
6383 }
6384 folio_putback_lru(folio);
6385 }
6386 folio_unlock(folio);
6387 folio_put(folio);
6388 } else if (target_type == MC_TARGET_DEVICE) {
6389 folio = target.folio;
6390 if (!mem_cgroup_move_account(folio, true,
6391 mc.from, mc.to)) {
6392 mc.precharge -= HPAGE_PMD_NR;
6393 mc.moved_charge += HPAGE_PMD_NR;
6394 }
6395 folio_unlock(folio);
6396 folio_put(folio);
6397 }
6398 spin_unlock(ptl);
6399 return 0;
6400 }
6401
6402 retry:
6403 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6404 if (!pte)
6405 return 0;
6406 for (; addr != end; addr += PAGE_SIZE) {
6407 pte_t ptent = ptep_get(pte++);
6408 bool device = false;
6409 swp_entry_t ent;
6410
6411 if (!mc.precharge)
6412 break;
6413
6414 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6415 case MC_TARGET_DEVICE:
6416 device = true;
6417 fallthrough;
6418 case MC_TARGET_PAGE:
6419 folio = target.folio;
6420 /*
6421 * We can have a part of the split pmd here. Moving it
6422 * can be done but it would be too convoluted so simply
6423 * ignore such a partial THP and keep it in the original
6424 * memcg. There should be somebody mapping the head.
6425 */
6426 if (folio_test_large(folio))
6427 goto put;
6428 if (!device && !folio_isolate_lru(folio))
6429 goto put;
6430 if (!mem_cgroup_move_account(folio, false,
6431 mc.from, mc.to)) {
6432 mc.precharge--;
6433 /* we uncharge from mc.from later. */
6434 mc.moved_charge++;
6435 }
6436 if (!device)
6437 folio_putback_lru(folio);
6438 put: /* get_mctgt_type() gets & locks the page */
6439 folio_unlock(folio);
6440 folio_put(folio);
6441 break;
6442 case MC_TARGET_SWAP:
6443 ent = target.ent;
6444 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6445 mc.precharge--;
6446 mem_cgroup_id_get_many(mc.to, 1);
6447 /* we fixup other refcnts and charges later. */
6448 mc.moved_swap++;
6449 }
6450 break;
6451 default:
6452 break;
6453 }
6454 }
6455 pte_unmap_unlock(pte - 1, ptl);
6456 cond_resched();
6457
6458 if (addr != end) {
6459 /*
6460 * We have consumed all precharges we got in can_attach().
6461 * We try charge one by one, but don't do any additional
6462 * charges to mc.to if we have failed in charge once in attach()
6463 * phase.
6464 */
6465 ret = mem_cgroup_do_precharge(1);
6466 if (!ret)
6467 goto retry;
6468 }
6469
6470 return ret;
6471 }
6472
6473 static const struct mm_walk_ops charge_walk_ops = {
6474 .pmd_entry = mem_cgroup_move_charge_pte_range,
6475 .walk_lock = PGWALK_RDLOCK,
6476 };
6477
6478 static void mem_cgroup_move_charge(void)
6479 {
6480 lru_add_drain_all();
6481 /*
6482 * Signal folio_memcg_lock() to take the memcg's move_lock
6483 * while we're moving its pages to another memcg. Then wait
6484 * for already started RCU-only updates to finish.
6485 */
6486 atomic_inc(&mc.from->moving_account);
6487 synchronize_rcu();
6488 retry:
6489 if (unlikely(!mmap_read_trylock(mc.mm))) {
6490 /*
6491 * Someone holding the mmap_lock might be waiting in the
6492 * waitq. So we cancel all extra charges, wake up all waiters,
6493 * and retry. Because we cancel precharges, we might not be able
6494 * to move enough charges, but moving charge is a best-effort
6495 * feature anyway, so it wouldn't be a big problem.
6496 */
6497 __mem_cgroup_clear_mc();
6498 cond_resched();
6499 goto retry;
6500 }
6501 /*
6502 * When we have consumed all precharges and failed in doing
6503 * additional charge, the page walk just aborts.
6504 */
6505 walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6506 mmap_read_unlock(mc.mm);
6507 atomic_dec(&mc.from->moving_account);
6508 }
6509
6510 static void mem_cgroup_move_task(void)
6511 {
6512 if (mc.to) {
6513 mem_cgroup_move_charge();
6514 mem_cgroup_clear_mc();
6515 }
6516 }
6517 #else /* !CONFIG_MMU */
6518 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6519 {
6520 return 0;
6521 }
6522 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6523 {
6524 }
6525 static void mem_cgroup_move_task(void)
6526 {
6527 }
6528 #endif
6529
6530 #ifdef CONFIG_LRU_GEN
6531 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6532 {
6533 struct task_struct *task;
6534 struct cgroup_subsys_state *css;
6535
6536 /* find the first leader if there is any */
6537 cgroup_taskset_for_each_leader(task, css, tset)
6538 break;
6539
6540 if (!task)
6541 return;
6542
6543 task_lock(task);
6544 if (task->mm && READ_ONCE(task->mm->owner) == task)
6545 lru_gen_migrate_mm(task->mm);
6546 task_unlock(task);
6547 }
6548 #else
6549 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6550 {
6551 }
6552 #endif /* CONFIG_LRU_GEN */
6553
6554 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6555 {
6556 if (value == PAGE_COUNTER_MAX)
6557 seq_puts(m, "max\n");
6558 else
6559 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6560
6561 return 0;
6562 }
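/*
 * Example output (illustrative): PAGE_COUNTER_MAX reads back as "max";
 * any other value is reported in bytes, e.g. "536870912" for 512M.
 */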
6563
6564 static u64 memory_current_read(struct cgroup_subsys_state *css,
6565 struct cftype *cft)
6566 {
6567 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6568
6569 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6570 }
6571
6572 static u64 memory_peak_read(struct cgroup_subsys_state *css,
6573 struct cftype *cft)
6574 {
6575 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6576
6577 return (u64)memcg->memory.watermark * PAGE_SIZE;
6578 }
6579
6580 static int memory_min_show(struct seq_file *m, void *v)
6581 {
6582 return seq_puts_memcg_tunable(m,
6583 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6584 }
6585
6586 static ssize_t memory_min_write(struct kernfs_open_file *of,
6587 char *buf, size_t nbytes, loff_t off)
6588 {
6589 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6590 unsigned long min;
6591 int err;
6592
6593 buf = strstrip(buf);
6594 err = page_counter_memparse(buf, "max", &min);
6595 if (err)
6596 return err;
6597
6598 page_counter_set_min(&memcg->memory, min);
6599
6600 return nbytes;
6601 }
6602
6603 static int memory_low_show(struct seq_file *m, void *v)
6604 {
6605 return seq_puts_memcg_tunable(m,
6606 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6607 }
6608
6609 static ssize_t memory_low_write(struct kernfs_open_file *of,
6610 char *buf, size_t nbytes, loff_t off)
6611 {
6612 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6613 unsigned long low;
6614 int err;
6615
6616 buf = strstrip(buf);
6617 err = page_counter_memparse(buf, "max", &low);
6618 if (err)
6619 return err;
6620
6621 page_counter_set_low(&memcg->memory, low);
6622
6623 return nbytes;
6624 }
6625
6626 static int memory_high_show(struct seq_file *m, void *v)
6627 {
6628 return seq_puts_memcg_tunable(m,
6629 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6630 }
6631
6632 static ssize_t memory_high_write(struct kernfs_open_file *of,
6633 char *buf, size_t nbytes, loff_t off)
6634 {
6635 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6636 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6637 bool drained = false;
6638 unsigned long high;
6639 int err;
6640
6641 buf = strstrip(buf);
6642 err = page_counter_memparse(buf, "max", &high);
6643 if (err)
6644 return err;
6645
6646 page_counter_set_high(&memcg->memory, high);
6647
6648 for (;;) {
6649 unsigned long nr_pages = page_counter_read(&memcg->memory);
6650 unsigned long reclaimed;
6651
6652 if (nr_pages <= high)
6653 break;
6654
6655 if (signal_pending(current))
6656 break;
6657
6658 if (!drained) {
6659 drain_all_stock(memcg);
6660 drained = true;
6661 continue;
6662 }
6663
6664 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6665 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6666
6667 if (!reclaimed && !nr_retries--)
6668 break;
6669 }
6670
6671 memcg_wb_domain_size_changed(memcg);
6672 return nbytes;
6673 }
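/*
 * Example (illustrative): lowering memory.high reclaims in the writer's
 * context until usage fits under the new ceiling, a signal arrives, or
 * the retries above run out:
 *
 *	# echo 512M > /sys/fs/cgroup/<group>/memory.high
 */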
6674
6675 static int memory_max_show(struct seq_file *m, void *v)
6676 {
6677 return seq_puts_memcg_tunable(m,
6678 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6679 }
6680
6681 static ssize_t memory_max_write(struct kernfs_open_file *of,
6682 char *buf, size_t nbytes, loff_t off)
6683 {
6684 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6685 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6686 bool drained = false;
6687 unsigned long max;
6688 int err;
6689
6690 buf = strstrip(buf);
6691 err = page_counter_memparse(buf, "max", &max);
6692 if (err)
6693 return err;
6694
6695 xchg(&memcg->memory.max, max);
6696
6697 for (;;) {
6698 unsigned long nr_pages = page_counter_read(&memcg->memory);
6699
6700 if (nr_pages <= max)
6701 break;
6702
6703 if (signal_pending(current))
6704 break;
6705
6706 if (!drained) {
6707 drain_all_stock(memcg);
6708 drained = true;
6709 continue;
6710 }
6711
6712 if (nr_reclaims) {
6713 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6714 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6715 nr_reclaims--;
6716 continue;
6717 }
6718
6719 memcg_memory_event(memcg, MEMCG_OOM);
6720 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6721 break;
6722 }
6723
6724 memcg_wb_domain_size_changed(memcg);
6725 return nbytes;
6726 }
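/*
 * Example (illustrative): memory.max is a hard limit. If reclaim cannot
 * bring usage below it, the loop above falls back to OOM-killing inside
 * the group:
 *
 *	# echo 1G > /sys/fs/cgroup/<group>/memory.max
 */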
6727
6728 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6729 {
6730 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6731 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6732 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6733 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6734 seq_printf(m, "oom_kill %lu\n",
6735 atomic_long_read(&events[MEMCG_OOM_KILL]));
6736 seq_printf(m, "oom_group_kill %lu\n",
6737 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6738 }
6739
6740 static int memory_events_show(struct seq_file *m, void *v)
6741 {
6742 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6743
6744 __memory_events_show(m, memcg->memory_events);
6745 return 0;
6746 }
6747
6748 static int memory_events_local_show(struct seq_file *m, void *v)
6749 {
6750 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6751
6752 __memory_events_show(m, memcg->memory_events_local);
6753 return 0;
6754 }
6755
6756 static int memory_stat_show(struct seq_file *m, void *v)
6757 {
6758 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6759 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6760 struct seq_buf s;
6761
6762 if (!buf)
6763 return -ENOMEM;
6764 seq_buf_init(&s, buf, PAGE_SIZE);
6765 memory_stat_format(memcg, &s);
6766 seq_puts(m, buf);
6767 kfree(buf);
6768 return 0;
6769 }
6770
6771 #ifdef CONFIG_NUMA
6772 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6773 int item)
6774 {
6775 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6776 }
6777
6778 static int memory_numa_stat_show(struct seq_file *m, void *v)
6779 {
6780 int i;
6781 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6782
6783 mem_cgroup_flush_stats(memcg);
6784
6785 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6786 int nid;
6787
6788 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6789 continue;
6790
6791 seq_printf(m, "%s", memory_stats[i].name);
6792 for_each_node_state(nid, N_MEMORY) {
6793 u64 size;
6794 struct lruvec *lruvec;
6795
6796 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6797 size = lruvec_page_state_output(lruvec,
6798 memory_stats[i].idx);
6799 seq_printf(m, " N%d=%llu", nid, size);
6800 }
6801 seq_putc(m, '\n');
6802 }
6803
6804 return 0;
6805 }
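/*
 * Output format (illustrative): one line per stat with per-node byte
 * values, e.g. "anon N0=1048576 N1=0".
 */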
6806 #endif
6807
6808 static int memory_oom_group_show(struct seq_file *m, void *v)
6809 {
6810 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6811
6812 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
6813
6814 return 0;
6815 }
6816
6817 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6818 char *buf, size_t nbytes, loff_t off)
6819 {
6820 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6821 int ret, oom_group;
6822
6823 buf = strstrip(buf);
6824 if (!buf)
6825 return -EINVAL;
6826
6827 ret = kstrtoint(buf, 0, &oom_group);
6828 if (ret)
6829 return ret;
6830
6831 if (oom_group != 0 && oom_group != 1)
6832 return -EINVAL;
6833
6834 WRITE_ONCE(memcg->oom_group, oom_group);
6835
6836 return nbytes;
6837 }
6838
6839 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6840 size_t nbytes, loff_t off)
6841 {
6842 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6843 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6844 unsigned long nr_to_reclaim, nr_reclaimed = 0;
6845 unsigned int reclaim_options;
6846 int err;
6847
6848 buf = strstrip(buf);
6849 err = page_counter_memparse(buf, "", &nr_to_reclaim);
6850 if (err)
6851 return err;
6852
6853 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6854 while (nr_reclaimed < nr_to_reclaim) {
6855 /* Will converge on zero, but reclaim enforces a minimum */
6856 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
6857 unsigned long reclaimed;
6858
6859 if (signal_pending(current))
6860 return -EINTR;
6861
6862 /*
6863 * On the final attempt, drain percpu lru caches in the
6864 * hope of introducing more evictable pages for
6865 * try_to_free_mem_cgroup_pages().
6866 */
6867 if (!nr_retries)
6868 lru_add_drain_all();
6869
6870 reclaimed = try_to_free_mem_cgroup_pages(memcg,
6871 batch_size, GFP_KERNEL, reclaim_options);
6872
6873 if (!reclaimed && !nr_retries--)
6874 return -EAGAIN;
6875
6876 nr_reclaimed += reclaimed;
6877 }
6878
6879 return nbytes;
6880 }
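/*
 * Example (illustrative): request ~64M of proactive reclaim; the write
 * returns -EAGAIN if the target cannot be met within the retry budget:
 *
 *	# echo 64M > /sys/fs/cgroup/<group>/memory.reclaim
 */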
6881
6882 static struct cftype memory_files[] = {
6883 {
6884 .name = "current",
6885 .flags = CFTYPE_NOT_ON_ROOT,
6886 .read_u64 = memory_current_read,
6887 },
6888 {
6889 .name = "peak",
6890 .flags = CFTYPE_NOT_ON_ROOT,
6891 .read_u64 = memory_peak_read,
6892 },
6893 {
6894 .name = "min",
6895 .flags = CFTYPE_NOT_ON_ROOT,
6896 .seq_show = memory_min_show,
6897 .write = memory_min_write,
6898 },
6899 {
6900 .name = "low",
6901 .flags = CFTYPE_NOT_ON_ROOT,
6902 .seq_show = memory_low_show,
6903 .write = memory_low_write,
6904 },
6905 {
6906 .name = "high",
6907 .flags = CFTYPE_NOT_ON_ROOT,
6908 .seq_show = memory_high_show,
6909 .write = memory_high_write,
6910 },
6911 {
6912 .name = "max",
6913 .flags = CFTYPE_NOT_ON_ROOT,
6914 .seq_show = memory_max_show,
6915 .write = memory_max_write,
6916 },
6917 {
6918 .name = "events",
6919 .flags = CFTYPE_NOT_ON_ROOT,
6920 .file_offset = offsetof(struct mem_cgroup, events_file),
6921 .seq_show = memory_events_show,
6922 },
6923 {
6924 .name = "events.local",
6925 .flags = CFTYPE_NOT_ON_ROOT,
6926 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6927 .seq_show = memory_events_local_show,
6928 },
6929 {
6930 .name = "stat",
6931 .seq_show = memory_stat_show,
6932 },
6933 #ifdef CONFIG_NUMA
6934 {
6935 .name = "numa_stat",
6936 .seq_show = memory_numa_stat_show,
6937 },
6938 #endif
6939 {
6940 .name = "oom.group",
6941 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6942 .seq_show = memory_oom_group_show,
6943 .write = memory_oom_group_write,
6944 },
6945 {
6946 .name = "reclaim",
6947 .flags = CFTYPE_NS_DELEGATABLE,
6948 .write = memory_reclaim,
6949 },
6950 { } /* terminate */
6951 };
6952
6953 struct cgroup_subsys memory_cgrp_subsys = {
6954 .css_alloc = mem_cgroup_css_alloc,
6955 .css_online = mem_cgroup_css_online,
6956 .css_offline = mem_cgroup_css_offline,
6957 .css_released = mem_cgroup_css_released,
6958 .css_free = mem_cgroup_css_free,
6959 .css_reset = mem_cgroup_css_reset,
6960 .css_rstat_flush = mem_cgroup_css_rstat_flush,
6961 .can_attach = mem_cgroup_can_attach,
6962 .attach = mem_cgroup_attach,
6963 .cancel_attach = mem_cgroup_cancel_attach,
6964 .post_attach = mem_cgroup_move_task,
6965 .dfl_cftypes = memory_files,
6966 .legacy_cftypes = mem_cgroup_legacy_files,
6967 .early_init = 0,
6968 };
6969
6970 /*
6971 * This function calculates an individual cgroup's effective
6972 * protection which is derived from its own memory.min/low, its
6973 * parent's and siblings' settings, as well as the actual memory
6974 * distribution in the tree.
6975 *
6976 * The following rules apply to the effective protection values:
6977 *
6978 * 1. At the first level of reclaim, effective protection is equal to
6979 * the declared protection in memory.min and memory.low.
6980 *
6981 * 2. To enable safe delegation of the protection configuration, at
6982 * subsequent levels the effective protection is capped to the
6983 * parent's effective protection.
6984 *
6985 * 3. To make complex and dynamic subtrees easier to configure, the
6986 * user is allowed to overcommit the declared protection at a given
6987 * level. If that is the case, the parent's effective protection is
6988 * distributed to the children in proportion to how much protection
6989 * they have declared and how much of it they are utilizing.
6990 *
6991 * This makes distribution proportional, but also work-conserving:
6992 * if one cgroup claims much more protection than it uses memory,
6993 * the unused remainder is available to its siblings.
6994 *
6995 * 4. Conversely, when the declared protection is undercommitted at a
6996 * given level, the distribution of the larger parental protection
6997 * budget is NOT proportional. A cgroup's protection from a sibling
6998 * is capped to its own memory.min/low setting.
6999 *
7000 * 5. However, to allow protecting recursive subtrees from each other
7001 * without having to declare each individual cgroup's fixed share
7002 * of the ancestor's claim to protection, any unutilized -
7003 * "floating" - protection from up the tree is distributed in
7004 * proportion to each cgroup's *usage*. This makes the protection
7005 * neutral wrt sibling cgroups and lets them compete freely over
7006 * the shared parental protection budget, but it protects the
7007 * subtree as a whole from neighboring subtrees.
7008 *
7009 * Note that 4. and 5. are not in conflict: 4. is about protecting
7010 * against immediate siblings whereas 5. is about protecting against
7011 * neighboring subtrees.
7012 */
7013 static unsigned long effective_protection(unsigned long usage,
7014 unsigned long parent_usage,
7015 unsigned long setting,
7016 unsigned long parent_effective,
7017 unsigned long siblings_protected)
7018 {
7019 unsigned long protected;
7020 unsigned long ep;
7021
7022 protected = min(usage, setting);
7023 /*
7024 * If all cgroups at this level combined claim and use more
7025 * protection than what the parent affords them, distribute
7026 * shares in proportion to utilization.
7027 *
7028 * We are using actual utilization rather than the statically
7029 * claimed protection in order to be work-conserving: claimed
7030 * but unused protection is available to siblings that would
7031 * otherwise get a smaller chunk than what they claimed.
7032 */
7033 if (siblings_protected > parent_effective)
7034 return protected * parent_effective / siblings_protected;
7035
7036 /*
7037 * Ok, utilized protection of all children is within what the
7038 * parent affords them, so we know whatever this child claims
7039 * and utilizes is effectively protected.
7040 *
7041 * If there is unprotected usage beyond this value, reclaim
7042 * will apply pressure in proportion to that amount.
7043 *
7044 * If there is unutilized protection, the cgroup will be fully
7045 * shielded from reclaim, but we do return a smaller value for
7046 * protection than what the group could enjoy in theory. This
7047 * is okay. With the overcommit distribution above, effective
7048 * protection is always dependent on how memory is actually
7049 * consumed among the siblings anyway.
7050 */
7051 ep = protected;
7052
7053 /*
7054 * If the children aren't claiming (all of) the protection
7055 * afforded to them by the parent, distribute the remainder in
7056 * proportion to the (unprotected) memory of each cgroup. That
7057 * way, cgroups that aren't explicitly prioritized wrt each
7058 * other compete freely over the allowance, but they are
7059 * collectively protected from neighboring trees.
7060 *
7061 * We're using unprotected memory for the weight so that if
7062 * some cgroups DO claim explicit protection, we don't protect
7063 * the same bytes twice.
7064 *
7065 * Check both usage and parent_usage against the respective
7066 * protected values. One should imply the other, but they
7067 * aren't read atomically - make sure the division is sane.
7068 */
7069 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
7070 return ep;
7071 if (parent_effective > siblings_protected &&
7072 parent_usage > siblings_protected &&
7073 usage > protected) {
7074 unsigned long unclaimed;
7075
7076 unclaimed = parent_effective - siblings_protected;
7077 unclaimed *= usage - protected;
7078 unclaimed /= parent_usage - siblings_protected;
7079
7080 ep += unclaimed;
7081 }
7082
7083 return ep;
7084 }
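/*
 * Worked example with illustrative numbers for the overcommit branch
 * above: if the children together claim and use 200M while the parent's
 * effective protection is 100M, a child with
 * protected = min(usage, setting) = 50M receives
 *
 *	50M * 100M / 200M = 25M
 *
 * of effective protection - shares scale in proportion to utilization.
 */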
7085
7086 /**
7087 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
7088 * @root: the top ancestor of the sub-tree being checked
7089 * @memcg: the memory cgroup to check
7090 *
7091 * WARNING: This function is not stateless! It can only be used as part
7092 * of a top-down tree iteration, not for isolated queries.
7093 */
7094 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
7095 struct mem_cgroup *memcg)
7096 {
7097 unsigned long usage, parent_usage;
7098 struct mem_cgroup *parent;
7099
7100 if (mem_cgroup_disabled())
7101 return;
7102
7103 if (!root)
7104 root = root_mem_cgroup;
7105
7106 /*
7107 * Effective values of the reclaim targets are ignored so they
7108 * can be stale. Have a look at mem_cgroup_protection for more
7109 * details.
7110 * TODO: calculation should be more robust so that we do not need
7111 * that special casing.
7112 */
7113 if (memcg == root)
7114 return;
7115
7116 usage = page_counter_read(&memcg->memory);
7117 if (!usage)
7118 return;
7119
7120 parent = parent_mem_cgroup(memcg);
7121
7122 if (parent == root) {
7123 memcg->memory.emin = READ_ONCE(memcg->memory.min);
7124 memcg->memory.elow = READ_ONCE(memcg->memory.low);
7125 return;
7126 }
7127
7128 parent_usage = page_counter_read(&parent->memory);
7129
7130 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
7131 READ_ONCE(memcg->memory.min),
7132 READ_ONCE(parent->memory.emin),
7133 atomic_long_read(&parent->memory.children_min_usage)));
7134
7135 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
7136 READ_ONCE(memcg->memory.low),
7137 READ_ONCE(parent->memory.elow),
7138 atomic_long_read(&parent->memory.children_low_usage)));
7139 }
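
/*
 * Illustrative call pattern (a sketch, not a verbatim reclaim path):
 * because emin/elow are derived from the parent's effective values,
 * callers must walk the tree top-down, e.g.:
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(root, memcg);
 *		// compare page_counter_read(&memcg->memory) against
 *		// memcg->memory.emin/elow to scale reclaim pressure
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 *
 * Querying an isolated cgroup without first visiting its ancestors
 * would read stale effective values.
 */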

static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
			gfp_t gfp)
{
	long nr_pages = folio_nr_pages(folio);
	int ret;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret)
		goto out;

	css_get(&memcg->css);
	commit_charge(folio, memcg);

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, nr_pages);
	memcg_check_events(memcg, folio_nid(folio));
	local_irq_enable();
out:
	return ret;
}

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	int ret;

	memcg = get_mem_cgroup_from_mm(mm);
	ret = charge_memcg(folio, memcg, gfp);
	css_put(&memcg->css);

	return ret;
}

/**
 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
 * @folio: folio to charge.
 * @mm: mm context of the victim
 * @gfp: reclaim mode
 * @entry: swap entry for which the folio is allocated
 *
 * This function charges a folio allocated for swapin. Please call this before
 * adding the folio to the swapcache.
 *
 * Returns 0 on success. Otherwise, an error code is returned.
 */
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short id;
	int ret;

	if (mem_cgroup_disabled())
		return 0;

	id = lookup_swap_cgroup_id(entry);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (!memcg || !css_tryget_online(&memcg->css))
		memcg = get_mem_cgroup_from_mm(mm);
	rcu_read_unlock();

	ret = charge_memcg(folio, memcg, gfp);

	css_put(&memcg->css);
	return ret;
}

/*
 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
 * @entry: swap entry for which the page is charged
 *
 * Call this function after successfully adding the charged page to swapcache.
 *
 * Note: This function assumes the page for which the swap slot is being
 * uncharged is an order-0 page.
 */
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
	/*
	 * Cgroup1's unified memory+swap counter has been charged with the
	 * new swapcache page, finish the transfer by uncharging the swap
	 * slot. The swap slot would also get uncharged when it dies, but
	 * it can stick around indefinitely and we'd count the page twice
	 * the entire time.
	 *
	 * Cgroup2 has separate resource counters for memory and swap,
	 * so this is a non-issue here. Memory and swap charge lifetimes
	 * correspond 1:1 to page and swap slot lifetimes: we charge the
	 * page to memory here, and uncharge swap when the slot is freed.
	 */
	if (!mem_cgroup_disabled() && do_memsw_account()) {
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it. The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
		mem_cgroup_uncharge_swap(entry, 1);
	}
}
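
/*
 * Illustrative ordering for the two helpers above (a sketch of a
 * swapin path, not copied from an actual caller; the swapcache helper
 * name and signature vary between kernel versions): the folio must be
 * charged before it becomes reachable through the swapcache, and the
 * duplicate swap-slot charge is dropped only once that has succeeded:
 *
 *	if (mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry))
 *		goto fail;			// charge first
 *	if (add_to_swap_cache(folio, entry, gfp, NULL))
 *		goto fail_uncharge;		// then expose the folio
 *	mem_cgroup_swapin_uncharge_swap(entry);	// then drop the duplicate
 */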

struct uncharge_gather {
	struct mem_cgroup *memcg;
	unsigned long nr_memory;
	unsigned long pgpgout;
	unsigned long nr_kmem;
	int nid;
};

static inline void uncharge_gather_clear(struct uncharge_gather *ug)
{
	memset(ug, 0, sizeof(*ug));
}

static void uncharge_batch(const struct uncharge_gather *ug)
{
	unsigned long flags;

	if (ug->nr_memory) {
		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
		if (do_memsw_account())
			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
		if (ug->nr_kmem)
			memcg_account_kmem(ug->memcg, -ug->nr_kmem);
		memcg_oom_recover(ug->memcg);
	}

	local_irq_save(flags);
	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
	memcg_check_events(ug->memcg, ug->nid);
	local_irq_restore(flags);

	/* drop reference from uncharge_folio */
	css_put(&ug->memcg->css);
}

static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
{
	long nr_pages;
	struct mem_cgroup *memcg;
	struct obj_cgroup *objcg;

	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	/*
	 * Nobody should be changing or seriously looking at
	 * folio memcg or objcg at this point, we have fully
	 * exclusive access to the folio.
	 */
	if (folio_memcg_kmem(folio)) {
		objcg = __folio_objcg(folio);
		/*
		 * This get matches the put at the end of the function and
		 * kmem pages do not hold memcg references anymore.
		 */
		memcg = get_mem_cgroup_from_objcg(objcg);
	} else {
		memcg = __folio_memcg(folio);
	}

	if (!memcg)
		return;

	if (ug->memcg != memcg) {
		if (ug->memcg) {
			uncharge_batch(ug);
			uncharge_gather_clear(ug);
		}
		ug->memcg = memcg;
		ug->nid = folio_nid(folio);

		/* pairs with css_put in uncharge_batch */
		css_get(&memcg->css);
	}

	nr_pages = folio_nr_pages(folio);

	if (folio_memcg_kmem(folio)) {
		ug->nr_memory += nr_pages;
		ug->nr_kmem += nr_pages;

		folio->memcg_data = 0;
		obj_cgroup_put(objcg);
	} else {
		/* LRU pages aren't accounted at the root level */
		if (!mem_cgroup_is_root(memcg))
			ug->nr_memory += nr_pages;
		ug->pgpgout++;

		folio->memcg_data = 0;
	}

	css_put(&memcg->css);
}

void __mem_cgroup_uncharge(struct folio *folio)
{
	struct uncharge_gather ug;

	/* Don't touch folio->lru of any random page, pre-check: */
	if (!folio_memcg(folio))
		return;

	uncharge_gather_clear(&ug);
	uncharge_folio(folio, &ug);
	uncharge_batch(&ug);
}

/**
 * __mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * __mem_cgroup_charge().
 */
void __mem_cgroup_uncharge_list(struct list_head *page_list)
{
	struct uncharge_gather ug;
	struct folio *folio;

	uncharge_gather_clear(&ug);
	list_for_each_entry(folio, page_list, lru)
		uncharge_folio(folio, &ug);
	if (ug.memcg)
		uncharge_batch(&ug);
}
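
/*
 * The uncharge_gather pattern above exists to amortize the cost of
 * uncharging: page counter updates and event accounting are batched
 * per memcg instead of being issued once per folio. As a sketch with
 * hypothetical numbers: releasing 512 folios that all belong to the
 * same memcg results in a single page_counter_uncharge() of 512 pages
 * and one irq-disabled statistics section, rather than 512 of each;
 * a folio from a different memcg simply flushes the current batch and
 * starts a new one.
 */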

/**
 * mem_cgroup_migrate - Charge a folio's replacement.
 * @old: Currently circulating folio.
 * @new: Replacement folio.
 *
 * Charge @new as a replacement folio for @old. @old will
 * be uncharged upon free.
 *
 * Both folios must be locked, @new->mapping must be set up.
 */
void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
	struct mem_cgroup *memcg;
	long nr_pages = folio_nr_pages(new);
	unsigned long flags;

	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new folio already charged? */
	if (folio_memcg(new))
		return;

	memcg = folio_memcg(old);
	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
	if (!memcg)
		return;

	/* Force-charge the new page. The old one will be freed soon */
	if (!mem_cgroup_is_root(memcg)) {
		page_counter_charge(&memcg->memory, nr_pages);
		if (do_memsw_account())
			page_counter_charge(&memcg->memsw, nr_pages);
	}

	css_get(&memcg->css);
	commit_charge(new, memcg);

	local_irq_save(flags);
	mem_cgroup_charge_statistics(memcg, nr_pages);
	memcg_check_events(memcg, folio_nid(new));
	local_irq_restore(flags);
}

DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);

void mem_cgroup_sk_alloc(struct sock *sk)
{
	struct mem_cgroup *memcg;

	if (!mem_cgroup_sockets_enabled)
		return;

	/* Do not associate the sock with an unrelated interrupted task's memcg. */
	if (!in_task())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (mem_cgroup_is_root(memcg))
		goto out;
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}

void mem_cgroup_sk_free(struct sock *sk)
{
	if (sk->sk_memcg)
		css_put(&sk->sk_memcg->css);
}

/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 * @gfp_mask: reclaim mode
 *
 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
 * @memcg's configured limit, %false if it doesn't.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		struct page_counter *fail;

		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
			return true;
		}
		memcg->tcpmem_pressure = 1;
		if (gfp_mask & __GFP_NOFAIL) {
			page_counter_charge(&memcg->tcpmem, nr_pages);
			return true;
		}
		return false;
	}

	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
		return true;
	}

	return false;
}
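
/*
 * Note the two regimes above: on the legacy (cgroup1) hierarchy,
 * socket memory is tracked in the dedicated tcpmem counter, and a
 * __GFP_NOFAIL request is force-charged past the limit with the
 * pressure flag raised; on the default (cgroup2) hierarchy, socket
 * memory shares the unified memory counter via try_charge() and is
 * only reported separately through the MEMCG_SOCK statistic.
 */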

/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
		return;
	}

	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);

	refill_stock(memcg, nr_pages);
}

static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
		if (!strcmp(token, "nobpf"))
			cgroup_memory_nobpf = true;
	}
	return 1;
}
__setup("cgroup.memory=", cgroup_memory);
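
/*
 * Example boot parameter accepted by the parser above (the option
 * names come straight from the strcmp()s; combining all three is just
 * an illustration):
 *
 *	cgroup.memory=nosocket,nokmem,nobpf
 *
 * Empty tokens, e.g. the double comma in "nosocket,,nokmem", are
 * skipped rather than treated as errors.
 */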

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
 * basically everything that doesn't depend on a specific mem_cgroup structure
 * should be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

	/*
	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
	 * used for per-memcg-per-cpu caching of per-node statistics. In order
	 * to work fine, we should make sure that the overfill threshold can't
	 * exceed S32_MAX / PAGE_SIZE.
	 */
	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);

	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);

		rtpn->rb_root = RB_ROOT;
		rtpn->rb_rightmost = NULL;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_SWAP
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
	while (!refcount_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}

/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @folio: folio whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @folio to @entry.
 */
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned int nr_entries;
	unsigned short oldid;

	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	if (mem_cgroup_disabled())
		return;

	if (!do_memsw_account())
		return;

	memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	nr_entries = folio_nr_pages(folio);
	/* Get references for the tail pages, too */
	if (nr_entries > 1)
		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
				   nr_entries);
	VM_BUG_ON_FOLIO(oldid, folio);
	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);

	folio->memcg_data = 0;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, nr_entries);

	if (memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, nr_entries);
		page_counter_uncharge(&memcg->memsw, nr_entries);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * i_pages lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	memcg_stats_lock();
	mem_cgroup_charge_statistics(memcg, -nr_entries);
	memcg_stats_unlock();
	memcg_check_events(memcg, folio_nid(folio));

	css_put(&memcg->css);
}
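
/*
 * Counter movement in mem_cgroup_swapout(), sketched for a one-page
 * folio in a hypothetical hierarchy: if the owning memcg C is still
 * online, C->memory is uncharged while C->memsw stays put - the
 * memory+swap charge simply changes form. If C has been offlined and
 * its nearest online ancestor is P, the swap slot is recorded against
 * P instead: C->memory and C->memsw are uncharged by one page and
 * P->memsw is charged by one, so the memsw charge follows the swap
 * entry to its new owner.
 */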

/**
 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
 * @folio: folio being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @folio's memcg for the swap space at @entry.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
{
	unsigned int nr_pages = folio_nr_pages(folio);
	struct page_counter *counter;
	struct mem_cgroup *memcg;
	unsigned short oldid;

	if (do_memsw_account())
		return 0;

	memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
	if (!memcg)
		return 0;

	if (!entry.val) {
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		return 0;
	}

	memcg = mem_cgroup_id_get_online(memcg);

	if (!mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	/* Get references for the tail pages, too */
	if (nr_pages > 1)
		mem_cgroup_id_get_many(memcg, nr_pages - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
	VM_BUG_ON_FOLIO(oldid, folio);
	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);

	return 0;
}

/**
 * __mem_cgroup_uncharge_swap - uncharge swap space
 * @entry: swap entry to uncharge
 * @nr_pages: the amount of swap space to uncharge
 */
void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	id = swap_cgroup_record(entry, 0, nr_pages);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg)) {
			if (do_memsw_account())
				page_counter_uncharge(&memcg->memsw, nr_pages);
			else
				page_counter_uncharge(&memcg->swap, nr_pages);
		}
		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
		mem_cgroup_id_put_many(memcg, nr_pages);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (mem_cgroup_disabled() || do_memsw_account())
		return nr_swap_pages;
	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.max) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}

bool mem_cgroup_swap_full(struct folio *folio)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (vm_swap_full())
		return true;
	if (do_memsw_account())
		return false;

	memcg = folio_memcg(folio);
	if (!memcg)
		return false;

	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		unsigned long usage = page_counter_read(&memcg->swap);

		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
		    usage * 2 >= READ_ONCE(memcg->swap.max))
			return true;
	}

	return false;
}
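
/*
 * The "usage * 2 >=" tests above implement a half-full heuristic:
 * swap is reported full once usage reaches 50% of either swap.high
 * or swap.max anywhere up the hierarchy. Illustrative numbers: with
 * swap.max set to 1024 pages and 512 pages already in use, the next
 * check returns true, so callers treat the cgroup's swap as nearly
 * exhausted well before the counter actually runs dry.
 */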

static int __init setup_swap_account(char *s)
{
	bool res;

	if (!kstrtobool(s, &res) && !res)
		pr_warn_once("The swapaccount=0 commandline option is deprecated "
			     "in favor of configuring swap control via cgroupfs. "
			     "Please report your usecase to linux-mm@kvack.org if you "
			     "depend on this functionality.\n");
	return 1;
}
__setup("swapaccount=", setup_swap_account);

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static u64 swap_peak_read(struct cgroup_subsys_state *css,
			  struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)memcg->swap.watermark * PAGE_SIZE;
}

static int swap_high_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
}

static ssize_t swap_high_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	page_counter_set_high(&memcg->swap, high);

	return nbytes;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->swap.max, max);

	return nbytes;
}
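
/*
 * The tunables above are exposed through cgroupfs on the v2
 * hierarchy. A hypothetical shell session (paths depend on where
 * cgroup2 is mounted and how the group is named):
 *
 *	echo 512M > /sys/fs/cgroup/mygroup/memory.swap.high
 *	echo 1G   > /sys/fs/cgroup/mygroup/memory.swap.max
 *	cat /sys/fs/cgroup/mygroup/memory.swap.current
 *
 * page_counter_memparse() accepts the literal string "max" as well as
 * byte values with K/M/G suffixes.
 */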

static int swap_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "high %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
	seq_printf(m, "max %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
	seq_printf(m, "fail %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));

	return 0;
}

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_high_show,
		.write = swap_high_write,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{
		.name = "swap.peak",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_peak_read,
	},
	{
		.name = "swap.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
		.seq_show = swap_events_show,
	},
	{ }	/* terminate */
};

static struct cftype memsw_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
/**
 * obj_cgroup_may_zswap - check if this cgroup can zswap
 * @objcg: the object cgroup
 *
 * Check if the hierarchical zswap limit has been reached.
 *
 * This doesn't check for specific headroom, and it is not atomic
 * either. But with zswap, the size of the allocation is only known
 * once compression has occurred, and this optimistic pre-check avoids
 * spending cycles on compression when there is already no room left
 * or zswap is disabled altogether somewhere in the hierarchy.
 */
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg, *original_memcg;
	bool ret = true;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;

	original_memcg = get_mem_cgroup_from_objcg(objcg);
	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
	     memcg = parent_mem_cgroup(memcg)) {
		unsigned long max = READ_ONCE(memcg->zswap_max);
		unsigned long pages;

		if (max == PAGE_COUNTER_MAX)
			continue;
		if (max == 0) {
			ret = false;
			break;
		}

		/*
		 * mem_cgroup_flush_stats() ignores small changes. Use
		 * do_flush_stats() directly to get accurate stats for charging.
		 */
		do_flush_stats(memcg);
		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
		if (pages < max)
			continue;
		ret = false;
		break;
	}
	mem_cgroup_put(original_memcg);
	return ret;
}
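
/*
 * Hierarchical effect of the walk above, with made-up limits: if a
 * leaf cgroup keeps zswap.max at "max" but one of its ancestors sets
 * zswap.max to 64M, the leaf is refused once the ancestor's
 * MEMCG_ZSWAP_B usage reaches 64M; and setting zswap.max to 0
 * anywhere on the path disables new zswap stores for the whole
 * subtree.
 */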

/**
 * obj_cgroup_charge_zswap - charge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * This forces the charge after obj_cgroup_may_zswap() allowed
 * compression and storage in zswap for this cgroup to go ahead.
 */
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));

	/* PF_MEMALLOC context, charging must succeed */
	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
		VM_WARN_ON_ONCE(1);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
	rcu_read_unlock();
}

/**
 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * Uncharges zswap memory on page in.
 */
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	obj_cgroup_uncharge(objcg, size);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
	rcu_read_unlock();
}

static u64 zswap_current_read(struct cgroup_subsys_state *css,
			      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	mem_cgroup_flush_stats(memcg);
	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
}

static int zswap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
}

static ssize_t zswap_max_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->zswap_max, max);

	return nbytes;
}

static struct cftype zswap_files[] = {
	{
		.name = "zswap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = zswap_current_read,
	},
	{
		.name = "zswap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = zswap_max_show,
		.write = zswap_max_write,
	},
	{ }	/* terminate */
};
#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */

static int __init mem_cgroup_swap_init(void)
{
	if (mem_cgroup_disabled())
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
#endif
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_SWAP */