1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* memcontrol.h - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  */
10 
11 #ifndef _LINUX_MEMCONTROL_H
12 #define _LINUX_MEMCONTROL_H
13 #include <linux/android_kabi.h>
14 #include <linux/cgroup.h>
15 #include <linux/vm_event_item.h>
16 #include <linux/hardirq.h>
17 #include <linux/jump_label.h>
18 #include <linux/kernel.h>
19 #include <linux/page_counter.h>
20 #include <linux/vmpressure.h>
21 #include <linux/eventfd.h>
22 #include <linux/mm.h>
23 #include <linux/vmstat.h>
24 #include <linux/writeback.h>
25 #include <linux/page-flags.h>
26 #include <linux/shrinker.h>
27 
28 struct mem_cgroup;
29 struct obj_cgroup;
30 struct page;
31 struct mm_struct;
32 struct kmem_cache;
33 
34 /* Cgroup-specific page state, on top of universal node page state */
35 enum memcg_stat_item {
36 	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
37 	MEMCG_SOCK,
38 	MEMCG_PERCPU_B,
39 	MEMCG_VMALLOC,
40 	MEMCG_KMEM,
41 	MEMCG_ZSWAP_B,
42 	MEMCG_ZSWAPPED,
43 	MEMCG_NR_STAT,
44 };
45 
46 enum memcg_memory_event {
47 	MEMCG_LOW,
48 	MEMCG_HIGH,
49 	MEMCG_MAX,
50 	MEMCG_OOM,
51 	MEMCG_OOM_KILL,
52 	MEMCG_OOM_GROUP_KILL,
53 	MEMCG_SWAP_HIGH,
54 	MEMCG_SWAP_MAX,
55 	MEMCG_SWAP_FAIL,
56 	MEMCG_NR_MEMORY_EVENTS,
57 };
58 
59 struct mem_cgroup_reclaim_cookie {
60 	pg_data_t *pgdat;
61 	int generation;
62 };
63 
64 #ifdef CONFIG_MEMCG
65 
66 #define MEM_CGROUP_ID_SHIFT	16
67 
68 struct mem_cgroup_id {
69 	int id;
70 	refcount_t ref;
71 };
72 
73 struct memcg_vmstats_percpu;
74 struct memcg1_events_percpu;
75 struct memcg_vmstats;
76 struct lruvec_stats_percpu;
77 struct lruvec_stats;
78 
79 struct mem_cgroup_reclaim_iter {
80 	struct mem_cgroup *position;
81 	/* scan generation, increased every round-trip */
82 	atomic_t generation;
83 };
84 
85 /*
86  * per-node information in memory controller.
87  */
88 struct mem_cgroup_per_node {
89 	/* Keep the read-only fields at the start */
90 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
91 						/* use container_of	   */
92 
93 	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
94 	struct lruvec_stats			*lruvec_stats;
95 	struct shrinker_info __rcu	*shrinker_info;
96 
97 #ifdef CONFIG_MEMCG_V1
98 	/*
99 	 * Memcg-v1-only fields sit in the middle as a buffer between the
100 	 * read-mostly and frequently-updated fields, to avoid false sharing.
101 	 * If the v1 fields are not present, explicit padding is needed.
102 	 */
103 
104 	struct rb_node		tree_node;	/* RB tree node */
105 	unsigned long		usage_in_excess;/* Set to the value by which */
106 						/* the soft limit is exceeded*/
107 	bool			on_tree;
108 #else
109 	CACHELINE_PADDING(_pad1_);
110 #endif
111 
112 	/* Fields which get updated often at the end. */
113 	struct lruvec		lruvec;
114 	CACHELINE_PADDING(_pad2_);
115 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
116 	struct mem_cgroup_reclaim_iter	iter;
117 
118 	ANDROID_BACKPORT_RESERVE(1);
119 };
120 
121 struct mem_cgroup_threshold {
122 	struct eventfd_ctx *eventfd;
123 	unsigned long threshold;
124 };
125 
126 /* For threshold */
127 struct mem_cgroup_threshold_ary {
128 	/* Index of the threshold just below or equal to the current usage. */
129 	int current_threshold;
130 	/* Size of entries[] */
131 	unsigned int size;
132 	/* Array of thresholds */
133 	struct mem_cgroup_threshold entries[] __counted_by(size);
134 };
135 
136 struct mem_cgroup_thresholds {
137 	/* Primary thresholds array */
138 	struct mem_cgroup_threshold_ary *primary;
139 	/*
140 	 * Spare threshold array.
141 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
142 	 * It must be able to store at least primary->size - 1 entries.
143 	 */
144 	struct mem_cgroup_threshold_ary *spare;
145 };
146 
147 /*
148  * Remember four most recent foreign writebacks with dirty pages in this
149  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
150  * one in a given round, we're likely to catch it later if it keeps
151  * foreign-dirtying, so a fairly low count should be enough.
152  *
153  * See mem_cgroup_track_foreign_dirty_slowpath() for details.
154  */
155 #define MEMCG_CGWB_FRN_CNT	4
156 
157 struct memcg_cgwb_frn {
158 	u64 bdi_id;			/* bdi->id of the foreign inode */
159 	int memcg_id;			/* memcg->css.id of foreign inode */
160 	u64 at;				/* jiffies_64 at the time of dirtying */
161 	struct wb_completion done;	/* tracks in-flight foreign writebacks */
162 };
163 
164 /*
165  * Bucket for arbitrarily byte-sized objects charged to a memory
166  * cgroup. The bucket can be reparented in one piece when the cgroup
167  * is destroyed, without having to round up the individual references
168  * of all live memory objects in the wild.
169  */
170 struct obj_cgroup {
171 	struct percpu_ref refcnt;
172 	struct mem_cgroup *memcg;
173 	atomic_t nr_charged_bytes;
174 	union {
175 		struct list_head list; /* protected by objcg_lock */
176 		struct rcu_head rcu;
177 	};
178 };
179 
180 /*
181  * The memory controller data structure. The memory controller controls both
182  * page cache and RSS per cgroup. We would eventually like to provide
183  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
184  * to help the administrator determine what knobs to tune.
185  */
186 struct mem_cgroup {
187 	struct cgroup_subsys_state css;
188 
189 	/* Private memcg ID. Used to ID objects that outlive the cgroup */
190 	struct mem_cgroup_id id;
191 
192 	/* Accounted resources */
193 	struct page_counter memory;		/* Both v1 & v2 */
194 
195 	union {
196 		struct page_counter swap;	/* v2 only */
197 		struct page_counter memsw;	/* v1 only */
198 	};
199 
200 	/* registered local peak watchers */
201 	struct list_head memory_peaks;
202 	struct list_head swap_peaks;
203 	spinlock_t	 peaks_lock;
204 
205 	/* Range enforcement for interrupt charges */
206 	struct work_struct high_work;
207 
208 #ifdef CONFIG_ZSWAP
209 	unsigned long zswap_max;
210 
211 	/*
212 	 * Prevent pages from this memcg from being written back from zswap to
213 	 * swap, and from being swapped out on zswap store failures.
214 	 */
215 	bool zswap_writeback;
216 #endif
217 
218 	/* vmpressure notifications */
219 	struct vmpressure vmpressure;
220 
221 	/*
222 	 * Should the OOM killer kill all of the cgroup's tasks if it kills one?
223 	 */
224 	bool oom_group;
225 
226 	int swappiness;
227 
228 	/* memory.events and memory.events.local */
229 	struct cgroup_file events_file;
230 	struct cgroup_file events_local_file;
231 
232 	/* handle for "memory.swap.events" */
233 	struct cgroup_file swap_events_file;
234 
235 	/* memory.stat */
236 	struct memcg_vmstats	*vmstats;
237 
238 	/* memory.events */
239 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
240 	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
241 
242 	/*
243 	 * Hint of reclaim pressure for socket memory management. Note
244 	 * that this indicator should NOT be used in legacy cgroup mode
245 	 * where socket memory is accounted/charged separately.
246 	 */
247 	unsigned long		socket_pressure;
248 
249 	int kmemcg_id;
250 	/*
251 	 * memcg->objcg is wiped out as a part of the objcg reparenting
252 	 * process. memcg->orig_objcg preserves a pointer (and a reference)
253 	 * to the original objcg until the end of the memcg's lifetime.
254 	 */
255 	struct obj_cgroup __rcu	*objcg;
256 	struct obj_cgroup	*orig_objcg;
257 	/* list of inherited objcgs, protected by objcg_lock */
258 	struct list_head objcg_list;
259 
260 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
261 
262 #ifdef CONFIG_CGROUP_WRITEBACK
263 	struct list_head cgwb_list;
264 	struct wb_domain cgwb_domain;
265 	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
266 #endif
267 
268 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
269 	struct deferred_split deferred_split_queue;
270 #endif
271 
272 #ifdef CONFIG_LRU_GEN_WALKS_MMU
273 	/* per-memcg mm_struct list */
274 	struct lru_gen_mm_list mm_list;
275 #endif
276 
277 #ifdef CONFIG_MEMCG_V1
278 	/* Legacy consumer-oriented counters */
279 	struct page_counter kmem;		/* v1 only */
280 	struct page_counter tcpmem;		/* v1 only */
281 
282 	struct memcg1_events_percpu __percpu *events_percpu;
283 
284 	unsigned long soft_limit;
285 
286 	/* protected by memcg_oom_lock */
287 	bool oom_lock;
288 	int under_oom;
289 
290 	/* OOM-Killer disable */
291 	int oom_kill_disable;
292 
293 	/* protect arrays of thresholds */
294 	struct mutex thresholds_lock;
295 
296 	/* thresholds for memory usage. RCU-protected */
297 	struct mem_cgroup_thresholds thresholds;
298 
299 	/* thresholds for mem+swap usage. RCU-protected */
300 	struct mem_cgroup_thresholds memsw_thresholds;
301 
302 	/* For oom notifier event fd */
303 	struct list_head oom_notify;
304 
305 	/*
306 	 * Should we move charges of a task when a task is moved into this
307 	 * mem_cgroup ? And what type of charges should we move ?
308 	 */
309 	unsigned long move_charge_at_immigrate;
310 	/* taken only while moving_account > 0 */
311 	spinlock_t move_lock;
312 	unsigned long move_lock_flags;
313 
314 	/* Legacy tcp memory accounting */
315 	bool tcpmem_active;
316 	int tcpmem_pressure;
317 
318 	/*
319 	 * set > 0 if pages under this cgroup are moving to other cgroup.
320 	 */
321 	atomic_t moving_account;
322 	struct task_struct *move_lock_task;
323 
324 	/* List of events which userspace want to receive */
325 	struct list_head event_list;
326 	spinlock_t event_list_lock;
327 #endif /* CONFIG_MEMCG_V1 */
328 
329 	ANDROID_BACKPORT_RESERVE(1);
330 	ANDROID_OEM_DATA_ARRAY(1, 2);
331 
332 	struct mem_cgroup_per_node *nodeinfo[];
333 };
334 
335 /*
336  * Size of the first charge trial.
337  * TODO: it may be necessary to use larger batches on large machines, or to
338  * size the batch dynamically based on the workload.
339  */
340 #define MEMCG_CHARGE_BATCH 64U
341 
342 extern struct mem_cgroup *root_mem_cgroup;
343 
344 enum page_memcg_data_flags {
345 	/* page->memcg_data is a pointer to a slabobj_ext vector */
346 	MEMCG_DATA_OBJEXTS = (1UL << 0),
347 	/* page has been accounted as a non-slab kernel page */
348 	MEMCG_DATA_KMEM = (1UL << 1),
349 	/* the next bit after the last actual flag */
350 	__NR_MEMCG_DATA_FLAGS  = (1UL << 2),
351 };
352 
353 #define __FIRST_OBJEXT_FLAG	__NR_MEMCG_DATA_FLAGS
354 
355 #else /* CONFIG_MEMCG */
356 
357 #define __FIRST_OBJEXT_FLAG	(1UL << 0)
358 
359 #endif /* CONFIG_MEMCG */
360 
361 enum objext_flags {
362 	/* slabobj_ext vector failed to allocate */
363 	OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
364 	/* the next bit after the last actual flag */
365 	__NR_OBJEXTS_FLAGS  = (__FIRST_OBJEXT_FLAG << 1),
366 };
367 
368 #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
369 
370 #ifdef CONFIG_MEMCG
371 
372 static inline bool folio_memcg_kmem(struct folio *folio);
373 
374 void do_traversal_all_lruvec(int (*callback)(struct mem_cgroup *memcg,
375 					     struct lruvec *lruvec,
376 					     void *private),
377 			     void *private);
378 
379 int mem_cgroup_move_account(struct folio *folio,
380 			    bool compound,
381 			    struct mem_cgroup *from,
382 			    struct mem_cgroup *to);
383 
384 /*
385  * After the initialization objcg->memcg is always pointing at
386  * a valid memcg, but can be atomically swapped to the parent memcg.
387  *
388  * The caller must ensure that the returned memcg won't be released.
389  */
390 static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
391 {
392 	lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
393 	return READ_ONCE(objcg->memcg);
394 }
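/*
 * Illustrative sketch (not part of the mainline header): a typical caller
 * pins the objcg<->memcg association with RCU while peeking at the memcg.
 * The pointer must not be used after rcu_read_unlock() unless a reference
 * was taken, e.g. via get_mem_cgroup_from_objcg() further below.
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	...				(read-only peek at memcg)
 *	rcu_read_unlock();
 */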
395 
396 /*
397  * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
398  * @folio: Pointer to the folio.
399  *
400  * Returns a pointer to the memory cgroup associated with the folio,
401  * or NULL. This function assumes that the folio is known to have a
402  * proper memory cgroup pointer. It's not safe to call this function
403  * against some types of folios, e.g. slab folios, ex-slab folios or
404  * kmem folios.
405  */
406 static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
407 {
408 	unsigned long memcg_data = folio->memcg_data;
409 
410 	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
411 	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
412 	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
413 
414 	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
415 }
416 
417 /*
418  * __folio_objcg - get the object cgroup associated with a kmem folio.
419  * @folio: Pointer to the folio.
420  *
421  * Returns a pointer to the object cgroup associated with the folio,
422  * or NULL. This function assumes that the folio is known to have a
423  * proper object cgroup pointer. It's not safe to call this function
424  * against some types of folios, e.g. slab folios, ex-slab folios or
425  * LRU folios.
426  */
427 static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
428 {
429 	unsigned long memcg_data = folio->memcg_data;
430 
431 	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
432 	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
433 	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
434 
435 	return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
436 }
437 
438 /*
439  * folio_memcg - Get the memory cgroup associated with a folio.
440  * @folio: Pointer to the folio.
441  *
442  * Returns a pointer to the memory cgroup associated with the folio,
443  * or NULL. This function assumes that the folio is known to have a
444  * proper memory cgroup pointer. It's not safe to call this function
445  * against some types of folios, e.g. slab folios or ex-slab folios.
446  *
447  * For a non-kmem folio any of the following ensures folio and memcg binding
448  * stability:
449  *
450  * - the folio lock
451  * - LRU isolation
452  * - folio_memcg_lock()
453  * - exclusive reference
454  * - mem_cgroup_trylock_pages()
455  *
456  * For a kmem folio, the caller should hold an RCU read lock to keep the
457  * memcg associated with the folio from being released.
458  */
459 static inline struct mem_cgroup *folio_memcg(struct folio *folio)
460 {
461 	if (folio_memcg_kmem(folio))
462 		return obj_cgroup_memcg(__folio_objcg(folio));
463 	return __folio_memcg(folio);
464 }
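/*
 * Illustrative sketch (not from mainline): reading the memcg of a pagecache
 * folio under the folio lock, one of the stabilizing conditions listed above.
 * folio_lock()/folio_unlock() come from <linux/pagemap.h>, not this header.
 *
 *	struct mem_cgroup *memcg;
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);	(stable while the folio is locked)
 *	if (memcg)
 *		pr_debug("folio charged to memcg id %u\n", mem_cgroup_id(memcg));
 *	folio_unlock(folio);
 */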
465 
466 /*
467  * folio_memcg_charged - If a folio is charged to a memory cgroup.
468  * @folio: Pointer to the folio.
469  *
470  * Returns true if folio is charged to a memory cgroup, otherwise returns false.
471  */
472 static inline bool folio_memcg_charged(struct folio *folio)
473 {
474 	if (folio_memcg_kmem(folio))
475 		return __folio_objcg(folio) != NULL;
476 	return __folio_memcg(folio) != NULL;
477 }
478 
479 /**
480  * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
481  * @folio: Pointer to the folio.
482  *
483  * This function assumes that the folio is known to have a
484  * proper memory cgroup pointer. It's not safe to call this function
485  * against some types of folios, e.g. slab folios or ex-slab folios.
486  *
487  * Return: A pointer to the memory cgroup associated with the folio,
488  * or NULL.
489  */
490 static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
491 {
492 	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
493 
494 	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
495 
496 	if (memcg_data & MEMCG_DATA_KMEM) {
497 		struct obj_cgroup *objcg;
498 
499 		objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
500 		return obj_cgroup_memcg(objcg);
501 	}
502 
503 	WARN_ON_ONCE(!rcu_read_lock_held());
504 
505 	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
506 }
507 
508 /*
509  * folio_memcg_check - Get the memory cgroup associated with a folio.
510  * @folio: Pointer to the folio.
511  *
512  * Returns a pointer to the memory cgroup associated with the folio,
513  * or NULL. Unlike folio_memcg(), this function can take any folio
514  * as an argument. It has to be used in cases where it's not known whether a
515  * folio has an associated memory cgroup pointer, an object cgroup vector or
516  * an object cgroup.
517  *
518  * For a non-kmem folio any of the following ensures folio and memcg binding
519  * stability:
520  *
521  * - the folio lock
522  * - LRU isolation
523  * - folio_memcg_lock()
524  * - exclusive reference
525  * - mem_cgroup_trylock_pages()
526  *
527  * For a kmem folio, the caller should hold an RCU read lock to keep the
528  * memcg associated with the folio from being released.
529  */
530 static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
531 {
532 	/*
533 	 * Because folio->memcg_data might be changed asynchronously
534 	 * for slabs, READ_ONCE() should be used here.
535 	 */
536 	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
537 
538 	if (memcg_data & MEMCG_DATA_OBJEXTS)
539 		return NULL;
540 
541 	if (memcg_data & MEMCG_DATA_KMEM) {
542 		struct obj_cgroup *objcg;
543 
544 		objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
545 		return obj_cgroup_memcg(objcg);
546 	}
547 
548 	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
549 }
550 
551 static inline struct mem_cgroup *page_memcg_check(struct page *page)
552 {
553 	if (PageTail(page))
554 		return NULL;
555 	return folio_memcg_check((struct folio *)page);
556 }
557 
558 static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
559 {
560 	struct mem_cgroup *memcg;
561 
562 	rcu_read_lock();
563 retry:
564 	memcg = obj_cgroup_memcg(objcg);
565 	if (unlikely(!css_tryget(&memcg->css)))
566 		goto retry;
567 	rcu_read_unlock();
568 
569 	return memcg;
570 }
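/*
 * Illustrative sketch (not from mainline): unlike obj_cgroup_memcg(), the
 * helper above returns a referenced memcg, so the caller pairs it with
 * mem_cgroup_put() once it is done with the pointer:
 *
 *	memcg = get_mem_cgroup_from_objcg(objcg);
 *	...				(memcg is pinned here)
 *	mem_cgroup_put(memcg);
 */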
571 
572 /*
573  * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
574  * @folio: Pointer to the folio.
575  *
576  * Checks if the folio has MemcgKmem flag set. The caller must ensure
577  * that the folio has an associated memory cgroup. It's not safe to call
578  * this function against some types of folios, e.g. slab folios.
579  */
580 static inline bool folio_memcg_kmem(struct folio *folio)
581 {
582 	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
583 	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
584 	return folio->memcg_data & MEMCG_DATA_KMEM;
585 }
586 
587 static inline bool PageMemcgKmem(struct page *page)
588 {
589 	return folio_memcg_kmem(page_folio(page));
590 }
591 
592 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
593 {
594 	return (memcg == root_mem_cgroup);
595 }
596 
597 static inline bool mem_cgroup_disabled(void)
598 {
599 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
600 }
601 
602 static inline void mem_cgroup_protection(struct mem_cgroup *root,
603 					 struct mem_cgroup *memcg,
604 					 unsigned long *min,
605 					 unsigned long *low)
606 {
607 	*min = *low = 0;
608 
609 	if (mem_cgroup_disabled())
610 		return;
611 
612 	/*
613 	 * There is no reclaim protection applied to a targeted reclaim.
614 	 * We special-case it here because
615 	 * mem_cgroup_calculate_protection() is not robust enough to keep
616 	 * the protection invariant for calculated effective values for
617 	 * parallel reclaimers with different reclaim target. This is
618 	 * especially a problem for tail memcgs (as they have pages on LRU)
619 	 * which would want to have effective values 0 for targeted reclaim
620 	 * but a different value for external reclaim.
621 	 *
622 	 * Example
623 	 * Let's have global and A's reclaim in parallel:
624 	 *  |
625 	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
626 	 *  |\
627 	 *  | C (low = 1G, usage = 2.5G)
628 	 *  B (low = 1G, usage = 0.5G)
629 	 *
630 	 * For the global reclaim
631 	 * A.elow = A.low
632 	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
633 	 * C.elow = min(C.usage, C.low)
634 	 *
635 	 * With the effective values resetting we have A reclaim
636 	 * A.elow = 0
637 	 * B.elow = B.low
638 	 * C.elow = C.low
639 	 *
640 	 * If the global reclaim races with A's reclaim then
641 	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
642 	 * is possible and reclaiming B would violate the protection.
643 	 *
644 	 */
645 	if (root == memcg)
646 		return;
647 
648 	*min = READ_ONCE(memcg->memory.emin);
649 	*low = READ_ONCE(memcg->memory.elow);
650 }
651 
652 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
653 				     struct mem_cgroup *memcg);
654 
655 static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
656 					  struct mem_cgroup *memcg)
657 {
658 	/*
659 	 * The root memcg doesn't account charges, and doesn't support
660 	 * protection. The target memcg's protection is ignored, see
661 	 * mem_cgroup_calculate_protection() and mem_cgroup_protection()
662 	 */
663 	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
664 		memcg == target;
665 }
666 
667 static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
668 					struct mem_cgroup *memcg)
669 {
670 	if (mem_cgroup_unprotected(target, memcg))
671 		return false;
672 
673 	return READ_ONCE(memcg->memory.elow) >=
674 		page_counter_read(&memcg->memory);
675 }
676 
677 static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
678 					struct mem_cgroup *memcg)
679 {
680 	if (mem_cgroup_unprotected(target, memcg))
681 		return false;
682 
683 	return READ_ONCE(memcg->memory.emin) >=
684 		page_counter_read(&memcg->memory);
685 }
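/*
 * Illustrative sketch (not from mainline): reclaim is expected to refresh the
 * effective protection values and then consult them, roughly in the spirit of
 * shrink_node_memcgs():
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	if (mem_cgroup_below_min(target_memcg, memcg)) {
 *		...			(hard protection, skip this memcg)
 *	} else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *		...			(soft protection, usually skipped too)
 *	}
 */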
686 
687 void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
688 
689 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
690 
691 /**
692  * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
693  * @folio: Folio to charge.
694  * @mm: mm context of the allocating task.
695  * @gfp: Reclaim mode.
696  *
697  * Try to charge @folio to the memcg that @mm belongs to, reclaiming
698  * pages according to @gfp if necessary.  If @mm is NULL, try to
699  * charge to the active memcg.
700  *
701  * Do not use this for folios allocated for swapin.
702  *
703  * Return: 0 on success. Otherwise, an error code is returned.
704  */
705 static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
706 				    gfp_t gfp)
707 {
708 	if (mem_cgroup_disabled())
709 		return 0;
710 	return __mem_cgroup_charge(folio, mm, gfp);
711 }
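/*
 * Illustrative sketch (not from mainline): charging a freshly allocated folio
 * before it becomes visible, and dropping it if the charge fails.
 * folio_alloc()/folio_put() come from <linux/gfp.h> and <linux/mm.h>.
 *
 *	folio = folio_alloc(GFP_KERNEL, 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, mm, GFP_KERNEL)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	...				(insert the folio wherever it belongs)
 */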
712 
713 int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
714 		long nr_pages);
715 
716 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
717 				  gfp_t gfp, swp_entry_t entry);
718 
719 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
720 
721 void __mem_cgroup_uncharge(struct folio *folio);
722 
723 /**
724  * mem_cgroup_uncharge - Uncharge a folio.
725  * @folio: Folio to uncharge.
726  *
727  * Uncharge a folio previously charged with mem_cgroup_charge().
728  */
729 static inline void mem_cgroup_uncharge(struct folio *folio)
730 {
731 	if (mem_cgroup_disabled())
732 		return;
733 	__mem_cgroup_uncharge(folio);
734 }
735 
736 void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
737 static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
738 {
739 	if (mem_cgroup_disabled())
740 		return;
741 	__mem_cgroup_uncharge_folios(folios);
742 }
743 
744 void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
745 void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
746 void mem_cgroup_migrate(struct folio *old, struct folio *new);
747 
748 /**
749  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
750  * @memcg: memcg of the wanted lruvec
751  * @pgdat: pglist_data
752  *
753  * Returns the lru list vector holding pages for a given @memcg &
754  * @pgdat combination. This can be the node lruvec, if the memory
755  * controller is disabled.
756  */
757 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
758 					       struct pglist_data *pgdat)
759 {
760 	struct mem_cgroup_per_node *mz;
761 	struct lruvec *lruvec;
762 
763 	if (mem_cgroup_disabled()) {
764 		lruvec = &pgdat->__lruvec;
765 		goto out;
766 	}
767 
768 	if (!memcg)
769 		memcg = root_mem_cgroup;
770 
771 	mz = memcg->nodeinfo[pgdat->node_id];
772 	lruvec = &mz->lruvec;
773 out:
774 	/*
775 	 * Since a node can be onlined after the mem_cgroup was created,
776 	 * we have to be prepared to initialize lruvec->pgdat here;
777 	 * and if offlined then reonlined, we need to reinitialize it.
778 	 */
779 	if (unlikely(lruvec->pgdat != pgdat))
780 		lruvec->pgdat = pgdat;
781 	return lruvec;
782 }
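/*
 * Illustrative sketch (not from mainline): looking up the lruvec for a
 * (memcg, node) pair and sampling one of its node stats.  NODE_DATA() comes
 * from <linux/mmzone.h>; nid is assumed to be a valid online node.
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 *	unsigned long active_file = lruvec_page_state(lruvec, NR_ACTIVE_FILE);
 */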
783 
784 /**
785  * folio_lruvec - return lruvec for isolating/putting an LRU folio
786  * @folio: Pointer to the folio.
787  *
788  * This function relies on folio->mem_cgroup being stable.
789  */
790 static inline struct lruvec *folio_lruvec(struct folio *folio)
791 {
792 	struct mem_cgroup *memcg = folio_memcg(folio);
793 
794 	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
795 	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
796 }
797 
798 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
799 
800 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
801 
802 struct mem_cgroup *get_mem_cgroup_from_current(void);
803 
804 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
805 
806 struct lruvec *folio_lruvec_lock(struct folio *folio);
807 struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
808 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
809 						unsigned long *flags);
810 
811 #ifdef CONFIG_DEBUG_VM
812 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
813 #else
814 static inline
815 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
816 {
817 }
818 #endif
819 
820 static inline
821 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
822 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
823 }
824 
825 static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
826 {
827 	return percpu_ref_tryget(&objcg->refcnt);
828 }
829 
830 static inline void obj_cgroup_get(struct obj_cgroup *objcg)
831 {
832 	percpu_ref_get(&objcg->refcnt);
833 }
834 
835 static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
836 				       unsigned long nr)
837 {
838 	percpu_ref_get_many(&objcg->refcnt, nr);
839 }
840 
841 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
842 {
843 	if (objcg)
844 		percpu_ref_put(&objcg->refcnt);
845 }
846 
847 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
848 {
849 	return !memcg || css_tryget(&memcg->css);
850 }
851 
852 static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
853 {
854 	return !memcg || css_tryget_online(&memcg->css);
855 }
856 
857 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
858 {
859 	if (memcg)
860 		css_put(&memcg->css);
861 }
862 
863 #define mem_cgroup_from_counter(counter, member)	\
864 	container_of(counter, struct mem_cgroup, member)
865 
866 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
867 				   struct mem_cgroup *,
868 				   struct mem_cgroup_reclaim_cookie *);
869 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
870 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
871 			   int (*)(struct task_struct *, void *), void *arg);
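/*
 * Illustrative sketch (not from mainline) of the usual hierarchy walk with
 * the iterator declared above: mem_cgroup_iter() starts/continues a
 * css-pinned pre-order walk below @root, and mem_cgroup_iter_break() drops
 * the pinned reference when the walk is abandoned early.  should_stop() is a
 * hypothetical predicate standing in for caller-specific logic.
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */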
872 
873 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
874 {
875 	if (mem_cgroup_disabled())
876 		return 0;
877 
878 	return memcg->id.id;
879 }
880 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
881 
882 #ifdef CONFIG_SHRINKER_DEBUG
883 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
884 {
885 	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
886 }
887 
888 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
889 #endif
890 
891 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
892 {
893 	return mem_cgroup_from_css(seq_css(m));
894 }
895 
896 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
897 {
898 	struct mem_cgroup_per_node *mz;
899 
900 	if (mem_cgroup_disabled())
901 		return NULL;
902 
903 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
904 	return mz->memcg;
905 }
906 
907 /**
908  * parent_mem_cgroup - find the accounting parent of a memcg
909  * @memcg: memcg whose parent to find
910  *
911  * Returns the parent memcg, or NULL if this is the root.
912  */
913 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
914 {
915 	return mem_cgroup_from_css(memcg->css.parent);
916 }
917 
918 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
919 			      struct mem_cgroup *root)
920 {
921 	if (root == memcg)
922 		return true;
923 	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
924 }
925 
926 static inline bool mm_match_cgroup(struct mm_struct *mm,
927 				   struct mem_cgroup *memcg)
928 {
929 	struct mem_cgroup *task_memcg;
930 	bool match = false;
931 
932 	rcu_read_lock();
933 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
934 	if (task_memcg)
935 		match = mem_cgroup_is_descendant(task_memcg, memcg);
936 	rcu_read_unlock();
937 	return match;
938 }
939 
940 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
941 ino_t page_cgroup_ino(struct page *page);
942 
943 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
944 {
945 	if (mem_cgroup_disabled())
946 		return true;
947 	return !!(memcg->css.flags & CSS_ONLINE);
948 }
949 
950 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
951 		int zid, int nr_pages);
952 
953 static inline
954 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
955 		enum lru_list lru, int zone_idx)
956 {
957 	struct mem_cgroup_per_node *mz;
958 
959 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
960 	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
961 }
962 
963 void mem_cgroup_handle_over_high(gfp_t gfp_mask);
964 
965 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
966 
967 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
968 
969 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
970 				struct task_struct *p);
971 
972 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
973 
974 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
975 					    struct mem_cgroup *oom_domain);
976 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
977 
978 void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
979 		       int val);
980 
981 /* idx can be of type enum memcg_stat_item or node_stat_item */
982 static inline void mod_memcg_state(struct mem_cgroup *memcg,
983 				   enum memcg_stat_item idx, int val)
984 {
985 	unsigned long flags;
986 
987 	local_irq_save(flags);
988 	__mod_memcg_state(memcg, idx, val);
989 	local_irq_restore(flags);
990 }
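/*
 * Illustrative sketch (not from mainline): __mod_memcg_state() assumes
 * interrupts are already disabled, while mod_memcg_state() above is the
 * irq-safe wrapper usable from ordinary process context:
 *
 *	unsigned long flags;
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 *	...
 *	local_irq_save(flags);
 *	__mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
 *	local_irq_restore(flags);
 */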
991 
992 static inline void mod_memcg_page_state(struct page *page,
993 					enum memcg_stat_item idx, int val)
994 {
995 	struct mem_cgroup *memcg;
996 
997 	if (mem_cgroup_disabled())
998 		return;
999 
1000 	rcu_read_lock();
1001 	memcg = folio_memcg(page_folio(page));
1002 	if (memcg)
1003 		mod_memcg_state(memcg, idx, val);
1004 	rcu_read_unlock();
1005 }
1006 
1007 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
1008 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
1009 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1010 				      enum node_stat_item idx);
1011 
1012 void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
1013 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
1014 
1015 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
1016 
1017 static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1018 					 int val)
1019 {
1020 	unsigned long flags;
1021 
1022 	local_irq_save(flags);
1023 	__mod_lruvec_kmem_state(p, idx, val);
1024 	local_irq_restore(flags);
1025 }
1026 
1027 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
1028 			  unsigned long count);
1029 
1030 static inline void count_memcg_events(struct mem_cgroup *memcg,
1031 				      enum vm_event_item idx,
1032 				      unsigned long count)
1033 {
1034 	unsigned long flags;
1035 
1036 	local_irq_save(flags);
1037 	__count_memcg_events(memcg, idx, count);
1038 	local_irq_restore(flags);
1039 }
1040 
1041 static inline void count_memcg_folio_events(struct folio *folio,
1042 		enum vm_event_item idx, unsigned long nr)
1043 {
1044 	struct mem_cgroup *memcg = folio_memcg(folio);
1045 
1046 	if (memcg)
1047 		count_memcg_events(memcg, idx, nr);
1048 }
1049 
1050 static inline void count_memcg_events_mm(struct mm_struct *mm,
1051 					enum vm_event_item idx, unsigned long count)
1052 {
1053 	struct mem_cgroup *memcg;
1054 
1055 	if (mem_cgroup_disabled())
1056 		return;
1057 
1058 	rcu_read_lock();
1059 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1060 	if (likely(memcg))
1061 		count_memcg_events(memcg, idx, count);
1062 	rcu_read_unlock();
1063 }
1064 
1065 static inline void count_memcg_event_mm(struct mm_struct *mm,
1066 					enum vm_event_item idx)
1067 {
1068 	count_memcg_events_mm(mm, idx, 1);
1069 }
1070 
1071 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1072 				      enum memcg_memory_event event)
1073 {
1074 	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
1075 			  event == MEMCG_SWAP_FAIL;
1076 
1077 	atomic_long_inc(&memcg->memory_events_local[event]);
1078 	if (!swap_event)
1079 		cgroup_file_notify(&memcg->events_local_file);
1080 
1081 	do {
1082 		atomic_long_inc(&memcg->memory_events[event]);
1083 		if (swap_event)
1084 			cgroup_file_notify(&memcg->swap_events_file);
1085 		else
1086 			cgroup_file_notify(&memcg->events_file);
1087 
1088 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1089 			break;
1090 		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1091 			break;
1092 	} while ((memcg = parent_mem_cgroup(memcg)) &&
1093 		 !mem_cgroup_is_root(memcg));
1094 }
1095 
1096 static inline void memcg_memory_event_mm(struct mm_struct *mm,
1097 					 enum memcg_memory_event event)
1098 {
1099 	struct mem_cgroup *memcg;
1100 
1101 	if (mem_cgroup_disabled())
1102 		return;
1103 
1104 	rcu_read_lock();
1105 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1106 	if (likely(memcg))
1107 		memcg_memory_event(memcg, event);
1108 	rcu_read_unlock();
1109 }
1110 
1111 void split_page_memcg(struct page *head, int old_order, int new_order);
1112 
1113 extern int mem_cgroup_init(void);
1114 #else /* CONFIG_MEMCG */
1115 
1116 #define MEM_CGROUP_ID_SHIFT	0
1117 
1118 static inline struct mem_cgroup *folio_memcg(struct folio *folio)
1119 {
1120 	return NULL;
1121 }
1122 
1123 static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
1124 {
1125 	WARN_ON_ONCE(!rcu_read_lock_held());
1126 	return NULL;
1127 }
1128 
1129 static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
1130 {
1131 	return NULL;
1132 }
1133 
1134 static inline struct mem_cgroup *page_memcg_check(struct page *page)
1135 {
1136 	return NULL;
1137 }
1138 
1139 static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
1140 {
1141 	return NULL;
1142 }
1143 
1144 static inline bool folio_memcg_kmem(struct folio *folio)
1145 {
1146 	return false;
1147 }
1148 
1149 static inline bool PageMemcgKmem(struct page *page)
1150 {
1151 	return false;
1152 }
1153 
1154 static inline int mem_cgroup_move_account(struct folio *folio,
1155 					  bool compound,
1156 					  struct mem_cgroup *from,
1157 					  struct mem_cgroup *to)
1158 {
1159 	return 0;
1160 }
1161 
1162 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1163 {
1164 	return true;
1165 }
1166 
1167 static inline bool mem_cgroup_disabled(void)
1168 {
1169 	return true;
1170 }
1171 
1172 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1173 				      enum memcg_memory_event event)
1174 {
1175 }
1176 
1177 static inline void memcg_memory_event_mm(struct mm_struct *mm,
1178 					 enum memcg_memory_event event)
1179 {
1180 }
1181 
1182 static inline void mem_cgroup_protection(struct mem_cgroup *root,
1183 					 struct mem_cgroup *memcg,
1184 					 unsigned long *min,
1185 					 unsigned long *low)
1186 {
1187 	*min = *low = 0;
1188 }
1189 
1190 static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
1191 						   struct mem_cgroup *memcg)
1192 {
1193 }
1194 
1195 static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
1196 					  struct mem_cgroup *memcg)
1197 {
1198 	return true;
1199 }
1200 static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
1201 					struct mem_cgroup *memcg)
1202 {
1203 	return false;
1204 }
1205 
1206 static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
1207 					struct mem_cgroup *memcg)
1208 {
1209 	return false;
1210 }
1211 
1212 static inline void mem_cgroup_commit_charge(struct folio *folio,
1213 		struct mem_cgroup *memcg)
1214 {
1215 }
1216 
1217 static inline int mem_cgroup_charge(struct folio *folio,
1218 		struct mm_struct *mm, gfp_t gfp)
1219 {
1220 	return 0;
1221 }
1222 
1223 static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
1224 		gfp_t gfp, long nr_pages)
1225 {
1226 	return 0;
1227 }
1228 
1229 static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
1230 			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
1231 {
1232 	return 0;
1233 }
1234 
1235 static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr)
1236 {
1237 }
1238 
1239 static inline void mem_cgroup_uncharge(struct folio *folio)
1240 {
1241 }
1242 
1243 static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
1244 {
1245 }
1246 
1247 static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
1248 		unsigned int nr_pages)
1249 {
1250 }
1251 
1252 static inline void mem_cgroup_replace_folio(struct folio *old,
1253 		struct folio *new)
1254 {
1255 }
1256 
1257 static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
1258 {
1259 }
1260 
1261 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1262 					       struct pglist_data *pgdat)
1263 {
1264 	return &pgdat->__lruvec;
1265 }
1266 
1267 static inline struct lruvec *folio_lruvec(struct folio *folio)
1268 {
1269 	struct pglist_data *pgdat = folio_pgdat(folio);
1270 	return &pgdat->__lruvec;
1271 }
1272 
1273 static inline
1274 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1275 {
1276 }
1277 
1278 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1279 {
1280 	return NULL;
1281 }
1282 
1283 static inline bool mm_match_cgroup(struct mm_struct *mm,
1284 		struct mem_cgroup *memcg)
1285 {
1286 	return true;
1287 }
1288 
1289 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1290 {
1291 	return NULL;
1292 }
1293 
1294 static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
1295 {
1296 	return NULL;
1297 }
1298 
1299 static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
1300 {
1301 	return NULL;
1302 }
1303 
1304 static inline
1305 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
1306 {
1307 	return NULL;
1308 }
1309 
1310 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
1311 {
1312 }
1313 
1314 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
1315 {
1316 	return true;
1317 }
1318 
1319 static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
1320 {
1321 	return true;
1322 }
1323 
1324 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1325 {
1326 }
1327 
1328 static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
1329 {
1330 	struct pglist_data *pgdat = folio_pgdat(folio);
1331 
1332 	spin_lock(&pgdat->__lruvec.lru_lock);
1333 	return &pgdat->__lruvec;
1334 }
1335 
1336 static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1337 {
1338 	struct pglist_data *pgdat = folio_pgdat(folio);
1339 
1340 	spin_lock_irq(&pgdat->__lruvec.lru_lock);
1341 	return &pgdat->__lruvec;
1342 }
1343 
1344 static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1345 		unsigned long *flagsp)
1346 {
1347 	struct pglist_data *pgdat = folio_pgdat(folio);
1348 
1349 	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
1350 	return &pgdat->__lruvec;
1351 }
1352 
1353 static inline struct mem_cgroup *
1354 mem_cgroup_iter(struct mem_cgroup *root,
1355 		struct mem_cgroup *prev,
1356 		struct mem_cgroup_reclaim_cookie *reclaim)
1357 {
1358 	return NULL;
1359 }
1360 
1361 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
1362 					 struct mem_cgroup *prev)
1363 {
1364 }
1365 
1366 static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1367 		int (*fn)(struct task_struct *, void *), void *arg)
1368 {
1369 }
1370 
1371 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1372 {
1373 	return 0;
1374 }
1375 
1376 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
1377 {
1378 	WARN_ON_ONCE(id);
1379 	/* XXX: This should always return root_mem_cgroup */
1380 	return NULL;
1381 }
1382 
1383 #ifdef CONFIG_SHRINKER_DEBUG
1384 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
1385 {
1386 	return 0;
1387 }
1388 
1389 static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
1390 {
1391 	return NULL;
1392 }
1393 #endif
1394 
1395 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
1396 {
1397 	return NULL;
1398 }
1399 
1400 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
1401 {
1402 	return NULL;
1403 }
1404 
1405 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1406 {
1407 	return true;
1408 }
1409 
1410 static inline
1411 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
1412 		enum lru_list lru, int zone_idx)
1413 {
1414 	return 0;
1415 }
1416 
1417 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1418 {
1419 	return 0;
1420 }
1421 
1422 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1423 {
1424 	return 0;
1425 }
1426 
1427 static inline void
1428 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1429 {
1430 }
1431 
1432 static inline void
1433 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1434 {
1435 }
1436 
1437 static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
1438 {
1439 }
1440 
1441 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1442 	struct task_struct *victim, struct mem_cgroup *oom_domain)
1443 {
1444 	return NULL;
1445 }
1446 
1447 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1448 {
1449 }
1450 
1451 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1452 				     enum memcg_stat_item idx,
1453 				     int nr)
1454 {
1455 }
1456 
1457 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1458 				   enum memcg_stat_item idx,
1459 				   int nr)
1460 {
1461 }
1462 
1463 static inline void mod_memcg_page_state(struct page *page,
1464 					enum memcg_stat_item idx, int val)
1465 {
1466 }
1467 
1468 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1469 {
1470 	return 0;
1471 }
1472 
1473 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1474 					      enum node_stat_item idx)
1475 {
1476 	return node_page_state(lruvec_pgdat(lruvec), idx);
1477 }
1478 
1479 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1480 						    enum node_stat_item idx)
1481 {
1482 	return node_page_state(lruvec_pgdat(lruvec), idx);
1483 }
1484 
1485 static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
1486 {
1487 }
1488 
1489 static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
1490 {
1491 }
1492 
1493 static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1494 					   int val)
1495 {
1496 	struct page *page = virt_to_head_page(p);
1497 
1498 	__mod_node_page_state(page_pgdat(page), idx, val);
1499 }
1500 
1501 static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1502 					 int val)
1503 {
1504 	struct page *page = virt_to_head_page(p);
1505 
1506 	mod_node_page_state(page_pgdat(page), idx, val);
1507 }
1508 
1509 static inline void count_memcg_events(struct mem_cgroup *memcg,
1510 				      enum vm_event_item idx,
1511 				      unsigned long count)
1512 {
1513 }
1514 
1515 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1516 					enum vm_event_item idx,
1517 					unsigned long count)
1518 {
1519 }
1520 
1521 static inline void count_memcg_folio_events(struct folio *folio,
1522 		enum vm_event_item idx, unsigned long nr)
1523 {
1524 }
1525 
1526 static inline void count_memcg_events_mm(struct mm_struct *mm,
1527 					enum vm_event_item idx, unsigned long count)
1528 {
1529 }
1530 
1531 static inline
1532 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1533 {
1534 }
1535 
1536 static inline void split_page_memcg(struct page *head, int old_order, int new_order)
1537 {
1538 }
1539 
1540 static inline int mem_cgroup_init(void) { return 0; }
1541 #endif /* CONFIG_MEMCG */
1542 
1543 /*
1544  * Extended information for slab objects stored as an array in page->memcg_data
1545  * if MEMCG_DATA_OBJEXTS is set.
1546  */
1547 struct slabobj_ext {
1548 #ifdef CONFIG_MEMCG
1549 	struct obj_cgroup *objcg;
1550 #endif
1551 #ifdef CONFIG_MEM_ALLOC_PROFILING
1552 	union codetag_ref ref;
1553 #endif
1554 } __aligned(8);
1555 
1556 static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
1557 {
1558 	__mod_lruvec_kmem_state(p, idx, 1);
1559 }
1560 
1561 static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
1562 {
1563 	__mod_lruvec_kmem_state(p, idx, -1);
1564 }
1565 
1566 static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1567 {
1568 	struct mem_cgroup *memcg;
1569 
1570 	memcg = lruvec_memcg(lruvec);
1571 	if (!memcg)
1572 		return NULL;
1573 	memcg = parent_mem_cgroup(memcg);
1574 	if (!memcg)
1575 		return NULL;
1576 	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1577 }
1578 
1579 static inline void unlock_page_lruvec(struct lruvec *lruvec)
1580 {
1581 	spin_unlock(&lruvec->lru_lock);
1582 }
1583 
1584 static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
1585 {
1586 	spin_unlock_irq(&lruvec->lru_lock);
1587 }
1588 
1589 static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
1590 		unsigned long flags)
1591 {
1592 	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
1593 }
1594 
1595 /* Test requires a stable folio->memcg binding, see folio_memcg() */
1596 static inline bool folio_matches_lruvec(struct folio *folio,
1597 		struct lruvec *lruvec)
1598 {
1599 	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
1600 	       lruvec_memcg(lruvec) == folio_memcg(folio);
1601 }
1602 
1603 /* Don't lock again iff page's lruvec locked */
1604 static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
1605 		struct lruvec *locked_lruvec)
1606 {
1607 	if (locked_lruvec) {
1608 		if (folio_matches_lruvec(folio, locked_lruvec))
1609 			return locked_lruvec;
1610 
1611 		unlock_page_lruvec_irq(locked_lruvec);
1612 	}
1613 
1614 	return folio_lruvec_lock_irq(folio);
1615 }
1616 
1617 /* Don't lock again iff folio's lruvec locked */
1618 static inline void folio_lruvec_relock_irqsave(struct folio *folio,
1619 		struct lruvec **lruvecp, unsigned long *flags)
1620 {
1621 	if (*lruvecp) {
1622 		if (folio_matches_lruvec(folio, *lruvecp))
1623 			return;
1624 
1625 		unlock_page_lruvec_irqrestore(*lruvecp, *flags);
1626 	}
1627 
1628 	*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
1629 }
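/*
 * Illustrative sketch (not from mainline) of the batched LRU pattern the two
 * relock helpers above are built for, roughly as used by the folio batch code
 * in mm/swap.c.  fbatch is assumed to be a struct folio_batch from
 * <linux/pagevec.h>.
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	unsigned int i;
 *
 *	for (i = 0; i < folio_batch_count(fbatch); i++) {
 *		struct folio *folio = fbatch->folios[i];
 *
 *		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 *		...			(move the folio between LRU lists)
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */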
1630 
1631 #ifdef CONFIG_CGROUP_WRITEBACK
1632 
1633 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1634 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1635 			 unsigned long *pheadroom, unsigned long *pdirty,
1636 			 unsigned long *pwriteback);
1637 
1638 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
1639 					     struct bdi_writeback *wb);
1640 
1641 static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1642 						  struct bdi_writeback *wb)
1643 {
1644 	struct mem_cgroup *memcg;
1645 
1646 	if (mem_cgroup_disabled())
1647 		return;
1648 
1649 	memcg = folio_memcg(folio);
1650 	if (unlikely(memcg && &memcg->css != wb->memcg_css))
1651 		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
1652 }
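
/*
 * Illustrative sketch (hypothetical dirtying path): writeback code calls
 * the helper above when a folio is dirtied against a particular
 * bdi_writeback, so that dirtying against a foreign memcg's writeback
 * domain can be detected and corrected later:
 *
 *	// folio has just been marked dirty and attributed to wb
 *	mem_cgroup_track_foreign_dirty(folio, wb);
 */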
1653 
1654 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1655 
1656 #else	/* CONFIG_CGROUP_WRITEBACK */
1657 
1658 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1659 {
1660 	return NULL;
1661 }
1662 
1663 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1664 				       unsigned long *pfilepages,
1665 				       unsigned long *pheadroom,
1666 				       unsigned long *pdirty,
1667 				       unsigned long *pwriteback)
1668 {
1669 }
1670 
1671 static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1672 						  struct bdi_writeback *wb)
1673 {
1674 }
1675 
1676 static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1677 {
1678 }
1679 
1680 #endif	/* CONFIG_CGROUP_WRITEBACK */
1681 
1682 struct sock;
1683 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
1684 			     gfp_t gfp_mask);
1685 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1686 #ifdef CONFIG_MEMCG
1687 extern struct static_key_false memcg_sockets_enabled_key;
1688 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1689 void mem_cgroup_sk_alloc(struct sock *sk);
1690 void mem_cgroup_sk_free(struct sock *sk);
1691 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1692 {
1693 #ifdef CONFIG_MEMCG_V1
1694 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1695 		return !!memcg->tcpmem_pressure;
1696 #endif /* CONFIG_MEMCG_V1 */
1697 	do {
1698 		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
1699 			return true;
1700 	} while ((memcg = parent_mem_cgroup(memcg)));
1701 	return false;
1702 }
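
/*
 * Illustrative sketch, not part of this header: socket code typically
 * gates memory-pressure backoff on the helper above, assuming the
 * socket's memcg is cached in sk->sk_memcg (as the networking core does):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return true;	// treat this socket as under pressure
 */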
1703 
1704 int alloc_shrinker_info(struct mem_cgroup *memcg);
1705 void free_shrinker_info(struct mem_cgroup *memcg);
1706 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
1707 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
1708 #else
1709 #define mem_cgroup_sockets_enabled 0
1710 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
1711 static inline void mem_cgroup_sk_free(struct sock *sk) { };
1712 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1713 {
1714 	return false;
1715 }
1716 
1717 static inline void set_shrinker_bit(struct mem_cgroup *memcg,
1718 				    int nid, int shrinker_id)
1719 {
1720 }
1721 #endif
1722 
1723 #ifdef CONFIG_MEMCG
1724 bool mem_cgroup_kmem_disabled(void);
1725 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1726 void __memcg_kmem_uncharge_page(struct page *page, int order);
1727 
1728 /*
1729  * The returned objcg pointer is safe to use without additional
1730  * protection within a scope. The scope is defined either by
1731  * the current task (similar to the "current" global variable)
1732  * or by a set_active_memcg() pair.
1733  * Please use obj_cgroup_get() to get a reference if the pointer
1734  * needs to be used outside of the local scope.
1735  */
1736 struct obj_cgroup *current_obj_cgroup(void);
1737 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
1738 
1739 static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
1740 {
1741 	struct obj_cgroup *objcg = current_obj_cgroup();
1742 
1743 	if (objcg)
1744 		obj_cgroup_get(objcg);
1745 
1746 	return objcg;
1747 }
1748 
1749 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
1750 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
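
/*
 * Illustrative sketch (hypothetical caller; "size" is assumed): within
 * the current scope the pointer from current_obj_cgroup() can be used
 * directly; pin it with obj_cgroup_get()/obj_cgroup_put() only if it
 * must outlive that scope:
 *
 *	struct obj_cgroup *objcg = current_obj_cgroup();
 *
 *	if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size))
 *		return -ENOMEM;		// charge failed
 *	// to stash objcg somewhere longer-lived, take a reference first:
 *	//	obj_cgroup_get(objcg);
 *	// and drop it later with obj_cgroup_put(objcg).
 */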
1751 
1752 extern struct static_key_false memcg_bpf_enabled_key;
1753 static inline bool memcg_bpf_enabled(void)
1754 {
1755 	return static_branch_likely(&memcg_bpf_enabled_key);
1756 }
1757 
1758 extern struct static_key_false memcg_kmem_online_key;
1759 
1760 static inline bool memcg_kmem_online(void)
1761 {
1762 	return static_branch_likely(&memcg_kmem_online_key);
1763 }
1764 
1765 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1766 					 int order)
1767 {
1768 	if (memcg_kmem_online())
1769 		return __memcg_kmem_charge_page(page, gfp, order);
1770 	return 0;
1771 }
1772 
1773 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1774 {
1775 	if (memcg_kmem_online())
1776 		__memcg_kmem_uncharge_page(page, order);
1777 }
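
/*
 * Illustrative sketch (hypothetical allocation site; gfp and order are
 * assumed): an explicitly accounted page allocation pairs the wrappers
 * above around the page's lifetime.  In practice most callers simply pass
 * __GFP_ACCOUNT and let the page allocator do the charging itself:
 *
 *	struct page *page = alloc_pages(gfp, order);
 *
 *	if (page && memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);	// charge failed, back out
 *		page = NULL;
 *	}
 *	// ... use the page ...
 *	memcg_kmem_uncharge_page(page, order);
 *	__free_pages(page, order);
 */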
1778 
1779 /*
1780  * A helper for accessing memcg's kmem_id, used for getting
1781  * corresponding LRU lists.
1782  * the corresponding LRU lists.
1783 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1784 {
1785 	return memcg ? memcg->kmemcg_id : -1;
1786 }
1787 
1788 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
1789 
1790 static inline void count_objcg_events(struct obj_cgroup *objcg,
1791 				      enum vm_event_item idx,
1792 				      unsigned long count)
1793 {
1794 	struct mem_cgroup *memcg;
1795 
1796 	if (!memcg_kmem_online())
1797 		return;
1798 
1799 	rcu_read_lock();
1800 	memcg = obj_cgroup_memcg(objcg);
1801 	count_memcg_events(memcg, idx, count);
1802 	rcu_read_unlock();
1803 }
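
/*
 * Illustrative sketch (hypothetical caller): a subsystem that charges
 * objects to an obj_cgroup can account vm events to the owning memcg,
 * e.g. a zswap-style store path might bump a per-cgroup counter with:
 *
 *	count_objcg_events(objcg, ZSWPOUT, 1);
 */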
1804 
1805 #else
1806 static inline bool mem_cgroup_kmem_disabled(void)
1807 {
1808 	return true;
1809 }
1810 
1811 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1812 					 int order)
1813 {
1814 	return 0;
1815 }
1816 
1817 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1818 {
1819 }
1820 
1821 static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1822 					   int order)
1823 {
1824 	return 0;
1825 }
1826 
1827 static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
1828 {
1829 }
1830 
1831 static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
1832 {
1833 	return NULL;
1834 }
1835 
1836 static inline bool memcg_bpf_enabled(void)
1837 {
1838 	return false;
1839 }
1840 
1841 static inline bool memcg_kmem_online(void)
1842 {
1843 	return false;
1844 }
1845 
1846 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1847 {
1848 	return -1;
1849 }
1850 
1851 static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
1852 {
1853 	return NULL;
1854 }
1855 
1856 static inline void count_objcg_events(struct obj_cgroup *objcg,
1857 				      enum vm_event_item idx,
1858 				      unsigned long count)
1859 {
1860 }
1861 
1862 #endif /* CONFIG_MEMCG */
1863 
1864 #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
1865 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
1866 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
1867 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
1868 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
1869 #else
1870 static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
1871 {
1872 	return true;
1873 }
1874 static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
1875 					   size_t size)
1876 {
1877 }
1878 static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
1879 					     size_t size)
1880 {
1881 }
1882 static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
1883 {
1884 	/* if zswap is disabled, do not block pages from going to the swap device */
1885 	return true;
1886 }
1887 #endif
1888 
1889 
1890 /* Cgroup v1-related declarations */
1891 
1892 #ifdef CONFIG_MEMCG_V1
1893 unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
1894 					gfp_t gfp_mask,
1895 					unsigned long *total_scanned);
1896 
1897 bool mem_cgroup_oom_synchronize(bool wait);
1898 
1899 static inline bool task_in_memcg_oom(struct task_struct *p)
1900 {
1901 	return p->memcg_in_oom;
1902 }
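
/*
 * Illustrative sketch (hypothetical fault-return path): a task that hit a
 * memcg OOM while handling a fault completes the OOM handling before
 * returning to userspace:
 *
 *	if (task_in_memcg_oom(current))
 *		mem_cgroup_oom_synchronize(true);
 */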
1903 
1904 void folio_memcg_lock(struct folio *folio);
1905 void folio_memcg_unlock(struct folio *folio);
1906 
1907 /* try to stabilize folio_memcg() for all the pages in a memcg */
1908 static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
1909 {
1910 	rcu_read_lock();
1911 
1912 	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
1913 		return true;
1914 
1915 	rcu_read_unlock();
1916 	return false;
1917 }
1918 
1919 static inline void mem_cgroup_unlock_pages(void)
1920 {
1921 	rcu_read_unlock();
1922 }
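
/*
 * Illustrative sketch (hypothetical caller): the trylock/unlock pair above
 * brackets regions that need folio_memcg() to stay stable for folios
 * charged to memcg:
 *
 *	if (mem_cgroup_trylock_pages(memcg)) {
 *		// folio_memcg() is stable in here
 *		mem_cgroup_unlock_pages();
 *	}
 */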
1923 
1924 static inline void mem_cgroup_enter_user_fault(void)
1925 {
1926 	WARN_ON(current->in_user_fault);
1927 	current->in_user_fault = 1;
1928 }
1929 
1930 static inline void mem_cgroup_exit_user_fault(void)
1931 {
1932 	WARN_ON(!current->in_user_fault);
1933 	current->in_user_fault = 0;
1934 }
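
/*
 * Illustrative sketch (handle_user_fault() is hypothetical): handling of
 * userspace faults is bracketed by the pair above so that memcg OOM code
 * knows it may defer OOM handling to the fault-return path:
 *
 *	mem_cgroup_enter_user_fault();
 *	ret = handle_user_fault(vma, address, flags);
 *	mem_cgroup_exit_user_fault();
 */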
1935 
1936 #else /* CONFIG_MEMCG_V1 */
1937 static inline
1938 unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
1939 					gfp_t gfp_mask,
1940 					unsigned long *total_scanned)
1941 {
1942 	return 0;
1943 }
1944 
1945 static inline void folio_memcg_lock(struct folio *folio)
1946 {
1947 }
1948 
1949 static inline void folio_memcg_unlock(struct folio *folio)
1950 {
1951 }
1952 
1953 static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
1954 {
1955 	/* to match folio_memcg_rcu() */
1956 	rcu_read_lock();
1957 	return true;
1958 }
1959 
1960 static inline void mem_cgroup_unlock_pages(void)
1961 {
1962 	rcu_read_unlock();
1963 }
1964 
1965 static inline bool task_in_memcg_oom(struct task_struct *p)
1966 {
1967 	return false;
1968 }
1969 
1970 static inline bool mem_cgroup_oom_synchronize(bool wait)
1971 {
1972 	return false;
1973 }
1974 
1975 static inline void mem_cgroup_enter_user_fault(void)
1976 {
1977 }
1978 
1979 static inline void mem_cgroup_exit_user_fault(void)
1980 {
1981 }
1982 
1983 #endif /* CONFIG_MEMCG_V1 */
1984 
1985 #endif /* _LINUX_MEMCONTROL_H */
1986