1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* memcontrol.h - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 */
10
11 #ifndef _LINUX_MEMCONTROL_H
12 #define _LINUX_MEMCONTROL_H
13 #include <linux/cgroup.h>
14 #include <linux/vm_event_item.h>
15 #include <linux/hardirq.h>
16 #include <linux/jump_label.h>
17 #include <linux/page_counter.h>
18 #include <linux/vmpressure.h>
19 #include <linux/eventfd.h>
20 #include <linux/mm.h>
21 #include <linux/vmstat.h>
22 #include <linux/writeback.h>
23 #include <linux/page-flags.h>
24 #include <linux/android_kabi.h>
25
26 struct mem_cgroup;
27 struct obj_cgroup;
28 struct page;
29 struct mm_struct;
30 struct kmem_cache;
31
32 /* Cgroup-specific page state, on top of universal node page state */
33 enum memcg_stat_item {
34 MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
35 MEMCG_SOCK,
36 MEMCG_PERCPU_B,
37 MEMCG_VMALLOC,
38 MEMCG_KMEM,
39 MEMCG_ZSWAP_B,
40 MEMCG_ZSWAPPED,
41 MEMCG_NR_STAT,
42 };
43
44 enum memcg_memory_event {
45 MEMCG_LOW,
46 MEMCG_HIGH,
47 MEMCG_MAX,
48 MEMCG_OOM,
49 MEMCG_OOM_KILL,
50 MEMCG_OOM_GROUP_KILL,
51 MEMCG_SWAP_HIGH,
52 MEMCG_SWAP_MAX,
53 MEMCG_SWAP_FAIL,
54 MEMCG_NR_MEMORY_EVENTS,
55 };
56
57 struct mem_cgroup_reclaim_cookie {
58 pg_data_t *pgdat;
59 unsigned int generation;
60 };
61
62 #ifdef CONFIG_MEMCG
63
64 #define MEM_CGROUP_ID_SHIFT 16
65
66 struct mem_cgroup_id {
67 int id;
68 refcount_t ref;
69 };
70
71 /*
72 * The per-memcg event counter is incremented on every pagein/pageout. With
73 * THP, it is incremented by the number of pages. This counter is used
74 * to trigger periodic events; it is simpler and cheaper than using
75 * jiffies etc. to time periodic memcg events.
76 */
77 enum mem_cgroup_events_target {
78 MEM_CGROUP_TARGET_THRESH,
79 MEM_CGROUP_TARGET_SOFTLIMIT,
80 MEM_CGROUP_NTARGETS,
81 };
82
83 struct memcg_vmstats_percpu;
84 struct memcg_vmstats;
85
86 struct mem_cgroup_reclaim_iter {
87 struct mem_cgroup *position;
88 /* scan generation, increased every round-trip */
89 unsigned int generation;
90 };
91
92 /*
93 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
94 * shrinkers, which have elements charged to this memcg.
95 */
96 struct shrinker_info {
97 struct rcu_head rcu;
98 atomic_long_t *nr_deferred;
99 unsigned long *map;
100 int map_nr_max;
101 };
102
103 struct lruvec_stats_percpu {
104 /* Local (CPU and cgroup) state */
105 long state[NR_VM_NODE_STAT_ITEMS];
106
107 /* Delta calculation for lockless upward propagation */
108 long state_prev[NR_VM_NODE_STAT_ITEMS];
109 };
110
111 struct lruvec_stats {
112 /* Aggregated (CPU and subtree) state */
113 long state[NR_VM_NODE_STAT_ITEMS];
114
115 /* Non-hierarchical (CPU aggregated) state */
116 long state_local[NR_VM_NODE_STAT_ITEMS];
117
118 /* Pending child counts during tree propagation */
119 long state_pending[NR_VM_NODE_STAT_ITEMS];
120 };
121
122 /*
123 * per-node information in memory controller.
124 */
125 struct mem_cgroup_per_node {
126 struct lruvec lruvec;
127
128 struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
129 struct lruvec_stats lruvec_stats;
130
131 unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
132
133 struct mem_cgroup_reclaim_iter iter;
134
135 struct shrinker_info __rcu *shrinker_info;
136
137 struct rb_node tree_node; /* RB tree node */
138 unsigned long usage_in_excess;/* Set to the value by which */
139 /* the soft limit is exceeded*/
140 bool on_tree;
141 struct mem_cgroup *memcg; /* Back pointer, we cannot */
142 /* use container_of */
143
144 ANDROID_BACKPORT_RESERVE(1);
145 };
146
147 struct mem_cgroup_threshold {
148 struct eventfd_ctx *eventfd;
149 unsigned long threshold;
150 };
151
152 /* For threshold */
153 struct mem_cgroup_threshold_ary {
154 /* An array index points to threshold just below or equal to usage. */
155 int current_threshold;
156 /* Size of entries[] */
157 unsigned int size;
158 /* Array of thresholds */
159 struct mem_cgroup_threshold entries[];
160 };
161
162 struct mem_cgroup_thresholds {
163 /* Primary thresholds array */
164 struct mem_cgroup_threshold_ary *primary;
165 /*
166 * Spare threshold array.
167 * This is needed to make mem_cgroup_unregister_event() "never fail".
168 * It must be able to store at least primary->size - 1 entries.
169 */
170 struct mem_cgroup_threshold_ary *spare;
171 };
172
173 /*
174 * Remember four most recent foreign writebacks with dirty pages in this
175 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
176 * one in a given round, we're likely to catch it later if it keeps
177 * foreign-dirtying, so a fairly low count should be enough.
178 *
179 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
180 */
181 #define MEMCG_CGWB_FRN_CNT 4
182
183 struct memcg_cgwb_frn {
184 u64 bdi_id; /* bdi->id of the foreign inode */
185 int memcg_id; /* memcg->css.id of foreign inode */
186 u64 at; /* jiffies_64 at the time of dirtying */
187 struct wb_completion done; /* tracks in-flight foreign writebacks */
188 };
189
190 /*
191 * Bucket for arbitrarily byte-sized objects charged to a memory
192 * cgroup. The bucket can be reparented in one piece when the cgroup
193 * is destroyed, without having to round up the individual references
194 * of all live memory objects in the wild.
195 */
196 struct obj_cgroup {
197 struct percpu_ref refcnt;
198 struct mem_cgroup *memcg;
199 atomic_t nr_charged_bytes;
200 union {
201 struct list_head list; /* protected by objcg_lock */
202 struct rcu_head rcu;
203 };
204 };
205
206 /*
207 * The memory controller data structure. The memory controller controls both
208 * page cache and RSS per cgroup. We would eventually like to provide
209 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
210 * to help the administrator determine what knobs to tune.
211 */
212 struct mem_cgroup {
213 struct cgroup_subsys_state css;
214
215 /* Private memcg ID. Used to ID objects that outlive the cgroup */
216 struct mem_cgroup_id id;
217
218 /* Accounted resources */
219 struct page_counter memory; /* Both v1 & v2 */
220
221 union {
222 struct page_counter swap; /* v2 only */
223 struct page_counter memsw; /* v1 only */
224 };
225
226 /* Legacy consumer-oriented counters */
227 struct page_counter kmem; /* v1 only */
228 struct page_counter tcpmem; /* v1 only */
229
230 /* Range enforcement for interrupt charges */
231 struct work_struct high_work;
232
233 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
234 unsigned long zswap_max;
235 #endif
236
237 unsigned long soft_limit;
238
239 /* vmpressure notifications */
240 struct vmpressure vmpressure;
241
242 /*
243 * Should the OOM killer kill all tasks in this cgroup, if it has to kill one?
244 */
245 bool oom_group;
246
247 /* protected by memcg_oom_lock */
248 bool oom_lock;
249 int under_oom;
250
251 int swappiness;
252 /* OOM-Killer disable */
253 int oom_kill_disable;
254
255 /* memory.events and memory.events.local */
256 struct cgroup_file events_file;
257 struct cgroup_file events_local_file;
258
259 /* handle for "memory.swap.events" */
260 struct cgroup_file swap_events_file;
261
262 /* protect arrays of thresholds */
263 struct mutex thresholds_lock;
264
265 /* thresholds for memory usage. RCU-protected */
266 struct mem_cgroup_thresholds thresholds;
267
268 /* thresholds for mem+swap usage. RCU-protected */
269 struct mem_cgroup_thresholds memsw_thresholds;
270
271 /* For oom notifier event fd */
272 struct list_head oom_notify;
273
274 /*
275 * Should we move charges of a task when a task is moved into this
276 * mem_cgroup ? And what type of charges should we move ?
277 */
278 unsigned long move_charge_at_immigrate;
279 /* taken only while moving_account > 0 */
280 spinlock_t move_lock;
281 unsigned long move_lock_flags;
282
283 CACHELINE_PADDING(_pad1_);
284
285 /* memory.stat */
286 struct memcg_vmstats *vmstats;
287
288 /* memory.events */
289 atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
290 atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
291
292 /*
293 * Hint of reclaim pressure for socket memory management. Note
294 * that this indicator should NOT be used in legacy cgroup mode
295 * where socket memory is accounted/charged separately.
296 */
297 unsigned long socket_pressure;
298
299 /* Legacy tcp memory accounting */
300 bool tcpmem_active;
301 int tcpmem_pressure;
302
303 #ifdef CONFIG_MEMCG_KMEM
304 int kmemcg_id;
305 struct obj_cgroup __rcu *objcg;
306 /* list of inherited objcgs, protected by objcg_lock */
307 struct list_head objcg_list;
308 #endif
309
310 CACHELINE_PADDING(_pad2_);
311
312 /*
313 * set > 0 if pages under this cgroup are moving to other cgroup.
314 */
315 atomic_t moving_account;
316 struct task_struct *move_lock_task;
317
318 struct memcg_vmstats_percpu __percpu *vmstats_percpu;
319
320 #ifdef CONFIG_CGROUP_WRITEBACK
321 struct list_head cgwb_list;
322 struct wb_domain cgwb_domain;
323 struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
324 #endif
325
326 /* List of events which userspace want to receive */
327 struct list_head event_list;
328 spinlock_t event_list_lock;
329
330 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
331 struct deferred_split deferred_split_queue;
332 #endif
333
334 #ifdef CONFIG_LRU_GEN
335 /* per-memcg mm_struct list */
336 struct lru_gen_mm_list mm_list;
337 #endif
338
339 // These must be before the flexible array member nodeinfo below
340 ANDROID_BACKPORT_RESERVE(1);
341 ANDROID_BACKPORT_RESERVE(2);
342 ANDROID_OEM_DATA_ARRAY(1, 2);
343
344 struct mem_cgroup_per_node *nodeinfo[];
345 };
346
347 /*
348 * size of first charge trial.
349 * TODO: it may be necessary to use larger numbers on big-iron systems, or to
350 * size this dynamically based on the workload.
351 */
352 #define MEMCG_CHARGE_BATCH 64U
353
354 extern struct mem_cgroup *root_mem_cgroup;
355
356 enum page_memcg_data_flags {
357 /* page->memcg_data is a pointer to an objcgs vector */
358 MEMCG_DATA_OBJCGS = (1UL << 0),
359 /* page has been accounted as a non-slab kernel page */
360 MEMCG_DATA_KMEM = (1UL << 1),
361 /* the next bit after the last actual flag */
362 __NR_MEMCG_DATA_FLAGS = (1UL << 2),
363 };
364
365 #define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
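/*
 * Editor's illustrative sketch (not upstream code): how the helpers below
 * unpack folio->memcg_data. The low bits hold the page_memcg_data_flags;
 * masking them off with MEMCG_DATA_FLAGS_MASK recovers the pointer, whose
 * type depends on which flag is set (objcgs/objcg/memcg are assumed locals
 * of the matching types):
 *
 *	unsigned long data = READ_ONCE(folio->memcg_data);
 *	void *ptr = (void *)(data & ~MEMCG_DATA_FLAGS_MASK);
 *
 *	if (data & MEMCG_DATA_OBJCGS)
 *		objcgs = ptr;	// slab folio: vector of obj_cgroup pointers
 *	else if (data & MEMCG_DATA_KMEM)
 *		objcg = ptr;	// kmem folio: a single obj_cgroup
 *	else
 *		memcg = ptr;	// LRU folio: mem_cgroup (may be NULL)
 */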
366
367 static inline bool folio_memcg_kmem(struct folio *folio);
368
369 void do_traversal_all_lruvec(void);
370
371 /*
372 * After the initialization objcg->memcg is always pointing at
373 * a valid memcg, but can be atomically swapped to the parent memcg.
374 *
375 * The caller must ensure that the returned memcg won't be released:
376 * e.g. acquire the rcu_read_lock or css_set_lock.
377 */
378 static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
379 {
380 return READ_ONCE(objcg->memcg);
381 }
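/*
 * Editor's illustrative example of the rule above: the returned memcg is
 * only stable while the RCU read-side section (or css_set_lock) is held.
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	// ... use memcg without sleeping ...
 *	rcu_read_unlock();
 *
 * To use the memcg after the RCU section ends, take a reference instead,
 * e.g. via get_mem_cgroup_from_objcg() below.
 */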
382
383 /*
384 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
385 * @folio: Pointer to the folio.
386 *
387 * Returns a pointer to the memory cgroup associated with the folio,
388 * or NULL. This function assumes that the folio is known to have a
389 * proper memory cgroup pointer. It's not safe to call this function
390 * against some type of folios, e.g. slab folios or ex-slab folios or
391 * kmem folios.
392 */
393 static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
394 {
395 unsigned long memcg_data = folio->memcg_data;
396
397 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
398 VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
399 VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
400
401 return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
402 }
403
404 /*
405 * __folio_objcg - get the object cgroup associated with a kmem folio.
406 * @folio: Pointer to the folio.
407 *
408 * Returns a pointer to the object cgroup associated with the folio,
409 * or NULL. This function assumes that the folio is known to have a
410 * proper object cgroup pointer. It's not safe to call this function
411 * against some type of folios, e.g. slab folios or ex-slab folios or
412 * LRU folios.
413 */
414 static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
415 {
416 unsigned long memcg_data = folio->memcg_data;
417
418 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
419 VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
420 VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
421
422 return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
423 }
424
425 /*
426 * folio_memcg - Get the memory cgroup associated with a folio.
427 * @folio: Pointer to the folio.
428 *
429 * Returns a pointer to the memory cgroup associated with the folio,
430 * or NULL. This function assumes that the folio is known to have a
431 * proper memory cgroup pointer. It's not safe to call this function
432 * against some type of folios, e.g. slab folios or ex-slab folios.
433 *
434 * For a non-kmem folio any of the following ensures folio and memcg binding
435 * stability:
436 *
437 * - the folio lock
438 * - LRU isolation
439 * - folio_memcg_lock()
440 * - exclusive reference
441 * - mem_cgroup_trylock_pages()
442 *
443 * For a kmem folio a caller should hold an rcu read lock to protect memcg
444 * associated with a kmem folio from being released.
445 */
446 static inline struct mem_cgroup *folio_memcg(struct folio *folio)
447 {
448 if (folio_memcg_kmem(folio))
449 return obj_cgroup_memcg(__folio_objcg(folio));
450 return __folio_memcg(folio);
451 }
452
453 static inline struct mem_cgroup *page_memcg(struct page *page)
454 {
455 return folio_memcg(page_folio(page));
456 }
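/*
 * Editor's usage sketch (illustrative): any of the stabilizers listed in the
 * comment above keeps the folio<->memcg binding from changing underneath the
 * caller. The folio lock is the most common one:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	if (memcg)
 *		count_memcg_events(memcg, PGMAJFAULT, 1);
 *	folio_unlock(folio);
 */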
457
458 /**
459 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
460 * @folio: Pointer to the folio.
461 *
462 * This function assumes that the folio is known to have a
463 * proper memory cgroup pointer. It's not safe to call this function
464 * against some type of folios, e.g. slab folios or ex-slab folios.
465 *
466 * Return: A pointer to the memory cgroup associated with the folio,
467 * or NULL.
468 */
469 static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
470 {
471 unsigned long memcg_data = READ_ONCE(folio->memcg_data);
472
473 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
474 WARN_ON_ONCE(!rcu_read_lock_held());
475
476 if (memcg_data & MEMCG_DATA_KMEM) {
477 struct obj_cgroup *objcg;
478
479 objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
480 return obj_cgroup_memcg(objcg);
481 }
482
483 return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
484 }
485
486 /*
487 * folio_memcg_check - Get the memory cgroup associated with a folio.
488 * @folio: Pointer to the folio.
489 *
490 * Returns a pointer to the memory cgroup associated with the folio,
491 * or NULL. Unlike folio_memcg(), this function can take any folio
492 * as an argument. It is meant for cases where it is not known whether a folio
493 * has an associated memory cgroup pointer, an object cgroups vector, or
494 * an object cgroup.
495 *
496 * For a non-kmem folio any of the following ensures folio and memcg binding
497 * stability:
498 *
499 * - the folio lock
500 * - LRU isolation
501 * - folio_memcg_lock()
502 * - exclusive reference
503 * - mem_cgroup_trylock_pages()
504 *
505 * For a kmem folio a caller should hold an rcu read lock to protect memcg
506 * associated with a kmem folio from being released.
507 */
508 static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
509 {
510 /*
511 * Because folio->memcg_data might be changed asynchronously
512 * for slabs, READ_ONCE() should be used here.
513 */
514 unsigned long memcg_data = READ_ONCE(folio->memcg_data);
515
516 if (memcg_data & MEMCG_DATA_OBJCGS)
517 return NULL;
518
519 if (memcg_data & MEMCG_DATA_KMEM) {
520 struct obj_cgroup *objcg;
521
522 objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
523 return obj_cgroup_memcg(objcg);
524 }
525
526 return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
527 }
528
529 static inline struct mem_cgroup *page_memcg_check(struct page *page)
530 {
531 if (PageTail(page))
532 return NULL;
533 return folio_memcg_check((struct folio *)page);
534 }
535
536 static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
537 {
538 struct mem_cgroup *memcg;
539
540 rcu_read_lock();
541 retry:
542 memcg = obj_cgroup_memcg(objcg);
543 if (unlikely(!css_tryget(&memcg->css)))
544 goto retry;
545 rcu_read_unlock();
546
547 return memcg;
548 }
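/*
 * Editor's note (illustrative): unlike a bare obj_cgroup_memcg() lookup, the
 * helper above returns a referenced memcg, retrying if the objcg is being
 * reparented concurrently. The caller owns the reference and must drop it:
 *
 *	memcg = get_mem_cgroup_from_objcg(objcg);
 *	// ... possibly sleeping work against memcg ...
 *	mem_cgroup_put(memcg);
 */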
549
550 #ifdef CONFIG_MEMCG_KMEM
551 /*
552 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
553 * @folio: Pointer to the folio.
554 *
555 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
556 * that the folio has an associated memory cgroup. It's not safe to call
557 * this function against some types of folios, e.g. slab folios.
558 */
559 static inline bool folio_memcg_kmem(struct folio *folio)
560 {
561 VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
562 VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
563 return folio->memcg_data & MEMCG_DATA_KMEM;
564 }
565
566
567 #else
568 static inline bool folio_memcg_kmem(struct folio *folio)
569 {
570 return false;
571 }
572
573 #endif
574
575 static inline bool PageMemcgKmem(struct page *page)
576 {
577 return folio_memcg_kmem(page_folio(page));
578 }
579
580 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
581 {
582 return (memcg == root_mem_cgroup);
583 }
584
585 static inline bool mem_cgroup_disabled(void)
586 {
587 return !cgroup_subsys_enabled(memory_cgrp_subsys);
588 }
589
590 static inline void mem_cgroup_protection(struct mem_cgroup *root,
591 struct mem_cgroup *memcg,
592 unsigned long *min,
593 unsigned long *low)
594 {
595 *min = *low = 0;
596
597 if (mem_cgroup_disabled())
598 return;
599
600 /*
601 * There is no reclaim protection applied to a targeted reclaim.
602 * We are special casing this specific case here because
603 * mem_cgroup_calculate_protection is not robust enough to keep
604 * the protection invariant for calculated effective values for
605 * parallel reclaimers with different reclaim target. This is
606 * especially a problem for tail memcgs (as they have pages on LRU)
607 * which would want to have effective values 0 for targeted reclaim
608 * but a different value for external reclaim.
609 *
610 * Example
611 * Let's have global and A's reclaim in parallel:
612 * |
613 * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
614 * |\
615 * | C (low = 1G, usage = 2.5G)
616 * B (low = 1G, usage = 0.5G)
617 *
618 * For the global reclaim
619 * A.elow = A.low
620 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
621 * C.elow = min(C.usage, C.low)
622 *
623 * With the effective values resetting we have A reclaim
624 * A.elow = 0
625 * B.elow = B.low
626 * C.elow = C.low
627 *
628 * If the global reclaim races with A's reclaim then
629 * B.elow = C.elow = 0 because children_low_usage > A.elow
630 * is possible and reclaiming B would be violating the protection.
631 *
632 */
633 if (root == memcg)
634 return;
635
636 *min = READ_ONCE(memcg->memory.emin);
637 *low = READ_ONCE(memcg->memory.elow);
638 }
639
640 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
641 struct mem_cgroup *memcg);
642
643 static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
644 struct mem_cgroup *memcg)
645 {
646 /*
647 * The root memcg doesn't account charges, and doesn't support
648 * protection. The target memcg's protection is ignored, see
649 * mem_cgroup_calculate_protection() and mem_cgroup_protection()
650 */
651 return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
652 memcg == target;
653 }
654
655 static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
656 struct mem_cgroup *memcg)
657 {
658 if (mem_cgroup_unprotected(target, memcg))
659 return false;
660
661 return READ_ONCE(memcg->memory.elow) >=
662 page_counter_read(&memcg->memory);
663 }
664
665 static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
666 struct mem_cgroup *memcg)
667 {
668 if (mem_cgroup_unprotected(target, memcg))
669 return false;
670
671 return READ_ONCE(memcg->memory.emin) >=
672 page_counter_read(&memcg->memory);
673 }
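/*
 * Editor's sketch of how a reclaimer typically consumes the protection API
 * above (illustrative, loosely modelled on shrink_node_memcgs()):
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *
 *	if (mem_cgroup_below_min(target_memcg, memcg)) {
 *		// hard protection: skip this memcg entirely
 *	} else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *		// soft protection: skip unless memory.low is being overridden
 *	}
 *	// otherwise reclaim from this memcg as usual
 */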
674
675 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
676
677 /**
678 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
679 * @folio: Folio to charge.
680 * @mm: mm context of the allocating task.
681 * @gfp: Reclaim mode.
682 *
683 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
684 * pages according to @gfp if necessary. If @mm is NULL, try to
685 * charge to the active memcg.
686 *
687 * Do not use this for folios allocated for swapin.
688 *
689 * Return: 0 on success. Otherwise, an error code is returned.
690 */
691 static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
692 gfp_t gfp)
693 {
694 if (mem_cgroup_disabled())
695 return 0;
696 return __mem_cgroup_charge(folio, mm, gfp);
697 }
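/*
 * Editor's usage sketch (illustrative): the typical page-cache pattern is to
 * charge a freshly allocated folio before making it visible; on failure the
 * folio is simply freed again:
 *
 *	if (mem_cgroup_charge(folio, mm, GFP_KERNEL)) {
 *		folio_put(folio);
 *		return -ENOMEM;		// over limit and reclaim failed
 *	}
 *	// ... add the folio to the mapping and the LRU ...
 */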
698
699 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
700 gfp_t gfp, swp_entry_t entry);
701 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
702
703 void __mem_cgroup_uncharge(struct folio *folio);
704
705 /**
706 * mem_cgroup_uncharge - Uncharge a folio.
707 * @folio: Folio to uncharge.
708 *
709 * Uncharge a folio previously charged with mem_cgroup_charge().
710 */
711 static inline void mem_cgroup_uncharge(struct folio *folio)
712 {
713 if (mem_cgroup_disabled())
714 return;
715 __mem_cgroup_uncharge(folio);
716 }
717
718 void __mem_cgroup_uncharge_list(struct list_head *page_list);
719 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
720 {
721 if (mem_cgroup_disabled())
722 return;
723 __mem_cgroup_uncharge_list(page_list);
724 }
725
726 void mem_cgroup_migrate(struct folio *old, struct folio *new);
727
728 /**
729 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
730 * @memcg: memcg of the wanted lruvec
731 * @pgdat: pglist_data
732 *
733 * Returns the lru list vector holding pages for a given @memcg &
734 * @pgdat combination. This can be the node lruvec, if the memory
735 * controller is disabled.
736 */
737 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
738 struct pglist_data *pgdat)
739 {
740 struct mem_cgroup_per_node *mz;
741 struct lruvec *lruvec;
742
743 if (mem_cgroup_disabled()) {
744 lruvec = &pgdat->__lruvec;
745 goto out;
746 }
747
748 if (!memcg)
749 memcg = root_mem_cgroup;
750
751 mz = memcg->nodeinfo[pgdat->node_id];
752 lruvec = &mz->lruvec;
753 out:
754 /*
755 * Since a node can be onlined after the mem_cgroup was created,
756 * we have to be prepared to initialize lruvec->pgdat here;
757 * and if offlined then reonlined, we need to reinitialize it.
758 */
759 if (unlikely(lruvec->pgdat != pgdat))
760 lruvec->pgdat = pgdat;
761 return lruvec;
762 }
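/*
 * Editor's example (illustrative): resolving the lruvec for a memcg on a
 * given node and reading one of its statistics. With the memory controller
 * disabled this transparently falls back to the node's lruvec.
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 *	unsigned long dirty = lruvec_page_state(lruvec, NR_FILE_DIRTY);
 */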
763
764 /**
765 * folio_lruvec - return lruvec for isolating/putting an LRU folio
766 * @folio: Pointer to the folio.
767 *
768 * This function relies on folio->memcg_data being stable.
769 */
770 static inline struct lruvec *folio_lruvec(struct folio *folio)
771 {
772 struct mem_cgroup *memcg = folio_memcg(folio);
773
774 VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
775 return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
776 }
777
778 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
779
780 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
781
782 struct lruvec *folio_lruvec_lock(struct folio *folio);
783 struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
784 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
785 unsigned long *flags);
786
787 #ifdef CONFIG_DEBUG_VM
788 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
789 #else
790 static inline
791 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
792 {
793 }
794 #endif
795
796 static inline
797 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
798 return css ? container_of(css, struct mem_cgroup, css) : NULL;
799 }
800
801 static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
802 {
803 return percpu_ref_tryget(&objcg->refcnt);
804 }
805
806 static inline void obj_cgroup_get(struct obj_cgroup *objcg)
807 {
808 percpu_ref_get(&objcg->refcnt);
809 }
810
811 static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
812 unsigned long nr)
813 {
814 percpu_ref_get_many(&objcg->refcnt, nr);
815 }
816
817 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
818 {
819 percpu_ref_put(&objcg->refcnt);
820 }
821
822 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
823 {
824 return !memcg || css_tryget(&memcg->css);
825 }
826
827 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
828 {
829 if (memcg)
830 css_put(&memcg->css);
831 }
832
833 #define mem_cgroup_from_counter(counter, member) \
834 container_of(counter, struct mem_cgroup, member)
835
836 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
837 struct mem_cgroup *,
838 struct mem_cgroup_reclaim_cookie *);
839 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
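/*
 * Editor's sketch of the canonical hierarchy walk (illustrative): the
 * iterator takes a css reference on the memcg it returns and drops the
 * previous one, so a walk that is abandoned early must be finished with
 * mem_cgroup_iter_break():
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *
 *	while (memcg) {
 *		if (should_stop(memcg)) {	// should_stop() is a placeholder
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	}
 */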
840 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
841 int (*)(struct task_struct *, void *), void *arg);
842
843 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
844 {
845 if (mem_cgroup_disabled())
846 return 0;
847
848 return memcg->id.id;
849 }
850 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
851
852 #ifdef CONFIG_SHRINKER_DEBUG
853 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
854 {
855 return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
856 }
857
858 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
859 #endif
860
861 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
862 {
863 return mem_cgroup_from_css(seq_css(m));
864 }
865
866 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
867 {
868 struct mem_cgroup_per_node *mz;
869
870 if (mem_cgroup_disabled())
871 return NULL;
872
873 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
874 return mz->memcg;
875 }
876
877 /**
878 * parent_mem_cgroup - find the accounting parent of a memcg
879 * @memcg: memcg whose parent to find
880 *
881 * Returns the parent memcg, or NULL if this is the root.
882 */
883 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
884 {
885 return mem_cgroup_from_css(memcg->css.parent);
886 }
887
888 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
889 struct mem_cgroup *root)
890 {
891 if (root == memcg)
892 return true;
893 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
894 }
895
896 static inline bool mm_match_cgroup(struct mm_struct *mm,
897 struct mem_cgroup *memcg)
898 {
899 struct mem_cgroup *task_memcg;
900 bool match = false;
901
902 rcu_read_lock();
903 task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
904 if (task_memcg)
905 match = mem_cgroup_is_descendant(task_memcg, memcg);
906 rcu_read_unlock();
907 return match;
908 }
909
910 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
911 ino_t page_cgroup_ino(struct page *page);
912
913 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
914 {
915 if (mem_cgroup_disabled())
916 return true;
917 return !!(memcg->css.flags & CSS_ONLINE);
918 }
919
920 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
921 int zid, int nr_pages);
922
923 static inline
924 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
925 enum lru_list lru, int zone_idx)
926 {
927 struct mem_cgroup_per_node *mz;
928
929 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
930 return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
931 }
932
933 void mem_cgroup_handle_over_high(gfp_t gfp_mask);
934
935 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
936
937 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
938
939 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
940 struct task_struct *p);
941
942 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
943
944 static inline void mem_cgroup_enter_user_fault(void)
945 {
946 WARN_ON(current->in_user_fault);
947 current->in_user_fault = 1;
948 }
949
950 static inline void mem_cgroup_exit_user_fault(void)
951 {
952 WARN_ON(!current->in_user_fault);
953 current->in_user_fault = 0;
954 }
955
956 static inline bool task_in_memcg_oom(struct task_struct *p)
957 {
958 return p->memcg_in_oom;
959 }
960
961 bool mem_cgroup_oom_synchronize(bool wait);
962 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
963 struct mem_cgroup *oom_domain);
964 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
965
966 void folio_memcg_lock(struct folio *folio);
967 void folio_memcg_unlock(struct folio *folio);
968
969 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
970
971 /* try to stabilize folio_memcg() for all the pages in a memcg */
972 static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
973 {
974 rcu_read_lock();
975
976 if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
977 return true;
978
979 rcu_read_unlock();
980 return false;
981 }
982
983 static inline void mem_cgroup_unlock_pages(void)
984 {
985 rcu_read_unlock();
986 }
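/*
 * Editor's usage sketch (illustrative): the trylock above pairs with
 * folio_memcg()/folio_memcg_rcu() lookups that must not race with charge
 * moving, e.g. in the multi-gen LRU page-table walk:
 *
 *	if (!mem_cgroup_trylock_pages(memcg))
 *		return;				// charges are being moved, back off
 *	// ... folio_memcg() is now stable for folios charged to memcg ...
 *	mem_cgroup_unlock_pages();		// also ends the RCU read section
 */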
987
988 /* idx can be of type enum memcg_stat_item or node_stat_item */
989 static inline void mod_memcg_state(struct mem_cgroup *memcg,
990 int idx, int val)
991 {
992 unsigned long flags;
993
994 local_irq_save(flags);
995 __mod_memcg_state(memcg, idx, val);
996 local_irq_restore(flags);
997 }
998
999 static inline void mod_memcg_page_state(struct page *page,
1000 int idx, int val)
1001 {
1002 struct mem_cgroup *memcg;
1003
1004 if (mem_cgroup_disabled())
1005 return;
1006
1007 rcu_read_lock();
1008 memcg = page_memcg(page);
1009 if (memcg)
1010 mod_memcg_state(memcg, idx, val);
1011 rcu_read_unlock();
1012 }
1013
1014 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
1015
1016 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1017 enum node_stat_item idx)
1018 {
1019 struct mem_cgroup_per_node *pn;
1020 long x;
1021
1022 if (mem_cgroup_disabled())
1023 return node_page_state(lruvec_pgdat(lruvec), idx);
1024
1025 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1026 x = READ_ONCE(pn->lruvec_stats.state[idx]);
1027 #ifdef CONFIG_SMP
1028 if (x < 0)
1029 x = 0;
1030 #endif
1031 return x;
1032 }
1033
1034 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1035 enum node_stat_item idx)
1036 {
1037 struct mem_cgroup_per_node *pn;
1038 long x = 0;
1039
1040 if (mem_cgroup_disabled())
1041 return node_page_state(lruvec_pgdat(lruvec), idx);
1042
1043 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1044 x = READ_ONCE(pn->lruvec_stats.state_local[idx]);
1045 #ifdef CONFIG_SMP
1046 if (x < 0)
1047 x = 0;
1048 #endif
1049 return x;
1050 }
1051
1052 void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
1053 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
1054
1055 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
1056 int val);
1057 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
1058
1059 static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1060 int val)
1061 {
1062 unsigned long flags;
1063
1064 local_irq_save(flags);
1065 __mod_lruvec_kmem_state(p, idx, val);
1066 local_irq_restore(flags);
1067 }
1068
1069 static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
1070 enum node_stat_item idx, int val)
1071 {
1072 unsigned long flags;
1073
1074 local_irq_save(flags);
1075 __mod_memcg_lruvec_state(lruvec, idx, val);
1076 local_irq_restore(flags);
1077 }
1078
1079 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
1080 unsigned long count);
1081
1082 static inline void count_memcg_events(struct mem_cgroup *memcg,
1083 enum vm_event_item idx,
1084 unsigned long count)
1085 {
1086 unsigned long flags;
1087
1088 local_irq_save(flags);
1089 __count_memcg_events(memcg, idx, count);
1090 local_irq_restore(flags);
1091 }
1092
1093 static inline void count_memcg_folio_events(struct folio *folio,
1094 enum vm_event_item idx, unsigned long nr)
1095 {
1096 struct mem_cgroup *memcg = folio_memcg(folio);
1097
1098 if (memcg)
1099 count_memcg_events(memcg, idx, nr);
1100 }
1101
1102 static inline void count_memcg_event_mm(struct mm_struct *mm,
1103 enum vm_event_item idx)
1104 {
1105 struct mem_cgroup *memcg;
1106
1107 if (mem_cgroup_disabled())
1108 return;
1109
1110 rcu_read_lock();
1111 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1112 if (likely(memcg))
1113 count_memcg_events(memcg, idx, 1);
1114 rcu_read_unlock();
1115 }
1116
1117 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1118 enum memcg_memory_event event)
1119 {
1120 bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
1121 event == MEMCG_SWAP_FAIL;
1122
1123 atomic_long_inc(&memcg->memory_events_local[event]);
1124 if (!swap_event)
1125 cgroup_file_notify(&memcg->events_local_file);
1126
1127 do {
1128 atomic_long_inc(&memcg->memory_events[event]);
1129 if (swap_event)
1130 cgroup_file_notify(&memcg->swap_events_file);
1131 else
1132 cgroup_file_notify(&memcg->events_file);
1133
1134 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1135 break;
1136 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1137 break;
1138 } while ((memcg = parent_mem_cgroup(memcg)) &&
1139 !mem_cgroup_is_root(memcg));
1140 }
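/*
 * Editor's note (illustrative): callers record an event against the memcg
 * where it happened; the helper above then propagates it up the hierarchy
 * (unless only local events are requested on the default hierarchy) and
 * notifies the matching memory.events / memory.swap.events files, e.g.:
 *
 *	memcg_memory_event(memcg, MEMCG_OOM);
 *	memcg_memory_event(memcg, MEMCG_OOM_KILL);
 */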
1141
1142 static inline void memcg_memory_event_mm(struct mm_struct *mm,
1143 enum memcg_memory_event event)
1144 {
1145 struct mem_cgroup *memcg;
1146
1147 if (mem_cgroup_disabled())
1148 return;
1149
1150 rcu_read_lock();
1151 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1152 if (likely(memcg))
1153 memcg_memory_event(memcg, event);
1154 rcu_read_unlock();
1155 }
1156
1157 void split_page_memcg(struct page *head, unsigned int nr);
1158 void folio_copy_memcg(struct folio *folio);
1159
1160 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1161 gfp_t gfp_mask,
1162 unsigned long *total_scanned);
1163
1164 #else /* CONFIG_MEMCG */
1165
1166 #define MEM_CGROUP_ID_SHIFT 0
1167
1168 static inline struct mem_cgroup *folio_memcg(struct folio *folio)
1169 {
1170 return NULL;
1171 }
1172
1173 static inline struct mem_cgroup *page_memcg(struct page *page)
1174 {
1175 return NULL;
1176 }
1177
1178 static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
1179 {
1180 WARN_ON_ONCE(!rcu_read_lock_held());
1181 return NULL;
1182 }
1183
1184 static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
1185 {
1186 return NULL;
1187 }
1188
1189 static inline struct mem_cgroup *page_memcg_check(struct page *page)
1190 {
1191 return NULL;
1192 }
1193
1194 static inline bool folio_memcg_kmem(struct folio *folio)
1195 {
1196 return false;
1197 }
1198
1199 static inline bool PageMemcgKmem(struct page *page)
1200 {
1201 return false;
1202 }
1203
1204 static inline void do_traversal_all_lruvec(void)
1205 {
1206 }
1207
1208 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1209 {
1210 return true;
1211 }
1212
1213 static inline bool mem_cgroup_disabled(void)
1214 {
1215 return true;
1216 }
1217
1218 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1219 enum memcg_memory_event event)
1220 {
1221 }
1222
1223 static inline void memcg_memory_event_mm(struct mm_struct *mm,
1224 enum memcg_memory_event event)
1225 {
1226 }
1227
1228 static inline void mem_cgroup_protection(struct mem_cgroup *root,
1229 struct mem_cgroup *memcg,
1230 unsigned long *min,
1231 unsigned long *low)
1232 {
1233 *min = *low = 0;
1234 }
1235
1236 static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
1237 struct mem_cgroup *memcg)
1238 {
1239 }
1240
1241 static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
1242 struct mem_cgroup *memcg)
1243 {
1244 return true;
1245 }
1246 static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
1247 struct mem_cgroup *memcg)
1248 {
1249 return false;
1250 }
1251
1252 static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
1253 struct mem_cgroup *memcg)
1254 {
1255 return false;
1256 }
1257
1258 static inline int mem_cgroup_charge(struct folio *folio,
1259 struct mm_struct *mm, gfp_t gfp)
1260 {
1261 return 0;
1262 }
1263
1264 static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
1265 struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
1266 {
1267 return 0;
1268 }
1269
1270 static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
1271 {
1272 }
1273
1274 static inline void mem_cgroup_uncharge(struct folio *folio)
1275 {
1276 }
1277
1278 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
1279 {
1280 }
1281
1282 static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
1283 {
1284 }
1285
1286 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1287 struct pglist_data *pgdat)
1288 {
1289 return &pgdat->__lruvec;
1290 }
1291
1292 static inline struct lruvec *folio_lruvec(struct folio *folio)
1293 {
1294 struct pglist_data *pgdat = folio_pgdat(folio);
1295 return &pgdat->__lruvec;
1296 }
1297
1298 static inline
1299 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1300 {
1301 }
1302
1303 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1304 {
1305 return NULL;
1306 }
1307
1308 static inline bool mm_match_cgroup(struct mm_struct *mm,
1309 struct mem_cgroup *memcg)
1310 {
1311 return true;
1312 }
1313
1314 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1315 {
1316 return NULL;
1317 }
1318
1319 static inline
1320 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
1321 {
1322 return NULL;
1323 }
1324
1325 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
1326 {
1327 }
1328
1329 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
1330 {
1331 return true;
1332 }
1333
1334 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1335 {
1336 }
1337
1338 static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
1339 {
1340 struct pglist_data *pgdat = folio_pgdat(folio);
1341
1342 spin_lock(&pgdat->__lruvec.lru_lock);
1343 return &pgdat->__lruvec;
1344 }
1345
1346 static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1347 {
1348 struct pglist_data *pgdat = folio_pgdat(folio);
1349
1350 spin_lock_irq(&pgdat->__lruvec.lru_lock);
1351 return &pgdat->__lruvec;
1352 }
1353
1354 static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1355 unsigned long *flagsp)
1356 {
1357 struct pglist_data *pgdat = folio_pgdat(folio);
1358
1359 spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
1360 return &pgdat->__lruvec;
1361 }
1362
1363 static inline struct mem_cgroup *
1364 mem_cgroup_iter(struct mem_cgroup *root,
1365 struct mem_cgroup *prev,
1366 struct mem_cgroup_reclaim_cookie *reclaim)
1367 {
1368 return NULL;
1369 }
1370
1371 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
1372 struct mem_cgroup *prev)
1373 {
1374 }
1375
1376 static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1377 int (*fn)(struct task_struct *, void *), void *arg)
1378 {
1379 }
1380
1381 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1382 {
1383 return 0;
1384 }
1385
1386 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
1387 {
1388 WARN_ON_ONCE(id);
1389 /* XXX: This should always return root_mem_cgroup */
1390 return NULL;
1391 }
1392
1393 #ifdef CONFIG_SHRINKER_DEBUG
1394 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
1395 {
1396 return 0;
1397 }
1398
1399 static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
1400 {
1401 return NULL;
1402 }
1403 #endif
1404
1405 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
1406 {
1407 return NULL;
1408 }
1409
1410 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
1411 {
1412 return NULL;
1413 }
1414
1415 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1416 {
1417 return true;
1418 }
1419
1420 static inline
1421 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
1422 enum lru_list lru, int zone_idx)
1423 {
1424 return 0;
1425 }
1426
1427 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1428 {
1429 return 0;
1430 }
1431
1432 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1433 {
1434 return 0;
1435 }
1436
1437 static inline void
1438 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1439 {
1440 }
1441
1442 static inline void
1443 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1444 {
1445 }
1446
1447 static inline void folio_memcg_lock(struct folio *folio)
1448 {
1449 }
1450
1451 static inline void folio_memcg_unlock(struct folio *folio)
1452 {
1453 }
1454
1455 static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
1456 {
1457 /* to match folio_memcg_rcu() */
1458 rcu_read_lock();
1459 return true;
1460 }
1461
1462 static inline void mem_cgroup_unlock_pages(void)
1463 {
1464 rcu_read_unlock();
1465 }
1466
1467 static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
1468 {
1469 }
1470
1471 static inline void mem_cgroup_enter_user_fault(void)
1472 {
1473 }
1474
1475 static inline void mem_cgroup_exit_user_fault(void)
1476 {
1477 }
1478
1479 static inline bool task_in_memcg_oom(struct task_struct *p)
1480 {
1481 return false;
1482 }
1483
1484 static inline bool mem_cgroup_oom_synchronize(bool wait)
1485 {
1486 return false;
1487 }
1488
1489 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1490 struct task_struct *victim, struct mem_cgroup *oom_domain)
1491 {
1492 return NULL;
1493 }
1494
1495 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1496 {
1497 }
1498
1499 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1500 int idx,
1501 int nr)
1502 {
1503 }
1504
1505 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1506 int idx,
1507 int nr)
1508 {
1509 }
1510
1511 static inline void mod_memcg_page_state(struct page *page,
1512 int idx, int val)
1513 {
1514 }
1515
1516 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1517 {
1518 return 0;
1519 }
1520
1521 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1522 enum node_stat_item idx)
1523 {
1524 return node_page_state(lruvec_pgdat(lruvec), idx);
1525 }
1526
1527 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1528 enum node_stat_item idx)
1529 {
1530 return node_page_state(lruvec_pgdat(lruvec), idx);
1531 }
1532
1533 static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
1534 {
1535 }
1536
1537 static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
1538 {
1539 }
1540
1541 static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
1542 enum node_stat_item idx, int val)
1543 {
1544 }
1545
1546 static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1547 int val)
1548 {
1549 struct page *page = virt_to_head_page(p);
1550
1551 __mod_node_page_state(page_pgdat(page), idx, val);
1552 }
1553
1554 static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1555 int val)
1556 {
1557 struct page *page = virt_to_head_page(p);
1558
1559 mod_node_page_state(page_pgdat(page), idx, val);
1560 }
1561
1562 static inline void count_memcg_events(struct mem_cgroup *memcg,
1563 enum vm_event_item idx,
1564 unsigned long count)
1565 {
1566 }
1567
1568 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1569 enum vm_event_item idx,
1570 unsigned long count)
1571 {
1572 }
1573
1574 static inline void count_memcg_folio_events(struct folio *folio,
1575 enum vm_event_item idx, unsigned long nr)
1576 {
1577 }
1578
1579 static inline
1580 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1581 {
1582 }
1583
1584 static inline void split_page_memcg(struct page *head, unsigned int nr)
1585 {
1586 }
1587
1588 static inline void folio_copy_memcg(struct folio *folio)
1589 {
1590 }
1591
1592 static inline
1593 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1594 gfp_t gfp_mask,
1595 unsigned long *total_scanned)
1596 {
1597 return 0;
1598 }
1599 #endif /* CONFIG_MEMCG */
1600
1601 static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
1602 {
1603 __mod_lruvec_kmem_state(p, idx, 1);
1604 }
1605
1606 static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
1607 {
1608 __mod_lruvec_kmem_state(p, idx, -1);
1609 }
1610
1611 static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1612 {
1613 struct mem_cgroup *memcg;
1614
1615 memcg = lruvec_memcg(lruvec);
1616 if (!memcg)
1617 return NULL;
1618 memcg = parent_mem_cgroup(memcg);
1619 if (!memcg)
1620 return NULL;
1621 return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1622 }
1623
1624 static inline void unlock_page_lruvec(struct lruvec *lruvec)
1625 {
1626 spin_unlock(&lruvec->lru_lock);
1627 }
1628
1629 static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
1630 {
1631 spin_unlock_irq(&lruvec->lru_lock);
1632 }
1633
1634 static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
1635 unsigned long flags)
1636 {
1637 spin_unlock_irqrestore(&lruvec->lru_lock, flags);
1638 }
1639
1640 /* Test requires a stable folio->memcg binding, see folio_memcg() */
1641 static inline bool folio_matches_lruvec(struct folio *folio,
1642 struct lruvec *lruvec)
1643 {
1644 return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
1645 lruvec_memcg(lruvec) == folio_memcg(folio);
1646 }
1647
1648 /* Don't lock again iff page's lruvec locked */
1649 static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
1650 struct lruvec *locked_lruvec)
1651 {
1652 if (locked_lruvec) {
1653 if (folio_matches_lruvec(folio, locked_lruvec))
1654 return locked_lruvec;
1655
1656 unlock_page_lruvec_irq(locked_lruvec);
1657 }
1658
1659 return folio_lruvec_lock_irq(folio);
1660 }
1661
1662 /* Don't lock again iff page's lruvec locked */
1663 static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
1664 struct lruvec *locked_lruvec, unsigned long *flags)
1665 {
1666 if (locked_lruvec) {
1667 if (folio_matches_lruvec(folio, locked_lruvec))
1668 return locked_lruvec;
1669
1670 unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
1671 }
1672
1673 return folio_lruvec_lock_irqsave(folio, flags);
1674 }
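/*
 * Editor's sketch (illustrative) of the batching pattern these relock helpers
 * enable: when walking folios that may belong to different memcgs or nodes,
 * the lruvec lock is only dropped and retaken when the next folio maps to a
 * different lruvec (folio_list is an assumed caller-provided list):
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	struct folio *folio;
 *
 *	list_for_each_entry(folio, folio_list, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		// ... move/isolate the folio under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */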
1675
1676 #ifdef CONFIG_CGROUP_WRITEBACK
1677
1678 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1679 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1680 unsigned long *pheadroom, unsigned long *pdirty,
1681 unsigned long *pwriteback);
1682
1683 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
1684 struct bdi_writeback *wb);
1685
1686 static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1687 struct bdi_writeback *wb)
1688 {
1689 struct mem_cgroup *memcg;
1690
1691 if (mem_cgroup_disabled())
1692 return;
1693
1694 memcg = folio_memcg(folio);
1695 if (unlikely(memcg && &memcg->css != wb->memcg_css))
1696 mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
1697 }
1698
1699 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1700
1701 #else /* CONFIG_CGROUP_WRITEBACK */
1702
1703 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1704 {
1705 return NULL;
1706 }
1707
1708 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1709 unsigned long *pfilepages,
1710 unsigned long *pheadroom,
1711 unsigned long *pdirty,
1712 unsigned long *pwriteback)
1713 {
1714 }
1715
1716 static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1717 struct bdi_writeback *wb)
1718 {
1719 }
1720
1721 static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1722 {
1723 }
1724
1725 #endif /* CONFIG_CGROUP_WRITEBACK */
1726
1727 struct sock;
1728 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
1729 gfp_t gfp_mask);
1730 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1731 #ifdef CONFIG_MEMCG
1732 extern struct static_key_false memcg_sockets_enabled_key;
1733 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1734 void mem_cgroup_sk_alloc(struct sock *sk);
1735 void mem_cgroup_sk_free(struct sock *sk);
1736 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1737 {
1738 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1739 return !!memcg->tcpmem_pressure;
1740 do {
1741 if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
1742 return true;
1743 } while ((memcg = parent_mem_cgroup(memcg)));
1744 return false;
1745 }
1746
1747 int alloc_shrinker_info(struct mem_cgroup *memcg);
1748 void free_shrinker_info(struct mem_cgroup *memcg);
1749 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
1750 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
1751 #else
1752 #define mem_cgroup_sockets_enabled 0
1753 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
1754 static inline void mem_cgroup_sk_free(struct sock *sk) { };
1755 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1756 {
1757 return false;
1758 }
1759
1760 static inline void set_shrinker_bit(struct mem_cgroup *memcg,
1761 int nid, int shrinker_id)
1762 {
1763 }
1764 #endif
1765
1766 #ifdef CONFIG_MEMCG_KMEM
1767 bool mem_cgroup_kmem_disabled(void);
1768 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1769 void __memcg_kmem_uncharge_page(struct page *page, int order);
1770
1771 struct obj_cgroup *get_obj_cgroup_from_current(void);
1772 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
1773
1774 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
1775 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
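/*
 * Editor's usage sketch (illustrative): byte-granular accounting against an
 * obj_cgroup, in the spirit of the slab and per-cpu allocators. The objcg
 * reference pins the (possibly reparented) memcg association for the lifetime
 * of the allocation:
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *		obj_cgroup_put(objcg);
 *		return NULL;			// charge failed
 *	}
 *	// ... on free: obj_cgroup_uncharge(objcg, size); obj_cgroup_put(objcg)
 */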
1776
1777 extern struct static_key_false memcg_bpf_enabled_key;
1778 static inline bool memcg_bpf_enabled(void)
1779 {
1780 return static_branch_likely(&memcg_bpf_enabled_key);
1781 }
1782
1783 extern struct static_key_false memcg_kmem_online_key;
1784
1785 static inline bool memcg_kmem_online(void)
1786 {
1787 return static_branch_likely(&memcg_kmem_online_key);
1788 }
1789
1790 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1791 int order)
1792 {
1793 if (memcg_kmem_online())
1794 return __memcg_kmem_charge_page(page, gfp, order);
1795 return 0;
1796 }
1797
1798 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1799 {
1800 if (memcg_kmem_online())
1801 __memcg_kmem_uncharge_page(page, order);
1802 }
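/*
 * Editor's example (illustrative): this is essentially what the page
 * allocator does for __GFP_ACCOUNT allocations; the charge must be undone
 * with the same order before the page is freed:
 *
 *	if (memcg_kmem_charge_page(page, GFP_KERNEL | __GFP_ACCOUNT, order))
 *		goto out_free;			// over the kmem limit
 *	// ...
 *	memcg_kmem_uncharge_page(page, order);
 */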
1803
1804 /*
1805 * A helper for accessing memcg's kmem_id, used for getting
1806 * corresponding LRU lists.
1807 */
1808 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1809 {
1810 return memcg ? memcg->kmemcg_id : -1;
1811 }
1812
1813 struct mem_cgroup *mem_cgroup_from_obj(void *p);
1814 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
1815
1816 static inline void count_objcg_event(struct obj_cgroup *objcg,
1817 enum vm_event_item idx)
1818 {
1819 struct mem_cgroup *memcg;
1820
1821 if (!memcg_kmem_online())
1822 return;
1823
1824 rcu_read_lock();
1825 memcg = obj_cgroup_memcg(objcg);
1826 count_memcg_events(memcg, idx, 1);
1827 rcu_read_unlock();
1828 }
1829
1830 #else
1831 static inline bool mem_cgroup_kmem_disabled(void)
1832 {
1833 return true;
1834 }
1835
1836 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1837 int order)
1838 {
1839 return 0;
1840 }
1841
1842 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1843 {
1844 }
1845
1846 static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1847 int order)
1848 {
1849 return 0;
1850 }
1851
1852 static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
1853 {
1854 }
1855
1856 static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
1857 {
1858 return NULL;
1859 }
1860
1861 static inline bool memcg_bpf_enabled(void)
1862 {
1863 return false;
1864 }
1865
1866 static inline bool memcg_kmem_online(void)
1867 {
1868 return false;
1869 }
1870
1871 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1872 {
1873 return -1;
1874 }
1875
1876 static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
1877 {
1878 return NULL;
1879 }
1880
1881 static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
1882 {
1883 return NULL;
1884 }
1885
1886 static inline void count_objcg_event(struct obj_cgroup *objcg,
1887 enum vm_event_item idx)
1888 {
1889 }
1890
1891 #endif /* CONFIG_MEMCG_KMEM */
1892
1893 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1894 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
1895 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
1896 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
1897 #else
1898 static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
1899 {
1900 return true;
1901 }
1902 static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
1903 size_t size)
1904 {
1905 }
1906 static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
1907 size_t size)
1908 {
1909 }
1910 #endif
1911
1912 #endif /* _LINUX_MEMCONTROL_H */
1913