1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* memcontrol.h - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  */
10 
11 #ifndef _LINUX_MEMCONTROL_H
12 #define _LINUX_MEMCONTROL_H
13 #include <linux/cgroup.h>
14 #include <linux/vm_event_item.h>
15 #include <linux/hardirq.h>
16 #include <linux/jump_label.h>
17 #include <linux/page_counter.h>
18 #include <linux/vmpressure.h>
19 #include <linux/eventfd.h>
20 #include <linux/mm.h>
21 #include <linux/vmstat.h>
22 #include <linux/writeback.h>
23 #include <linux/page-flags.h>
24 
25 struct mem_cgroup;
26 struct obj_cgroup;
27 struct page;
28 struct mm_struct;
29 struct kmem_cache;
30 
31 /* Cgroup-specific page state, on top of universal node page state */
32 enum memcg_stat_item {
33 	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
34 	MEMCG_SOCK,
35 	MEMCG_PERCPU_B,
36 	MEMCG_NR_STAT,
37 };
38 
39 enum memcg_memory_event {
40 	MEMCG_LOW,
41 	MEMCG_HIGH,
42 	MEMCG_MAX,
43 	MEMCG_OOM,
44 	MEMCG_OOM_KILL,
45 	MEMCG_SWAP_HIGH,
46 	MEMCG_SWAP_MAX,
47 	MEMCG_SWAP_FAIL,
48 	MEMCG_NR_MEMORY_EVENTS,
49 };
50 
51 struct mem_cgroup_reclaim_cookie {
52 	pg_data_t *pgdat;
53 	unsigned int generation;
54 };
55 
56 #ifdef CONFIG_MEMCG
57 
58 #define MEM_CGROUP_ID_SHIFT	16
59 #define MEM_CGROUP_ID_MAX	USHRT_MAX
60 
61 struct mem_cgroup_id {
62 	int id;
63 	refcount_t ref;
64 };
65 
66 /*
67  * The per-memcg event counter is incremented on every pagein/pageout. With
68  * THP, it is incremented by the number of pages. The counter is used to
69  * trigger periodic events; this is simpler and more robust than using
70  * jiffies etc. to schedule periodic memcg work.
71  */
72 enum mem_cgroup_events_target {
73 	MEM_CGROUP_TARGET_THRESH,
74 	MEM_CGROUP_TARGET_SOFTLIMIT,
75 	MEM_CGROUP_NTARGETS,
76 };
77 
78 struct memcg_vmstats_percpu {
79 	long stat[MEMCG_NR_STAT];
80 	unsigned long events[NR_VM_EVENT_ITEMS];
81 	unsigned long nr_page_events;
82 	unsigned long targets[MEM_CGROUP_NTARGETS];
83 };
84 
85 struct mem_cgroup_reclaim_iter {
86 	struct mem_cgroup *position;
87 	/* scan generation, increased every round-trip */
88 	unsigned int generation;
89 };
90 
91 struct lruvec_stat {
92 	long count[NR_VM_NODE_STAT_ITEMS];
93 };
94 
95 /*
96  * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
97  * which have elements charged to this memcg.
98  */
99 struct memcg_shrinker_map {
100 	struct rcu_head rcu;
101 	unsigned long map[];
102 };
103 
104 /*
105  * per-node information in memory controller.
106  */
107 struct mem_cgroup_per_node {
108 	struct lruvec		lruvec;
109 
110 	/* Legacy local VM stats */
111 	struct lruvec_stat __percpu *lruvec_stat_local;
112 
113 	/* Subtree VM stats (batched updates) */
114 	struct lruvec_stat __percpu *lruvec_stat_cpu;
115 	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];
116 
117 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
118 
119 	struct mem_cgroup_reclaim_iter	iter;
120 
121 	struct memcg_shrinker_map __rcu	*shrinker_map;
122 
123 	struct rb_node		tree_node;	/* RB tree node */
124 	unsigned long		usage_in_excess;/* Set to the value by which */
125 						/* the soft limit is exceeded*/
126 	bool			on_tree;
127 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
128 						/* use container_of	   */
129 };
130 
131 struct mem_cgroup_threshold {
132 	struct eventfd_ctx *eventfd;
133 	unsigned long threshold;
134 };
135 
136 /* For threshold */
137 struct mem_cgroup_threshold_ary {
138 	/* An array index points to threshold just below or equal to usage. */
139 	int current_threshold;
140 	/* Size of entries[] */
141 	unsigned int size;
142 	/* Array of thresholds */
143 	struct mem_cgroup_threshold entries[];
144 };
145 
146 struct mem_cgroup_thresholds {
147 	/* Primary thresholds array */
148 	struct mem_cgroup_threshold_ary *primary;
149 	/*
150 	 * Spare threshold array.
151 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
152 	 * It must be able to store at least primary->size - 1 entries.
153 	 */
154 	struct mem_cgroup_threshold_ary *spare;
155 };
156 
157 enum memcg_kmem_state {
158 	KMEM_NONE,
159 	KMEM_ALLOCATED,
160 	KMEM_ONLINE,
161 };
162 
163 #if defined(CONFIG_SMP)
164 struct memcg_padding {
165 	char x[0];
166 } ____cacheline_internodealigned_in_smp;
167 #define MEMCG_PADDING(name)      struct memcg_padding name;
168 #else
169 #define MEMCG_PADDING(name)
170 #endif
171 
172 /*
173  * Remember four most recent foreign writebacks with dirty pages in this
174  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
175  * one in a given round, we're likely to catch it later if it keeps
176  * foreign-dirtying, so a fairly low count should be enough.
177  *
178  * See mem_cgroup_track_foreign_dirty_slowpath() for details.
179  */
180 #define MEMCG_CGWB_FRN_CNT	4
181 
182 struct memcg_cgwb_frn {
183 	u64 bdi_id;			/* bdi->id of the foreign inode */
184 	int memcg_id;			/* memcg->css.id of foreign inode */
185 	u64 at;				/* jiffies_64 at the time of dirtying */
186 	struct wb_completion done;	/* tracks in-flight foreign writebacks */
187 };
188 
189 /*
190  * Bucket for arbitrarily byte-sized objects charged to a memory
191  * cgroup. The bucket can be reparented in one piece when the cgroup
192  * is destroyed, without having to round up the individual references
193  * of all live memory objects in the wild.
194  */
195 struct obj_cgroup {
196 	struct percpu_ref refcnt;
197 	struct mem_cgroup *memcg;
198 	atomic_t nr_charged_bytes;
199 	union {
200 		struct list_head list; /* protected by objcg_lock */
201 		struct rcu_head rcu;
202 	};
203 };
204 
205 /*
206  * The memory controller data structure. The memory controller controls both
207  * page cache and RSS per cgroup. We would eventually like to provide
208  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
209  * to help the administrator determine what knobs to tune.
210  */
211 struct mem_cgroup {
212 	struct cgroup_subsys_state css;
213 
214 	/* Private memcg ID. Used to ID objects that outlive the cgroup */
215 	struct mem_cgroup_id id;
216 
217 	/* Accounted resources */
218 	struct page_counter memory;		/* Both v1 & v2 */
219 
220 	union {
221 		struct page_counter swap;	/* v2 only */
222 		struct page_counter memsw;	/* v1 only */
223 	};
224 
225 	/* Legacy consumer-oriented counters */
226 	struct page_counter kmem;		/* v1 only */
227 	struct page_counter tcpmem;		/* v1 only */
228 
229 	/* Range enforcement for interrupt charges */
230 	struct work_struct high_work;
231 
232 	unsigned long soft_limit;
233 
234 	/* vmpressure notifications */
235 	struct vmpressure vmpressure;
236 
237 	/*
238 	 * Should the accounting and control be hierarchical, per subtree?
239 	 */
240 	bool use_hierarchy;
241 
242 	/*
243 	 * Should the OOM killer kill all tasks belonging to this cgroup if it kills one?
244 	 */
245 	bool oom_group;
246 
247 	/* protected by memcg_oom_lock */
248 	bool		oom_lock;
249 	int		under_oom;
250 
251 	int	swappiness;
252 	/* OOM-Killer disable */
253 	int		oom_kill_disable;
254 
255 	/* memory.events and memory.events.local */
256 	struct cgroup_file events_file;
257 	struct cgroup_file events_local_file;
258 
259 	/* handle for "memory.swap.events" */
260 	struct cgroup_file swap_events_file;
261 
262 	/* protect arrays of thresholds */
263 	struct mutex thresholds_lock;
264 
265 	/* thresholds for memory usage. RCU-protected */
266 	struct mem_cgroup_thresholds thresholds;
267 
268 	/* thresholds for mem+swap usage. RCU-protected */
269 	struct mem_cgroup_thresholds memsw_thresholds;
270 
271 	/* For oom notifier event fd */
272 	struct list_head oom_notify;
273 
274 	/*
275 	 * Should we move charges of a task when a task is moved into this
276 	 * mem_cgroup ? And what type of charges should we move ?
277 	 */
278 	unsigned long move_charge_at_immigrate;
279 	/* taken only while moving_account > 0 */
280 	spinlock_t		move_lock;
281 	unsigned long		move_lock_flags;
282 
283 	MEMCG_PADDING(_pad1_);
284 
285 	atomic_long_t		vmstats[MEMCG_NR_STAT];
286 	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];
287 
288 	/* memory.events */
289 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
290 	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
291 
292 	unsigned long		socket_pressure;
293 
294 	/* Legacy tcp memory accounting */
295 	bool			tcpmem_active;
296 	int			tcpmem_pressure;
297 
298 #ifdef CONFIG_MEMCG_KMEM
299         /* Index in the kmem_cache->memcg_params.memcg_caches array */
300 	int kmemcg_id;
301 	enum memcg_kmem_state kmem_state;
302 	struct obj_cgroup __rcu *objcg;
303 	/* list of inherited objcgs, protected by objcg_lock */
304 	struct list_head objcg_list;
305 #endif
306 
307 	MEMCG_PADDING(_pad2_);
308 
309 	/*
310 	 * set > 0 if pages under this cgroup are moving to other cgroup.
311 	 */
312 	atomic_t		moving_account;
313 	struct task_struct	*move_lock_task;
314 
315 	/* Legacy local VM stats and events */
316 	struct memcg_vmstats_percpu __percpu *vmstats_local;
317 
318 	/* Subtree VM stats and events (batched updates) */
319 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
320 
321 #ifdef CONFIG_CGROUP_WRITEBACK
322 	struct list_head cgwb_list;
323 	struct wb_domain cgwb_domain;
324 	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
325 #endif
326 
327 	/* List of events which userspace want to receive */
328 	struct list_head event_list;
329 	spinlock_t event_list_lock;
330 
331 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
332 	struct deferred_split deferred_split_queue;
333 #endif
334 
335 	ANDROID_OEM_DATA(1);
336 	struct mem_cgroup_per_node *nodeinfo[0];
337 	/* WARNING: nodeinfo must be the last member here */
338 };
339 
340 /*
341  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
342  * TODO: bigger batches may be necessary on very large machines.
343  */
344 #define MEMCG_CHARGE_BATCH 32U
345 
346 extern struct mem_cgroup *root_mem_cgroup;
347 
348 struct lruvec *page_to_lruvec(struct page *page, pg_data_t *pgdat);
349 void do_traversal_all_lruvec(void);
350 
351 static __always_inline bool memcg_stat_item_in_bytes(int idx)
352 {
353 	if (idx == MEMCG_PERCPU_B)
354 		return true;
355 	return vmstat_item_in_bytes(idx);
356 }
357 
358 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
359 {
360 	return (memcg == root_mem_cgroup);
361 }
362 
363 static inline bool mem_cgroup_disabled(void)
364 {
365 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
366 }
367 
368 static inline void mem_cgroup_protection(struct mem_cgroup *root,
369 					 struct mem_cgroup *memcg,
370 					 unsigned long *min,
371 					 unsigned long *low)
372 {
373 	*min = *low = 0;
374 
375 	if (mem_cgroup_disabled())
376 		return;
377 
378 	/*
379 	 * There is no reclaim protection applied to a targeted reclaim.
380 	 * We are special casing this specific case here because
381 	 * mem_cgroup_protected calculation is not robust enough to keep
382 	 * the protection invariant for calculated effective values for
383 	 * parallel reclaimers with different reclaim target. This is
384 	 * especially a problem for tail memcgs (as they have pages on LRU)
385 	 * which would want to have effective values 0 for targeted reclaim
386 	 * but a different value for external reclaim.
387 	 *
388 	 * Example
389 	 * Let's have global and A's reclaim in parallel:
390 	 *  |
391 	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
392 	 *  |\
393 	 *  | C (low = 1G, usage = 2.5G)
394 	 *  B (low = 1G, usage = 0.5G)
395 	 *
396 	 * For the global reclaim
397 	 * A.elow = A.low
398 	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
399 	 * C.elow = min(C.usage, C.low)
400 	 *
401 	 * With the effective values resetting we have A reclaim
402 	 * A.elow = 0
403 	 * B.elow = B.low
404 	 * C.elow = C.low
405 	 *
406 	 * If the global reclaim races with A's reclaim then
407 	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
408 	 * is possible and reclaiming B would be violating the protection.
409 	 *
410 	 */
411 	if (root == memcg)
412 		return;
413 
414 	*min = READ_ONCE(memcg->memory.emin);
415 	*low = READ_ONCE(memcg->memory.elow);
416 }
417 
418 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
419 				     struct mem_cgroup *memcg);
420 
421 static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
422 {
423 	/*
424 	 * The root memcg doesn't account charges, and doesn't support
425 	 * protection.
426 	 */
427 	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
428 
429 }
430 
431 static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
432 {
433 	if (!mem_cgroup_supports_protection(memcg))
434 		return false;
435 
436 	return READ_ONCE(memcg->memory.elow) >=
437 		page_counter_read(&memcg->memory);
438 }
439 
440 static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
441 {
442 	if (!mem_cgroup_supports_protection(memcg))
443 		return false;
444 
445 	return READ_ONCE(memcg->memory.emin) >=
446 		page_counter_read(&memcg->memory);
447 }
448 
449 int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
450 			gfp_t gfp_mask);
451 static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
452 				    gfp_t gfp_mask)
453 {
454 	if (mem_cgroup_disabled())
455 		return 0;
456 	return __mem_cgroup_charge(page, mm, gfp_mask);
457 }
458 
459 void __mem_cgroup_uncharge(struct page *page);
460 static inline void mem_cgroup_uncharge(struct page *page)
461 {
462 	if (mem_cgroup_disabled())
463 		return;
464 	__mem_cgroup_uncharge(page);
465 }
466 
467 void __mem_cgroup_uncharge_list(struct list_head *page_list);
468 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
469 {
470 	if (mem_cgroup_disabled())
471 		return;
472 	__mem_cgroup_uncharge_list(page_list);
473 }
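
/*
 * Illustrative usage sketch (not part of the original header): the typical
 * pattern for the charge/uncharge pair above. insert_page_somewhere() is a
 * hypothetical placeholder; only mem_cgroup_charge() and
 * mem_cgroup_uncharge() come from this file.
 *
 *	int err = mem_cgroup_charge(page, mm, GFP_KERNEL);
 *
 *	if (err)
 *		return err;			// charge rejected by the memcg
 *	err = insert_page_somewhere(page);	// hypothetical insertion step
 *	if (err)
 *		mem_cgroup_uncharge(page);	// roll the charge back on error
 *	return err;
 */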
474 
475 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
476 
477 static struct mem_cgroup_per_node *
478 mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
479 {
480 	return memcg->nodeinfo[nid];
481 }
482 
483 /**
484  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
485  * @memcg: memcg of the wanted lruvec
486  *
487  * Returns the lru list vector holding pages for a given @memcg &
488  * @pgdat combination. This can be the node lruvec, if the memory
489  * controller is disabled.
490  */
491 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
492 					       struct pglist_data *pgdat)
493 {
494 	struct mem_cgroup_per_node *mz;
495 	struct lruvec *lruvec;
496 
497 	if (mem_cgroup_disabled()) {
498 		lruvec = &pgdat->__lruvec;
499 		goto out;
500 	}
501 
502 	if (!memcg)
503 		memcg = root_mem_cgroup;
504 
505 	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
506 	lruvec = &mz->lruvec;
507 out:
508 	/*
509 	 * Since a node can be onlined after the mem_cgroup was created,
510 	 * we have to be prepared to initialize lruvec->pgdat here;
511 	 * and if offlined then reonlined, we need to reinitialize it.
512 	 */
513 	if (unlikely(lruvec->pgdat != pgdat))
514 		lruvec->pgdat = pgdat;
515 	return lruvec;
516 }
517 
518 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
519 
520 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
521 
522 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
523 
524 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
525 
526 static inline
527 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
528 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
529 }
530 
531 static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
532 {
533 	return percpu_ref_tryget(&objcg->refcnt);
534 }
535 
536 static inline void obj_cgroup_get(struct obj_cgroup *objcg)
537 {
538 	percpu_ref_get(&objcg->refcnt);
539 }
540 
541 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
542 {
543 	percpu_ref_put(&objcg->refcnt);
544 }
545 
546 /*
547  * After initialization, objcg->memcg always points at a valid memcg,
548  * but it can be atomically swapped to the parent memcg.
549  *
550  * The caller must ensure that the returned memcg won't be released:
551  * e.g. acquire the rcu_read_lock or css_set_lock.
552  */
553 static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
554 {
555 	return READ_ONCE(objcg->memcg);
556 }
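
/*
 * Illustrative usage sketch (not part of the original header): pinning the
 * memcg behind an obj_cgroup under the RCU rule described above, taking a
 * css reference so it can still be used after rcu_read_unlock().
 *
 *	struct mem_cgroup *memcg;
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	if (!css_tryget(&memcg->css))		// cgroup is being destroyed
 *		memcg = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (memcg)
 *		css_put(&memcg->css);
 */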
557 
558 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
559 {
560 	if (memcg)
561 		css_put(&memcg->css);
562 }
563 
564 #define mem_cgroup_from_counter(counter, member)	\
565 	container_of(counter, struct mem_cgroup, member)
566 
567 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
568 				   struct mem_cgroup *,
569 				   struct mem_cgroup_reclaim_cookie *);
570 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
571 int mem_cgroup_scan_tasks(struct mem_cgroup *,
572 			  int (*)(struct task_struct *, void *), void *);
573 
574 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
575 {
576 	if (mem_cgroup_disabled())
577 		return 0;
578 
579 	return memcg->id.id;
580 }
581 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
582 
583 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
584 {
585 	return mem_cgroup_from_css(seq_css(m));
586 }
587 
588 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
589 {
590 	struct mem_cgroup_per_node *mz;
591 
592 	if (mem_cgroup_disabled())
593 		return NULL;
594 
595 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
596 	return mz->memcg;
597 }
598 
599 /**
600  * parent_mem_cgroup - find the accounting parent of a memcg
601  * @memcg: memcg whose parent to find
602  *
603  * Returns the parent memcg, or NULL if this is the root or the memory
604  * controller is in legacy no-hierarchy mode.
605  */
606 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
607 {
608 	if (!memcg->memory.parent)
609 		return NULL;
610 	return mem_cgroup_from_counter(memcg->memory.parent, memory);
611 }
612 
613 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
614 			      struct mem_cgroup *root)
615 {
616 	if (root == memcg)
617 		return true;
618 	if (!root->use_hierarchy)
619 		return false;
620 	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
621 }
622 
623 static inline bool mm_match_cgroup(struct mm_struct *mm,
624 				   struct mem_cgroup *memcg)
625 {
626 	struct mem_cgroup *task_memcg;
627 	bool match = false;
628 
629 	rcu_read_lock();
630 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
631 	if (task_memcg)
632 		match = mem_cgroup_is_descendant(task_memcg, memcg);
633 	rcu_read_unlock();
634 	return match;
635 }
636 
637 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
638 ino_t page_cgroup_ino(struct page *page);
639 
640 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
641 {
642 	if (mem_cgroup_disabled())
643 		return true;
644 	return !!(memcg->css.flags & CSS_ONLINE);
645 }
646 
647 /*
648  * For memory reclaim.
649  */
650 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
651 
652 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
653 		int zid, int nr_pages);
654 
655 static inline
656 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
657 		enum lru_list lru, int zone_idx)
658 {
659 	struct mem_cgroup_per_node *mz;
660 
661 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
662 	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
663 }
664 
665 void mem_cgroup_handle_over_high(void);
666 
667 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
668 
669 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
670 
671 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
672 				struct task_struct *p);
673 
674 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
675 
676 static inline void mem_cgroup_enter_user_fault(void)
677 {
678 	WARN_ON(current->in_user_fault);
679 	current->in_user_fault = 1;
680 }
681 
682 static inline void mem_cgroup_exit_user_fault(void)
683 {
684 	WARN_ON(!current->in_user_fault);
685 	current->in_user_fault = 0;
686 }
687 
688 static inline bool task_in_memcg_oom(struct task_struct *p)
689 {
690 	return p->memcg_in_oom;
691 }
692 
693 bool mem_cgroup_oom_synchronize(bool wait);
694 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
695 					    struct mem_cgroup *oom_domain);
696 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
697 
698 #ifdef CONFIG_MEMCG_SWAP
699 extern bool cgroup_memory_noswap;
700 #endif
701 
702 struct mem_cgroup *lock_page_memcg(struct page *page);
703 void __unlock_page_memcg(struct mem_cgroup *memcg);
704 void unlock_page_memcg(struct page *page);
705 
706 /*
707  * idx can be of type enum memcg_stat_item or node_stat_item.
708  * Keep in sync with memcg_exact_page_state().
709  */
710 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
711 {
712 	long x = atomic_long_read(&memcg->vmstats[idx]);
713 #ifdef CONFIG_SMP
714 	if (x < 0)
715 		x = 0;
716 #endif
717 	return x;
718 }
719 
720 /*
721  * idx can be of type enum memcg_stat_item or node_stat_item.
722  * Keep in sync with memcg_exact_page_state().
723  */
724 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
725 						   int idx)
726 {
727 	long x = 0;
728 	int cpu;
729 
730 	for_each_possible_cpu(cpu)
731 		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
732 #ifdef CONFIG_SMP
733 	if (x < 0)
734 		x = 0;
735 #endif
736 	return x;
737 }
738 
739 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
740 
741 /* idx can be of type enum memcg_stat_item or node_stat_item */
742 static inline void mod_memcg_state(struct mem_cgroup *memcg,
743 				   int idx, int val)
744 {
745 	unsigned long flags;
746 
747 	local_irq_save(flags);
748 	__mod_memcg_state(memcg, idx, val);
749 	local_irq_restore(flags);
750 }
751 
752 /**
753  * mod_memcg_page_state - update page state statistics
754  * @page: the page
755  * @idx: page state item to account
756  * @val: number of pages (positive or negative)
757  *
758  * The @page must be locked or the caller must use lock_page_memcg()
759  * to prevent double accounting when the page is concurrently being
760  * moved to another memcg:
761  *
762  *   lock_page(page) or lock_page_memcg(page)
763  *   if (TestClearPageState(page))
764  *     mod_memcg_page_state(page, state, -1);
765  *   unlock_page(page) or unlock_page_memcg(page)
766  *
767  * Kernel pages are an exception to this, since they'll never move.
768  */
769 static inline void __mod_memcg_page_state(struct page *page,
770 					  int idx, int val)
771 {
772 	if (page->mem_cgroup)
773 		__mod_memcg_state(page->mem_cgroup, idx, val);
774 }
775 
776 static inline void mod_memcg_page_state(struct page *page,
777 					int idx, int val)
778 {
779 	if (page->mem_cgroup)
780 		mod_memcg_state(page->mem_cgroup, idx, val);
781 }
782 
783 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
784 					      enum node_stat_item idx)
785 {
786 	struct mem_cgroup_per_node *pn;
787 	long x;
788 
789 	if (mem_cgroup_disabled())
790 		return node_page_state(lruvec_pgdat(lruvec), idx);
791 
792 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
793 	x = atomic_long_read(&pn->lruvec_stat[idx]);
794 #ifdef CONFIG_SMP
795 	if (x < 0)
796 		x = 0;
797 #endif
798 	return x;
799 }
800 
801 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
802 						    enum node_stat_item idx)
803 {
804 	struct mem_cgroup_per_node *pn;
805 	long x = 0;
806 	int cpu;
807 
808 	if (mem_cgroup_disabled())
809 		return node_page_state(lruvec_pgdat(lruvec), idx);
810 
811 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
812 	for_each_possible_cpu(cpu)
813 		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
814 #ifdef CONFIG_SMP
815 	if (x < 0)
816 		x = 0;
817 #endif
818 	return x;
819 }
820 
821 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
822 			      int val);
823 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
824 			int val);
825 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
826 
827 void mod_memcg_obj_state(void *p, int idx, int val);
828 
829 static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
830 					 int val)
831 {
832 	unsigned long flags;
833 
834 	local_irq_save(flags);
835 	__mod_lruvec_slab_state(p, idx, val);
836 	local_irq_restore(flags);
837 }
838 
839 static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
840 					  enum node_stat_item idx, int val)
841 {
842 	unsigned long flags;
843 
844 	local_irq_save(flags);
845 	__mod_memcg_lruvec_state(lruvec, idx, val);
846 	local_irq_restore(flags);
847 }
848 
849 static inline void mod_lruvec_state(struct lruvec *lruvec,
850 				    enum node_stat_item idx, int val)
851 {
852 	unsigned long flags;
853 
854 	local_irq_save(flags);
855 	__mod_lruvec_state(lruvec, idx, val);
856 	local_irq_restore(flags);
857 }
858 
859 static inline void __mod_lruvec_page_state(struct page *page,
860 					   enum node_stat_item idx, int val)
861 {
862 	struct page *head = compound_head(page); /* rmap on tail pages */
863 	pg_data_t *pgdat = page_pgdat(page);
864 	struct lruvec *lruvec;
865 
866 	/* Untracked pages have no memcg, no lruvec. Update only the node */
867 	if (!head->mem_cgroup) {
868 		__mod_node_page_state(pgdat, idx, val);
869 		return;
870 	}
871 
872 	lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
873 	__mod_lruvec_state(lruvec, idx, val);
874 }
875 
876 static inline void mod_lruvec_page_state(struct page *page,
877 					 enum node_stat_item idx, int val)
878 {
879 	unsigned long flags;
880 
881 	local_irq_save(flags);
882 	__mod_lruvec_page_state(page, idx, val);
883 	local_irq_restore(flags);
884 }
885 
886 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
887 						gfp_t gfp_mask,
888 						unsigned long *total_scanned);
889 
890 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
891 			  unsigned long count);
892 
893 static inline void count_memcg_events(struct mem_cgroup *memcg,
894 				      enum vm_event_item idx,
895 				      unsigned long count)
896 {
897 	unsigned long flags;
898 
899 	local_irq_save(flags);
900 	__count_memcg_events(memcg, idx, count);
901 	local_irq_restore(flags);
902 }
903 
904 static inline void count_memcg_page_event(struct page *page,
905 					  enum vm_event_item idx)
906 {
907 	if (page->mem_cgroup)
908 		count_memcg_events(page->mem_cgroup, idx, 1);
909 }
910 
911 static inline void count_memcg_event_mm(struct mm_struct *mm,
912 					enum vm_event_item idx)
913 {
914 	struct mem_cgroup *memcg;
915 
916 	if (mem_cgroup_disabled())
917 		return;
918 
919 	rcu_read_lock();
920 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
921 	if (likely(memcg))
922 		count_memcg_events(memcg, idx, 1);
923 	rcu_read_unlock();
924 }
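
/*
 * Illustrative usage sketch (not part of the original header), assuming a
 * vma in scope: accounting a VM event against the memcg of the mm's owner,
 * roughly how PGFAULT-style events reach memory.stat.
 *
 *	count_memcg_event_mm(vma->vm_mm, PGFAULT);
 */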
925 
926 static inline void memcg_memory_event(struct mem_cgroup *memcg,
927 				      enum memcg_memory_event event)
928 {
929 	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
930 			  event == MEMCG_SWAP_FAIL;
931 
932 	atomic_long_inc(&memcg->memory_events_local[event]);
933 	if (!swap_event)
934 		cgroup_file_notify(&memcg->events_local_file);
935 
936 	do {
937 		atomic_long_inc(&memcg->memory_events[event]);
938 		if (swap_event)
939 			cgroup_file_notify(&memcg->swap_events_file);
940 		else
941 			cgroup_file_notify(&memcg->events_file);
942 
943 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
944 			break;
945 		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
946 			break;
947 	} while ((memcg = parent_mem_cgroup(memcg)) &&
948 		 !mem_cgroup_is_root(memcg));
949 }
950 
951 static inline void memcg_memory_event_mm(struct mm_struct *mm,
952 					 enum memcg_memory_event event)
953 {
954 	struct mem_cgroup *memcg;
955 
956 	if (mem_cgroup_disabled())
957 		return;
958 
959 	rcu_read_lock();
960 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
961 	if (likely(memcg))
962 		memcg_memory_event(memcg, event);
963 	rcu_read_unlock();
964 }
965 
966 void split_page_memcg(struct page *head, unsigned int nr);
967 
968 #else /* CONFIG_MEMCG */
969 
970 #define MEM_CGROUP_ID_SHIFT	0
971 #define MEM_CGROUP_ID_MAX	0
972 
973 struct mem_cgroup;
974 
975 static inline struct lruvec *page_to_lruvec(struct page *page, pg_data_t *pgdat)
976 {
977 	return NULL;
978 }
979 
980 static inline void do_traversal_all_lruvec(void)
981 {
982 }
983 
984 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
985 {
986 	return true;
987 }
988 
989 static inline bool mem_cgroup_disabled(void)
990 {
991 	return true;
992 }
993 
994 static inline void memcg_memory_event(struct mem_cgroup *memcg,
995 				      enum memcg_memory_event event)
996 {
997 }
998 
999 static inline void memcg_memory_event_mm(struct mm_struct *mm,
1000 					 enum memcg_memory_event event)
1001 {
1002 }
1003 
1004 static inline void mem_cgroup_protection(struct mem_cgroup *root,
1005 					 struct mem_cgroup *memcg,
1006 					 unsigned long *min,
1007 					 unsigned long *low)
1008 {
1009 	*min = *low = 0;
1010 }
1011 
1012 static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
1013 						   struct mem_cgroup *memcg)
1014 {
1015 }
1016 
1017 static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
1018 {
1019 	return false;
1020 }
1021 
1022 static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
1023 {
1024 	return false;
1025 }
1026 
1027 static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
1028 				    gfp_t gfp_mask)
1029 {
1030 	return 0;
1031 }
1032 
1033 static inline void mem_cgroup_uncharge(struct page *page)
1034 {
1035 }
1036 
1037 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
1038 {
1039 }
1040 
1041 static inline void mem_cgroup_migrate(struct page *old, struct page *new)
1042 {
1043 }
1044 
1045 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1046 					       struct pglist_data *pgdat)
1047 {
1048 	return &pgdat->__lruvec;
1049 }
1050 
1051 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
1052 						    struct pglist_data *pgdat)
1053 {
1054 	return &pgdat->__lruvec;
1055 }
1056 
1057 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1058 {
1059 	return NULL;
1060 }
1061 
1062 static inline bool mm_match_cgroup(struct mm_struct *mm,
1063 		struct mem_cgroup *memcg)
1064 {
1065 	return true;
1066 }
1067 
1068 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1069 {
1070 	return NULL;
1071 }
1072 
1073 static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
1074 {
1075 	return NULL;
1076 }
1077 
1078 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1079 {
1080 }
1081 
1082 static inline struct mem_cgroup *
1083 mem_cgroup_iter(struct mem_cgroup *root,
1084 		struct mem_cgroup *prev,
1085 		struct mem_cgroup_reclaim_cookie *reclaim)
1086 {
1087 	return NULL;
1088 }
1089 
1090 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
1091 					 struct mem_cgroup *prev)
1092 {
1093 }
1094 
1095 static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1096 		int (*fn)(struct task_struct *, void *), void *arg)
1097 {
1098 	return 0;
1099 }
1100 
1101 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1102 {
1103 	return 0;
1104 }
1105 
1106 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
1107 {
1108 	WARN_ON_ONCE(id);
1109 	/* XXX: This should always return root_mem_cgroup */
1110 	return NULL;
1111 }
1112 
1113 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
1114 {
1115 	return NULL;
1116 }
1117 
1118 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
1119 {
1120 	return NULL;
1121 }
1122 
1123 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1124 {
1125 	return true;
1126 }
1127 
1128 static inline
1129 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
1130 		enum lru_list lru, int zone_idx)
1131 {
1132 	return 0;
1133 }
1134 
1135 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1136 {
1137 	return 0;
1138 }
1139 
1140 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1141 {
1142 	return 0;
1143 }
1144 
1145 static inline void
1146 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1147 {
1148 }
1149 
1150 static inline void
1151 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1152 {
1153 }
1154 
1155 static inline struct mem_cgroup *lock_page_memcg(struct page *page)
1156 {
1157 	return NULL;
1158 }
1159 
1160 static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
1161 {
1162 }
1163 
1164 static inline void unlock_page_memcg(struct page *page)
1165 {
1166 }
1167 
1168 static inline void mem_cgroup_handle_over_high(void)
1169 {
1170 }
1171 
1172 static inline void mem_cgroup_enter_user_fault(void)
1173 {
1174 }
1175 
1176 static inline void mem_cgroup_exit_user_fault(void)
1177 {
1178 }
1179 
1180 static inline bool task_in_memcg_oom(struct task_struct *p)
1181 {
1182 	return false;
1183 }
1184 
1185 static inline bool mem_cgroup_oom_synchronize(bool wait)
1186 {
1187 	return false;
1188 }
1189 
1190 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1191 	struct task_struct *victim, struct mem_cgroup *oom_domain)
1192 {
1193 	return NULL;
1194 }
1195 
1196 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1197 {
1198 }
1199 
1200 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1201 {
1202 	return 0;
1203 }
1204 
1205 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
1206 						   int idx)
1207 {
1208 	return 0;
1209 }
1210 
1211 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1212 				     int idx,
1213 				     int nr)
1214 {
1215 }
1216 
1217 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1218 				   int idx,
1219 				   int nr)
1220 {
1221 }
1222 
1223 static inline void __mod_memcg_page_state(struct page *page,
1224 					  int idx,
1225 					  int nr)
1226 {
1227 }
1228 
1229 static inline void mod_memcg_page_state(struct page *page,
1230 					int idx,
1231 					int nr)
1232 {
1233 }
1234 
1235 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1236 					      enum node_stat_item idx)
1237 {
1238 	return node_page_state(lruvec_pgdat(lruvec), idx);
1239 }
1240 
1241 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1242 						    enum node_stat_item idx)
1243 {
1244 	return node_page_state(lruvec_pgdat(lruvec), idx);
1245 }
1246 
1247 static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
1248 					    enum node_stat_item idx, int val)
1249 {
1250 }
1251 
1252 static inline void __mod_lruvec_state(struct lruvec *lruvec,
1253 				      enum node_stat_item idx, int val)
1254 {
1255 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1256 }
1257 
1258 static inline void mod_lruvec_state(struct lruvec *lruvec,
1259 				    enum node_stat_item idx, int val)
1260 {
1261 	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1262 }
1263 
1264 static inline void __mod_lruvec_page_state(struct page *page,
1265 					   enum node_stat_item idx, int val)
1266 {
1267 	__mod_node_page_state(page_pgdat(page), idx, val);
1268 }
1269 
1270 static inline void mod_lruvec_page_state(struct page *page,
1271 					 enum node_stat_item idx, int val)
1272 {
1273 	mod_node_page_state(page_pgdat(page), idx, val);
1274 }
1275 
1276 static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
1277 					   int val)
1278 {
1279 	struct page *page = virt_to_head_page(p);
1280 
1281 	__mod_node_page_state(page_pgdat(page), idx, val);
1282 }
1283 
1284 static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
1285 					 int val)
1286 {
1287 	struct page *page = virt_to_head_page(p);
1288 
1289 	mod_node_page_state(page_pgdat(page), idx, val);
1290 }
1291 
1292 static inline void mod_memcg_obj_state(void *p, int idx, int val)
1293 {
1294 }
1295 
1296 static inline
1297 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1298 					    gfp_t gfp_mask,
1299 					    unsigned long *total_scanned)
1300 {
1301 	return 0;
1302 }
1303 
1304 static inline void split_page_memcg(struct page *head, unsigned int nr)
1305 {
1306 }
1307 
1308 static inline void count_memcg_events(struct mem_cgroup *memcg,
1309 				      enum vm_event_item idx,
1310 				      unsigned long count)
1311 {
1312 }
1313 
1314 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1315 					enum vm_event_item idx,
1316 					unsigned long count)
1317 {
1318 }
1319 
1320 static inline void count_memcg_page_event(struct page *page,
1321 					  int idx)
1322 {
1323 }
1324 
1325 static inline
1326 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1327 {
1328 }
1329 #endif /* CONFIG_MEMCG */
1330 
1331 /* idx can be of type enum memcg_stat_item or node_stat_item */
1332 static inline void __inc_memcg_state(struct mem_cgroup *memcg,
1333 				     int idx)
1334 {
1335 	__mod_memcg_state(memcg, idx, 1);
1336 }
1337 
1338 /* idx can be of type enum memcg_stat_item or node_stat_item */
1339 static inline void __dec_memcg_state(struct mem_cgroup *memcg,
1340 				     int idx)
1341 {
1342 	__mod_memcg_state(memcg, idx, -1);
1343 }
1344 
1345 /* idx can be of type enum memcg_stat_item or node_stat_item */
1346 static inline void __inc_memcg_page_state(struct page *page,
1347 					  int idx)
1348 {
1349 	__mod_memcg_page_state(page, idx, 1);
1350 }
1351 
1352 /* idx can be of type enum memcg_stat_item or node_stat_item */
1353 static inline void __dec_memcg_page_state(struct page *page,
1354 					  int idx)
1355 {
1356 	__mod_memcg_page_state(page, idx, -1);
1357 }
1358 
1359 static inline void __inc_lruvec_state(struct lruvec *lruvec,
1360 				      enum node_stat_item idx)
1361 {
1362 	__mod_lruvec_state(lruvec, idx, 1);
1363 }
1364 
1365 static inline void __dec_lruvec_state(struct lruvec *lruvec,
1366 				      enum node_stat_item idx)
1367 {
1368 	__mod_lruvec_state(lruvec, idx, -1);
1369 }
1370 
1371 static inline void __inc_lruvec_page_state(struct page *page,
1372 					   enum node_stat_item idx)
1373 {
1374 	__mod_lruvec_page_state(page, idx, 1);
1375 }
1376 
1377 static inline void __dec_lruvec_page_state(struct page *page,
1378 					   enum node_stat_item idx)
1379 {
1380 	__mod_lruvec_page_state(page, idx, -1);
1381 }
1382 
1383 static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
1384 {
1385 	__mod_lruvec_slab_state(p, idx, 1);
1386 }
1387 
1388 static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
1389 {
1390 	__mod_lruvec_slab_state(p, idx, -1);
1391 }
1392 
1393 /* idx can be of type enum memcg_stat_item or node_stat_item */
1394 static inline void inc_memcg_state(struct mem_cgroup *memcg,
1395 				   int idx)
1396 {
1397 	mod_memcg_state(memcg, idx, 1);
1398 }
1399 
1400 /* idx can be of type enum memcg_stat_item or node_stat_item */
1401 static inline void dec_memcg_state(struct mem_cgroup *memcg,
1402 				   int idx)
1403 {
1404 	mod_memcg_state(memcg, idx, -1);
1405 }
1406 
1407 /* idx can be of type enum memcg_stat_item or node_stat_item */
1408 static inline void inc_memcg_page_state(struct page *page,
1409 					int idx)
1410 {
1411 	mod_memcg_page_state(page, idx, 1);
1412 }
1413 
1414 /* idx can be of type enum memcg_stat_item or node_stat_item */
1415 static inline void dec_memcg_page_state(struct page *page,
1416 					int idx)
1417 {
1418 	mod_memcg_page_state(page, idx, -1);
1419 }
1420 
1421 static inline void inc_lruvec_state(struct lruvec *lruvec,
1422 				    enum node_stat_item idx)
1423 {
1424 	mod_lruvec_state(lruvec, idx, 1);
1425 }
1426 
1427 static inline void dec_lruvec_state(struct lruvec *lruvec,
1428 				    enum node_stat_item idx)
1429 {
1430 	mod_lruvec_state(lruvec, idx, -1);
1431 }
1432 
1433 static inline void inc_lruvec_page_state(struct page *page,
1434 					 enum node_stat_item idx)
1435 {
1436 	mod_lruvec_page_state(page, idx, 1);
1437 }
1438 
1439 static inline void dec_lruvec_page_state(struct page *page,
1440 					 enum node_stat_item idx)
1441 {
1442 	mod_lruvec_page_state(page, idx, -1);
1443 }
1444 
1445 static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1446 {
1447 	struct mem_cgroup *memcg;
1448 
1449 	memcg = lruvec_memcg(lruvec);
1450 	if (!memcg)
1451 		return NULL;
1452 	memcg = parent_mem_cgroup(memcg);
1453 	if (!memcg)
1454 		return NULL;
1455 	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1456 }
1457 
1458 #ifdef CONFIG_CGROUP_WRITEBACK
1459 
1460 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1461 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1462 			 unsigned long *pheadroom, unsigned long *pdirty,
1463 			 unsigned long *pwriteback);
1464 
1465 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
1466 					     struct bdi_writeback *wb);
1467 
1468 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
1469 						  struct bdi_writeback *wb)
1470 {
1471 	if (mem_cgroup_disabled())
1472 		return;
1473 
1474 	if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
1475 		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
1476 }
1477 
1478 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1479 
1480 #else	/* CONFIG_CGROUP_WRITEBACK */
1481 
1482 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1483 {
1484 	return NULL;
1485 }
1486 
1487 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1488 				       unsigned long *pfilepages,
1489 				       unsigned long *pheadroom,
1490 				       unsigned long *pdirty,
1491 				       unsigned long *pwriteback)
1492 {
1493 }
1494 
1495 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
1496 						  struct bdi_writeback *wb)
1497 {
1498 }
1499 
1500 static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1501 {
1502 }
1503 
1504 #endif	/* CONFIG_CGROUP_WRITEBACK */
1505 
1506 struct sock;
1507 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1508 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1509 #ifdef CONFIG_MEMCG
1510 extern struct static_key_false memcg_sockets_enabled_key;
1511 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1512 void mem_cgroup_sk_alloc(struct sock *sk);
1513 void mem_cgroup_sk_free(struct sock *sk);
1514 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1515 {
1516 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
1517 		return true;
1518 	do {
1519 		if (time_before(jiffies, memcg->socket_pressure))
1520 			return true;
1521 	} while ((memcg = parent_mem_cgroup(memcg)));
1522 	return false;
1523 }
1524 
1525 extern int memcg_expand_shrinker_maps(int new_id);
1526 
1527 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1528 				   int nid, int shrinker_id);
1529 #else
1530 #define mem_cgroup_sockets_enabled 0
1531 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
1532 static inline void mem_cgroup_sk_free(struct sock *sk) { };
1533 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1534 {
1535 	return false;
1536 }
1537 
1538 static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1539 					  int nid, int shrinker_id)
1540 {
1541 }
1542 #endif
1543 
1544 #ifdef CONFIG_MEMCG_KMEM
1545 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
1546 			unsigned int nr_pages);
1547 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
1548 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1549 void __memcg_kmem_uncharge_page(struct page *page, int order);
1550 
1551 struct obj_cgroup *get_obj_cgroup_from_current(void);
1552 
1553 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
1554 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
1555 
1556 extern struct static_key_false memcg_kmem_enabled_key;
1557 
1558 extern int memcg_nr_cache_ids;
1559 void memcg_get_cache_ids(void);
1560 void memcg_put_cache_ids(void);
1561 
1562 /*
1563  * Helper macro to loop through all memcg-specific caches. Callers must still
1564  * check if the cache is valid (it is either valid or NULL).
1565  * The slab_mutex must be held while looping through those caches.
1566  */
1567 #define for_each_memcg_cache_index(_idx)	\
1568 	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
1569 
1570 static inline bool memcg_kmem_enabled(void)
1571 {
1572 	return static_branch_likely(&memcg_kmem_enabled_key);
1573 }
1574 
1575 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1576 					 int order)
1577 {
1578 	if (memcg_kmem_enabled())
1579 		return __memcg_kmem_charge_page(page, gfp, order);
1580 	return 0;
1581 }
1582 
1583 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1584 {
1585 	if (memcg_kmem_enabled())
1586 		__memcg_kmem_uncharge_page(page, order);
1587 }
1588 
1589 static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
1590 				    unsigned int nr_pages)
1591 {
1592 	if (memcg_kmem_enabled())
1593 		return __memcg_kmem_charge(memcg, gfp, nr_pages);
1594 	return 0;
1595 }
1596 
1597 static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
1598 				       unsigned int nr_pages)
1599 {
1600 	if (memcg_kmem_enabled())
1601 		__memcg_kmem_uncharge(memcg, nr_pages);
1602 }
1603 
1604 /*
1605  * Helper for accessing a memcg's index. It is used as an index in the
1606  * child cache array in kmem_cache, and also to derive its name. This function
1607  * will return -1 when this is not a kmem-limited memcg.
1608  */
1609 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1610 {
1611 	return memcg ? memcg->kmemcg_id : -1;
1612 }
1613 
1614 struct mem_cgroup *mem_cgroup_from_obj(void *p);
1615 
1616 #else
1617 
1618 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1619 					 int order)
1620 {
1621 	return 0;
1622 }
1623 
1624 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1625 {
1626 }
1627 
1628 static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1629 					   int order)
1630 {
1631 	return 0;
1632 }
1633 
1634 static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
1635 {
1636 }
1637 
1638 #define for_each_memcg_cache_index(_idx)	\
1639 	for (; NULL; )
1640 
1641 static inline bool memcg_kmem_enabled(void)
1642 {
1643 	return false;
1644 }
1645 
1646 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1647 {
1648 	return -1;
1649 }
1650 
1651 static inline void memcg_get_cache_ids(void)
1652 {
1653 }
1654 
1655 static inline void memcg_put_cache_ids(void)
1656 {
1657 }
1658 
1659 static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
1660 {
1661        return NULL;
1662 }
1663 
1664 #endif /* CONFIG_MEMCG_KMEM */
1665 
1666 #endif /* _LINUX_MEMCONTROL_H */
1667