/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mmzone.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names array is defined in
 * mm/memcontrol.c; the two lists must be kept in sync with each other.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_DIRTY,		/* # of dirty pages in page cache */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
	/* default hierarchy stats */
	MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
	MEMCG_SLAB_RECLAIMABLE,
	MEMCG_SLAB_UNRECLAIMABLE,
	MEMCG_SOCK,
	MEMCG_NR_STAT,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int priority;
	unsigned int generation;
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
	/* default hierarchy events */
	MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_NR_EVENTS,
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
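
/*
 * A minimal sketch of how such a target is consumed (the real logic is
 * the event ratelimit check in mm/memcontrol.c; TARGET_SPACING here is
 * a hypothetical rearm distance, not a kernel constant): the per-cpu
 * page event count is compared against the target, and the target is
 * pushed forward once it has been crossed.
 *
 *	static bool example_event_ratelimit(struct mem_cgroup *memcg,
 *					    enum mem_cgroup_events_target t)
 *	{
 *		unsigned long val, next;
 *
 *		val = __this_cpu_read(memcg->stat->nr_page_events);
 *		next = __this_cpu_read(memcg->stat->targets[t]);
 *		if ((long)(next - val) < 0) {
 *			__this_cpu_write(memcg->stat->targets[t],
 *					 val + TARGET_SPACING);
 *			return true;	// time for the periodic work
 *		}
 *		return false;
 *	}
 */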

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	atomic_t ref;
};

struct mem_cgroup_stat_cpu {
	long count[MEMCG_NR_STAT];
	unsigned long events[MEMCG_NR_EVENTS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;
	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index pointing to the threshold just below or equal to usage */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};
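
/*
 * A sketch of why the spare array exists (the real code is the event
 * unregister path in mm/memcontrol.c): removing a threshold shrinks
 * the array, and allocating the smaller copy ahead of time into
 * ->spare lets the removal itself proceed without a failable
 * allocation; "old" below stands for the previous primary array:
 *
 *	new = thresholds->spare;
 *	...copy every entry except the removed one into new...
 *	new->size = thresholds->primary->size - 1;
 *	rcu_assign_pointer(thresholds->primary, new);
 *	thresholds->spare = old;	// reused on the next change
 */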

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Normal memory consumption range */
	unsigned long low;
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* handle for "memory.events" */
	struct cgroup_file events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/*
	 * Set > 0 if pages under this cgroup are moving to another cgroup.
	 */
	atomic_t		moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	struct task_struct	*move_lock_task;
	unsigned long		move_lock_flags;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifndef CONFIG_SLOB
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

/**
 * mem_cgroup_events - count memory events against a cgroup
 * @memcg: the memory cgroup
 * @idx: the event index
 * @nr: the number of events to account for
 */
static inline void mem_cgroup_events(struct mem_cgroup *memcg,
		       enum mem_cgroup_events_index idx,
		       unsigned int nr)
{
	this_cpu_add(memcg->stat->events[idx], nr);
	cgroup_file_notify(&memcg->events_file);
}
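
/*
 * Example (a sketch of typical use, as in the charge path of
 * mm/memcontrol.c): record one "high" event when a charge takes a
 * cgroup past its memory.high limit:
 *
 *	mem_cgroup_events(memcg, MEMCG_HIGH, 1);
 */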

bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
		bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

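/*
 * A sketch of the two-phase charge protocol formed by the declarations
 * above (compare the anonymous fault path in mm/memory.c): try_charge
 * reserves the pages and returns the memcg, commit binds the page to
 * it once the page is safely mapped, and cancel undoes the reservation
 * on failure.
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		goto fail;
 *	...install the page table entry...
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *
 *	or, if an error occurs after a successful try_charge:
 *
 *	mem_cgroup_cancel_charge(page, memcg, false);
 */
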
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static inline struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
 * @pgdat: node of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for a given @pgdat and
 * @memcg. This can be the node lruvec, if the memory controller
 * is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}
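
/*
 * Example (sketch): the per-node reclaim code picks the LRU lists to
 * scan for a given cgroup this way; with the controller disabled it
 * degenerates to the node's own lruvec:
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 *
 * The caller then scans lruvec->lists[] under the node's LRU lock.
 */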

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}
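
/*
 * Example (sketch): walking up the accounting hierarchy until the
 * root is reached, the same pattern used by
 * mem_cgroup_under_socket_pressure() later in this header:
 *
 *	do {
 *		...examine or charge against memcg...
 *	} while ((memcg = parent_mem_cgroup(memcg)));
 */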

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
			      struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
		int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_node *mz;
	unsigned long nr_pages = 0;
	int zid;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr_pages += mz->lru_zone_size[zid][lru];
	return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_may_oom);
	current->memcg_may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_may_oom);
	current->memcg_may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

/**
 * mem_cgroup_update_page_stat - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mem_cgroup_update_page_stat(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 */
static inline void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx, int val)
{
	VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));

	if (page->mem_cgroup)
		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
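
/*
 * Example (a sketch following the locking rule documented above):
 * roughly how the rmap code accounts a page becoming file-mapped:
 *
 *	lock_page_memcg(page);
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *	unlock_page_memcg(page);
 */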

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
		goto out;

	switch (idx) {
	case PGFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
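
/*
 * Example (sketch): fault handlers mirror the global VM event into
 * the faulting task's memcg, e.g. on a major fault:
 *
 *	count_vm_event(PGMAJFAULT);
 *	mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 */
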
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void mem_cgroup_events(struct mem_cgroup *memcg,
				     enum mem_cgroup_events_index idx,
				     unsigned int nr)
{
}

static inline bool mem_cgroup_low(struct mem_cgroup *root,
				  struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			     int nid, unsigned int lru_mask)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_update_page_stat(struct page *page,
					       enum mem_cgroup_stat_index idx,
					       int nr)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_CGROUP_WRITEBACK

struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg);
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);
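
/*
 * Example (sketch): roughly how the page allocator charges an opt-in
 * (__GFP_ACCOUNT) kernel allocation, and how the free path undoes it:
 *
 *	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) &&
 *	    memcg_kmem_charge(page, gfp, order))
 *		goto out_free_page;
 *	...
 *	memcg_kmem_uncharge(page, order);	// when the page is freed
 */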

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
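
/*
 * Example (sketch): iterating the per-memcg children of a root cache.
 * cache_from_memcg_idx() is the lookup helper in mm/slab.h, so this
 * pattern is only usable from within mm/:
 *
 *	int i;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = cache_from_memcg_idx(root_cache, i);
 *		if (!c)
 *			continue;
 *		...operate on c...
 *	}
 *	mutex_unlock(&slab_mutex);
 */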

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

/*
 * Helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

/**
 * memcg_kmem_update_page_stat - update kmem page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 */
static inline void memcg_kmem_update_page_stat(struct page *page,
				enum mem_cgroup_stat_index idx, int val)
{
	if (memcg_kmem_enabled() && page->mem_cgroup)
		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}

#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline void memcg_kmem_update_page_stat(struct page *page,
				enum mem_cgroup_stat_index idx, int val)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#endif /* _LINUX_MEMCONTROL_H */