• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SHRINKER_H
3 #define _LINUX_SHRINKER_H
4 
5 #include <linux/android_vendor.h>
6 
7 /*
8  * This struct is used to pass information from page reclaim to the shrinkers.
9  * We consolidate the values for easier extension later.
10  *
11  * The 'gfp_mask' refers to the allocation we are currently trying to
12  * fulfil.
13  */
14 struct shrink_control {
15 	gfp_t gfp_mask;		/* gfp mask of the allocation we are reclaiming on behalf of */
16 
17 	/* current node being shrunk (for NUMA aware shrinkers) */
18 	int nid;
19 
20 	/*
21 	 * How many objects scan_objects should scan and try to reclaim.
22 	 * This is reset before every call, so it is safe for callees
23 	 * to modify.
24 	 */
25 	unsigned long nr_to_scan;
26 
27 	/*
28 	 * How many objects did scan_objects process?
29 	 * This defaults to nr_to_scan before every call, but the callee
30 	 * should track its actual progress.
31 	 */
32 	unsigned long nr_scanned;
33 
34 	/* current memcg being shrunk (for memcg aware shrinkers) */
35 	struct mem_cgroup *memcg;
36 	ANDROID_OEM_DATA_ARRAY(1, 3);	/* reserved OEM extension slots; see <linux/android_vendor.h> */
37 };
38 
39 #define SHRINK_STOP (~0UL)
40 #define SHRINK_EMPTY (~0UL - 1)
41 /*
42  * A callback you can register to apply pressure to ageable caches.
43  *
44  * @count_objects should return the number of freeable items in the cache. If
45  * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
46  * returned in cases where the number of freeable items cannot be determined
47  * or the shrinker should skip this cache for this time (e.g., their number
48  * is below the shrinkable limit). No deadlock checks should be done during the
49  * count callback - the shrinker relies on aggregating scan counts that couldn't
50  * be executed due to potential deadlocks to be run at a later call when the
51  * deadlock condition is no longer pending.
52  *
53  * @scan_objects will only be called if @count_objects returned a non-zero
54  * value for the number of freeable objects. The callout should scan the cache
55  * and attempt to free items from the cache. It should then return the number
56  * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
57  * due to potential deadlocks. If SHRINK_STOP is returned, then no further
58  * attempts to call the @scan_objects will be made from the current reclaim
59  * context.
60  *
61  * @flags determine the shrinker abilities, like numa awareness
62  */
63 struct shrinker {
64 	unsigned long (*count_objects)(struct shrinker *,
65 				       struct shrink_control *sc);	/* freeable count, SHRINK_EMPTY, or 0 to skip */
66 	unsigned long (*scan_objects)(struct shrinker *,
67 				      struct shrink_control *sc);	/* objects freed, or SHRINK_STOP */
68 
69 	long batch;	/* reclaim batch size, 0 = default */
70 	int seeks;	/* seeks to recreate an obj */
71 	unsigned flags;	/* SHRINKER_* ability flags (numa/memcg awareness etc.) */
72 
73 	/* These are for internal use */
74 	struct list_head list;
75 #ifdef CONFIG_MEMCG
76 	/* ID in shrinker_idr */
77 	int id;
78 #endif
79 	/* objs pending delete, per node */
80 	atomic_long_t *nr_deferred;
81 };
82 #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
83 
84 /* Flags */
85 #define SHRINKER_REGISTERED	(1 << 0)
86 #define SHRINKER_NUMA_AWARE	(1 << 1)
87 #define SHRINKER_MEMCG_AWARE	(1 << 2)
88 /*
89  * This flag only makes sense when the shrinker is also MEMCG_AWARE for now;
90  * a non-MEMCG_AWARE shrinker should not have this flag set.
91  */
92 #define SHRINKER_NONSLAB	(1 << 3)
93 
94 extern int prealloc_shrinker(struct shrinker *shrinker);
95 extern void register_shrinker_prepared(struct shrinker *shrinker);
96 extern int register_shrinker(struct shrinker *shrinker);
97 extern void unregister_shrinker(struct shrinker *shrinker);
98 extern void free_prealloced_shrinker(struct shrinker *shrinker);
99 #endif
100