/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H

/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 *
 * The 'gfp_mask' refers to the allocation we are currently trying to
 * fulfil.
 */
struct shrink_control {
	gfp_t gfp_mask;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/*
	 * How many objects did scan_objects process?
	 * This defaults to nr_to_scan before every call, but the callee
	 * should track its actual progress.
	 */
	unsigned long nr_scanned;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;
};

#define SHRINK_STOP (~0UL)
#define SHRINK_EMPTY (~0UL - 1)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
 * returned in cases where the number of freeable items cannot be determined
 * or where the shrinker should skip this cache for this time (e.g., their
 * number is below the shrinkable limit). No deadlock checks should be done
 * during the count callback - the shrinker relies on aggregating the scan
 * counts that could not be executed due to potential deadlocks and running
 * them at a later call, when the deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callback should scan the cache
 * and attempt to free items from the cache. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, then no further
 * attempts to call @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determines the shrinker's abilities, such as NUMA awareness.
 *
 * An illustrative usage sketch follows the struct definition below.
 */
struct shrinker {
	unsigned long (*count_objects)(struct shrinker *,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *,
				      struct shrink_control *sc);

	long batch;	/* reclaim batch size, 0 = default */
	int seeks;	/* seeks to recreate an obj */
	unsigned flags;

	/* These are for internal use */
	struct list_head list;
#ifdef CONFIG_MEMCG
	/* ID in shrinker_idr */
	int id;
#endif
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */

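/*
 * Illustrative sketch (not part of this header): the count/scan contract
 * described above, for a hypothetical object cache.  demo_cache_lock,
 * demo_cache_nr_objects and demo_cache_evict_one() are invented names
 * assumed to come from the cache's own code (spinlock and atomic helpers);
 * NUMA and memcg awareness are omitted for brevity.
 *
 *	static unsigned long demo_cache_count(struct shrinker *s,
 *					      struct shrink_control *sc)
 *	{
 *		unsigned long nr = atomic_long_read(&demo_cache_nr_objects);
 *
 *		// Nothing cached: let reclaim skip this shrinker entirely.
 *		return nr ? nr : SHRINK_EMPTY;
 *	}
 *
 *	static unsigned long demo_cache_scan(struct shrinker *s,
 *					     struct shrink_control *sc)
 *	{
 *		unsigned long freed = 0;
 *
 *		// This hypothetical cache takes fs locks, so it must not be
 *		// shrunk from an allocation that cannot recurse into the fs.
 *		if (!(sc->gfp_mask & __GFP_FS))
 *			return SHRINK_STOP;
 *		// Never block in a shrinker; bail out rather than deadlock.
 *		if (!spin_trylock(&demo_cache_lock))
 *			return SHRINK_STOP;
 *		while (freed < sc->nr_to_scan && demo_cache_evict_one())
 *			freed++;
 *		spin_unlock(&demo_cache_lock);
 *
 *		// Every scanned object is freed here, so scanned == freed.
 *		sc->nr_scanned = freed;
 *		return freed;
 *	}
 */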
/* Flags */
#define SHRINKER_REGISTERED	(1 << 0)
#define SHRINKER_NUMA_AWARE	(1 << 1)
#define SHRINKER_MEMCG_AWARE	(1 << 2)
/*
 * SHRINKER_NONSLAB only makes sense when the shrinker is also MEMCG_AWARE
 * for now; a non-MEMCG_AWARE shrinker should not have this flag set.
 */
#define SHRINKER_NONSLAB	(1 << 3)

extern int prealloc_shrinker(struct shrinker *shrinker);
extern void register_shrinker_prepared(struct shrinker *shrinker);
extern int register_shrinker(struct shrinker *shrinker);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
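/*
 * Illustrative registration sketch (not part of this header): wiring the
 * hypothetical callbacks from the sketch above into a struct shrinker and
 * registering it from module init.  The demo_* names and the module are
 * invented for the example.  The split prealloc_shrinker() /
 * register_shrinker_prepared() path is intended for callers that need the
 * shrinker allocated before the cache is fully set up, with
 * free_prealloced_shrinker() for the error path when registration never
 * happens.
 *
 *	static struct shrinker demo_cache_shrinker = {
 *		.count_objects	= demo_cache_count,
 *		.scan_objects	= demo_cache_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *		.batch		= 0,	// use the default batch size
 *		.flags		= 0,	// neither NUMA nor memcg aware
 *	};
 *
 *	static int __init demo_cache_init(void)
 *	{
 *		return register_shrinker(&demo_cache_shrinker);
 *	}
 *	module_init(demo_cache_init);
 *
 *	static void __exit demo_cache_exit(void)
 *	{
 *		unregister_shrinker(&demo_cache_shrinker);
 *	}
 *	module_exit(demo_cache_exit);
 */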
#endif