/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;

/* list_lru_walk_cb must always return one of these values */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	/* array of per cgroup lists, indexed by memcg_cache_id */
	struct list_lru_one	*lru[0];
};

struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
	/* for cgroup-aware lrus, points to per-cgroup lists; otherwise NULL */
	struct list_lru_memcg	*memcg_lrus;
#endif
	long nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
	struct list_head	list;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key);

#define list_lru_init(lru)		__list_lru_init((lru), false, NULL)
#define list_lru_init_key(lru, key)	__list_lru_init((lru), false, (key))
#define list_lru_init_memcg(lru)	__list_lru_init((lru), true, NULL)

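/*
 * Initialization sketch (illustrative only; "my_cache_lru" and the init/exit
 * functions are hypothetical names, not part of this header). A typical user
 * initializes the lru once at setup time and destroys it on teardown:
 *
 *	static struct list_lru my_cache_lru;
 *
 *	static int __init my_cache_init(void)
 *	{
 *		return list_lru_init(&my_cache_lru);
 *	}
 *
 *	static void __exit my_cache_exit(void)
 *	{
 *		list_lru_destroy(&my_cache_lru);
 *	}
 */
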
int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, int dst_idx);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. The caller therefore does not need to track whether the
 * element is already on the list and may update it lazily. Note, however,
 * that this holds for *a* list, not *this* list: if the caller arranges for
 * elements to be on more than one type of list, it is up to the caller to
 * fully remove the item from the previous list (with list_lru_del(), for
 * instance) before moving it to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

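/*
 * Usage sketch (hypothetical; "struct my_object" and its "lru_link" member
 * are illustrative assumptions). Because list_lru_add() does nothing for an
 * item that is already on a list, a cache can mark an object unused without
 * tracking its current state; the call simply returns false in that case:
 *
 *	struct my_object {
 *		struct list_head lru_link;
 *	};
 *
 *	static struct list_lru my_cache_lru;
 *
 *	static void my_object_mark_unused(struct my_object *obj)
 *	{
 *		list_lru_add(&my_cache_lru, &obj->lru_link);
 *	}
 */
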
/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments above about an element already belonging to
 * a list also hold for list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);

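/*
 * Usage sketch (hypothetical names, continuing the example above): before
 * freeing an object that may still sit on the lru, remove it first so the
 * lru never holds a dangling pointer:
 *
 *	static void my_object_free(struct my_object *obj)
 *	{
 *		list_lru_del(&my_cache_lru, &obj->lru_link);
 *		kfree(obj);
 *	}
 */
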
/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}

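/*
 * Usage sketch (hypothetical; "my_shrinker_count" and "my_cache_lru" are
 * illustrative names). A shrinker's count_objects callback typically
 * reduces to a single call:
 *
 *	static unsigned long my_shrinker_count(struct shrinker *shrink,
 *					       struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_cache_lru, sc);
 *	}
 */
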
static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);

/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do
 *  with the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback returns an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

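/*
 * Callback sketch (hypothetical; "struct my_object", "lru_link" and
 * my_object_is_busy() are illustrative assumptions). A minimal @isolate
 * callback decides the fate of each item under the lru lock, moving
 * freeable items onto a private dispose list for later teardown:
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *		struct my_object *obj =
 *			container_of(item, struct my_object, lru_link);
 *
 *		if (my_object_is_busy(obj))
 *			return LRU_ROTATE;
 *
 *		list_lru_isolate_move(list, item, dispose);
 *		return LRU_REMOVED;
 *	}
 */
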
static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}

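/*
 * Usage sketch (hypothetical names, continuing the examples above;
 * my_dispose_list() is an assumed helper that frees everything on the
 * list). A shrinker's scan_objects callback can pair
 * list_lru_shrink_walk() with the isolate callback sketched above, then
 * free the isolated objects outside the lru lock:
 *
 *	static unsigned long my_shrinker_scan(struct shrinker *shrink,
 *					      struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&my_cache_lru, sc,
 *					     my_isolate, &dispose);
 *		my_dispose_list(&dispose);
 *		return freed;
 *	}
 */
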
static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}
#endif /* _LRU_LIST_H */