/*
 * Cleancache frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of cleancache.  See
 * Documentation/vm/cleancache.txt for more information.
 *
 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/cleancache.h>

/*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 */
static struct cleancache_ops *cleancache_ops __read_mostly;

/*
 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
 * properly configured).  These are for information only so they are not
 * protected against increment races.
 */
static u64 cleancache_succ_gets;
static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;

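/*
 * Illustrative usage (not part of the original source): once
 * init_cleancache() below has created the debugfs directory, the counters
 * can be read from userspace, e.g.:
 *
 *	# mount -t debugfs none /sys/kernel/debug	(if not already mounted)
 *	# cat /sys/kernel/debug/cleancache/succ_gets
 */
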
/*
 * When no backend is registered, all calls to init_fs and init_shared_fs
 * are recorded and fake poolids (FAKE_FS_POOLID_OFFSET or
 * FAKE_SHARED_FS_POOLID_OFFSET, plus the offset in the respective array
 * [shared_|]fs_poolid_map) are given to the respective super block
 * (sb->cleancache_poolid); no tmem_pools are created. When a backend
 * registers with cleancache, the previously recorded calls to init_fs and
 * init_shared_fs are replayed to create tmem_pools and set the respective
 * poolids. While no backend is registered all "puts", "gets" and "flushes"
 * are ignored or fail.
 */
#define MAX_INITIALIZABLE_FS 32
#define FAKE_FS_POOLID_OFFSET 1000
#define FAKE_SHARED_FS_POOLID_OFFSET 2000

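/*
 * Worked example (illustrative, not part of the original source): the
 * first non-shared filesystem mounted while no backend is loaded takes
 * slot 0 of fs_poolid_map, so its sb->cleancache_poolid becomes
 * FAKE_FS_POOLID_OFFSET + 0 == 1000; a shared (clustered) filesystem in
 * slot 2 would get FAKE_SHARED_FS_POOLID_OFFSET + 2 == 2002.
 * get_poolid_from_fake() below undoes this arithmetic to look up the real
 * backend poolid once one has been assigned.
 */
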
#define FS_NO_BACKEND (-1)
#define FS_UNKNOWN (-2)
static int fs_poolid_map[MAX_INITIALIZABLE_FS];
static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];
static char *uuids[MAX_INITIALIZABLE_FS];
/*
 * Mutex for the [shared_|]fs_poolid_map to guard against multiple threads
 * invoking umount (and ending up in __cleancache_invalidate_fs) and also
 * multiple threads calling mount (and ending up in __cleancache_init_[shared|]fs).
 */
static DEFINE_MUTEX(poolid_mutex);
/*
 * While cleancache_ops is NULL (the default), all calls to the cleancache
 * functions, except __cleancache_invalidate_fs and
 * __cleancache_init_[shared|]fs, are guarded by an "if (!cleancache_ops)
 * return" check. This means multiple threads (from different filesystems)
 * will be checking cleancache_ops. Using a plain pointer check instead of
 * an atomic_t, or a flag guarded by a spinlock, is OK - we can tolerate a
 * window between the moment the backend has been initialized (and
 * cleancache_ops has been set to non-NULL) and the moment the filesystems
 * actually start calling the backend. The inverse (when unloading) is
 * obviously not good - but this shim does not do that (yet).
 */

/*
 * The backends and filesystems all work asynchronously. This is because
 * the backends can be built as modules.
 * The usual sequence of events is:
 *	a) mount /	-> __cleancache_init_fs is called. We record the
 *		mount in [shared_|]fs_poolid_map (and in uuids[] for a
 *		shared filesystem).
 *
 *	b). user does I/Os -> we call the rest of the __cleancache_* functions,
 *		which return immediately as cleancache_ops is NULL.
 *
 *	c). modprobe zcache -> cleancache_register_ops. We init the backend,
 *		set cleancache_ops to the backend's ops, and for every
 *		fs_poolid_map entry (which was set by __cleancache_init_fs)
 *		we initialize the poolid.
 *
 *	d). user does I/Os -> now that cleancache_ops is set, all the
 *		__cleancache_* functions can call the backend. They all check
 *		that fs_poolid_map is valid and if so invoke the backend.
 *
 *	e). umount /	-> __cleancache_invalidate_fs, the fs_poolid_map is
 *		reset (which is the second check in the __cleancache_* ops
 *		before calling the backend).
 *
 * The sequence of events could also be c), followed by a), d) and e). In
 * that case the deferred initialization in c) has nothing to do, since a)
 * can call the backend directly. There is also the chance of c), and one
 * thread doing a) + d), and another doing e). For that case we depend on
 * the filesystem calling __cleancache_invalidate_fs in the proper sequence
 * (so that it handles all I/Os before it invalidates the fs, which is the
 * last part of the unmounting process).
 *
 * Note: The astute reader will notice that there is no "rmmod zcache" case.
 * This is because the functionality for that is not yet implemented and,
 * when done, will require some extra locking not yet devised.
 */

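/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * backend module ("myback" and its callbacks are made-up names) would
 * register itself roughly like this:
 *
 *	static struct cleancache_ops myback_ops = {
 *		.init_fs		= myback_init_fs,
 *		.init_shared_fs		= myback_init_shared_fs,
 *		.get_page		= myback_get_page,
 *		.put_page		= myback_put_page,
 *		.invalidate_page	= myback_invalidate_page,
 *		.invalidate_inode	= myback_invalidate_inode,
 *		.invalidate_fs		= myback_invalidate_fs,
 *	};
 *
 *	static int __init myback_init(void)
 *	{
 *		struct cleancache_ops *old;
 *
 *		old = cleancache_register_ops(&myback_ops);
 *		if (old)
 *			pr_warn("myback: another cleancache backend was already registered\n");
 *		return 0;
 *	}
 *	module_init(myback_init);
 */
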
/*
 * Register operations for cleancache, returning the previous ops and thus
 * allowing detection of multiple backends and possible nesting.
 */
struct cleancache_ops *cleancache_register_ops(struct cleancache_ops *ops)
{
	struct cleancache_ops *old = cleancache_ops;
	int i;

	mutex_lock(&poolid_mutex);
	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
		if (fs_poolid_map[i] == FS_NO_BACKEND)
			fs_poolid_map[i] = ops->init_fs(PAGE_SIZE);
		if (shared_fs_poolid_map[i] == FS_NO_BACKEND)
			shared_fs_poolid_map[i] = ops->init_shared_fs
					(uuids[i], PAGE_SIZE);
	}
	/*
	 * We MUST set cleancache_ops _after_ we have called the backend's
	 * init_fs or init_shared_fs functions. Otherwise the compiler might
	 * re-order where cleancache_ops is set in this function.
	 */
	barrier();
	cleancache_ops = ops;
	mutex_unlock(&poolid_mutex);
	return old;
}
EXPORT_SYMBOL(cleancache_register_ops);

/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
	int i;

	mutex_lock(&poolid_mutex);
	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
		if (fs_poolid_map[i] == FS_UNKNOWN) {
			sb->cleancache_poolid = i + FAKE_FS_POOLID_OFFSET;
			if (cleancache_ops)
				fs_poolid_map[i] = cleancache_ops->init_fs(PAGE_SIZE);
			else
				fs_poolid_map[i] = FS_NO_BACKEND;
			break;
		}
	}
	mutex_unlock(&poolid_mutex);
}
EXPORT_SYMBOL(__cleancache_init_fs);
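
/*
 * Illustrative note (not part of the original source): a cleancache-enabled
 * filesystem reaches the function above through the cleancache_init_fs()
 * wrapper in include/linux/cleancache.h, typically from its mount/fill_super
 * path, e.g.:
 *
 *	cleancache_init_fs(sb);
 *
 * Clustered filesystems such as ocfs2 use cleancache_init_shared_fs()
 * (see below) with their on-disk uuid instead.
 */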

/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
{
	int i;

	mutex_lock(&poolid_mutex);
	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
		if (shared_fs_poolid_map[i] == FS_UNKNOWN) {
			sb->cleancache_poolid = i + FAKE_SHARED_FS_POOLID_OFFSET;
			uuids[i] = uuid;
			if (cleancache_ops)
				shared_fs_poolid_map[i] = cleancache_ops->init_shared_fs
						(uuid, PAGE_SIZE);
			else
				shared_fs_poolid_map[i] = FS_NO_BACKEND;
			break;
		}
	}
	mutex_unlock(&poolid_mutex);
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);

/*
 * If the filesystem uses exportable filehandles, use the filehandle as
 * the key, else use the inode number.
 */
static int cleancache_get_key(struct inode *inode,
			      struct cleancache_filekey *key)
{
	int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
	struct super_block *sb = inode->i_sb;

	key->u.ino = inode->i_ino;
	if (sb->s_export_op != NULL) {
		fhfn = sb->s_export_op->encode_fh;
		if  (fhfn) {
			len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
			if (len <= FILEID_ROOT || len == FILEID_INVALID)
				return -1;
			if (maxlen > CLEANCACHE_KEY_MAX)
				return -1;
		}
	}
	return 0;
}
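
/*
 * Illustrative note (not part of the original source): for a filesystem
 * without export_operations the key is simply the inode number
 * (key.u.ino == inode->i_ino, as set above); an exportable filesystem
 * instead has its file handle encoded into key.u.fh[] via ->encode_fh(),
 * provided it fits within CLEANCACHE_KEY_MAX 32-bit words.
 */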

/*
 * Returns a pool_id that is associated with a given fake poolid.
 */
static int get_poolid_from_fake(int fake_pool_id)
{
	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
		return shared_fs_poolid_map[fake_pool_id -
			FAKE_SHARED_FS_POOLID_OFFSET];
	else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
		return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
	return FS_NO_BACKEND;
}

/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * If the get fails, the pageframe is left unchanged and -1 is returned.
 * Page must be locked by caller.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
int __cleancache_get_page(struct page *page)
{
	int ret = -1;
	int pool_id;
	int fake_pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_failed_gets++;
		goto out;
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (fake_pool_id < 0)
		goto out;
	pool_id = get_poolid_from_fake(fake_pool_id);

	if (cleancache_get_key(page->mapping->host, &key) < 0)
		goto out;

	if (pool_id >= 0)
		ret = cleancache_ops->get_page(pool_id,
				key, page->index, page);
	if (ret == 0)
		cleancache_succ_gets++;
	else
		cleancache_failed_gets++;
out:
	return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);
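
/*
 * Illustrative sketch (not part of the original source): a page-cache read
 * path tries cleancache before issuing real I/O via the cleancache_get_page()
 * wrapper (include/linux/cleancache.h), roughly:
 *
 *	if (!PageUptodate(page) && cleancache_get_page(page) == 0)
 *		SetPageUptodate(page);		page filled by the backend
 *	else
 *		... issue the real read I/O ...
 */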

/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's
 * inode and page index.  Page must be locked.  Note that a put_page
 * always "succeeds", though a subsequent get_page may succeed or fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_put_page(struct page *page)
{
	int pool_id;
	int fake_pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_puts++;
		return;
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (fake_pool_id < 0)
		return;

	pool_id = get_poolid_from_fake(fake_pool_id);

	if (pool_id >= 0 &&
		cleancache_get_key(page->mapping->host, &key) >= 0) {
		cleancache_ops->put_page(pool_id, key, page->index, page);
		cleancache_puts++;
	}
}
EXPORT_SYMBOL(__cleancache_put_page);

/*
 * Invalidate any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_invalidate_page(struct address_space *mapping,
					struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	int pool_id;
	int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (fake_pool_id >= 0) {
		pool_id = get_poolid_from_fake(fake_pool_id);
		if (pool_id < 0)
			return;

		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (cleancache_get_key(mapping->host, &key) >= 0) {
			cleancache_ops->invalidate_page(pool_id,
					key, page->index);
			cleancache_invalidates++;
		}
	}
}
EXPORT_SYMBOL(__cleancache_invalidate_page);
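
/*
 * Illustrative sketch (not part of the original source): when a page is
 * removed from the page cache (e.g. in __delete_from_page_cache()), the VFS
 * either hands the clean data to cleancache or makes sure any stale copy is
 * dropped, roughly:
 *
 *	if (PageUptodate(page) && PageMappedToDisk(page))
 *		cleancache_put_page(page);			preserve a clean copy
 *	else
 *		cleancache_invalidate_page(mapping, page);	drop any stale data
 */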

/*
 * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_invalidate_inode(struct address_space *mapping)
{
	int pool_id;
	int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (fake_pool_id < 0)
		return;

	pool_id = get_poolid_from_fake(fake_pool_id);

	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
		cleancache_ops->invalidate_inode(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);
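
/*
 * Illustrative note (not part of the original source): a typical caller is
 * the truncation/eviction path, e.g. truncate_inode_pages_range() in
 * mm/truncate.c, via the cleancache_invalidate_inode() wrapper, so that no
 * stale pages for the inode can later be returned by a "get".
 */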

/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs.
 */
void __cleancache_invalidate_fs(struct super_block *sb)
{
	int index;
	int fake_pool_id = sb->cleancache_poolid;
	int old_poolid = fake_pool_id;

	mutex_lock(&poolid_mutex);
	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET) {
		index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
		old_poolid = shared_fs_poolid_map[index];
		shared_fs_poolid_map[index] = FS_UNKNOWN;
		uuids[index] = NULL;
	} else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET) {
		index = fake_pool_id - FAKE_FS_POOLID_OFFSET;
		old_poolid = fs_poolid_map[index];
		fs_poolid_map[index] = FS_UNKNOWN;
	}
	sb->cleancache_poolid = -1;
	if (cleancache_ops)
		cleancache_ops->invalidate_fs(old_poolid);
	mutex_unlock(&poolid_mutex);
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);

static int __init init_cleancache(void)
{
	int i;

#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("cleancache", NULL);
	if (root == NULL)
		return -ENXIO;
	debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets);
	debugfs_create_u64("failed_gets", S_IRUGO,
				root, &cleancache_failed_gets);
	debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts);
	debugfs_create_u64("invalidates", S_IRUGO,
				root, &cleancache_invalidates);
#endif
	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
		fs_poolid_map[i] = FS_UNKNOWN;
		shared_fs_poolid_map[i] = FS_UNKNOWN;
	}
	return 0;
}
module_init(init_cleancache)