// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cleancache frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of cleancache.  See
 * Documentation/vm/cleancache.rst for more information.
 *
 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/cleancache.h>

/*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 */
static const struct cleancache_ops *cleancache_ops __read_mostly;

/*
 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
 * properly configured).  These are for information only so are not protected
 * against increment races.
 */
static u64 cleancache_succ_gets;
static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;

static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
{
	switch (sb->cleancache_poolid) {
	case CLEANCACHE_NO_BACKEND:
		__cleancache_init_fs(sb);
		break;
	case CLEANCACHE_NO_BACKEND_SHARED:
		__cleancache_init_shared_fs(sb);
		break;
	}
}

/*
 * Register operations for cleancache. Returns 0 on success.
 */
int cleancache_register_ops(const struct cleancache_ops *ops)
{
	if (cmpxchg(&cleancache_ops, NULL, ops))
		return -EBUSY;

	/*
	 * A cleancache backend can be built as a module and hence loaded after
	 * a cleancache enabled filesystem has called cleancache_init_fs. To
	 * handle such a scenario, here we call ->init_fs or ->init_shared_fs
	 * for each active super block. To differentiate between local and
	 * shared filesystems, we temporarily initialize sb->cleancache_poolid
	 * to CLEANCACHE_NO_BACKEND or CLEANCACHE_NO_BACKEND_SHARED
	 * respectively in case there is no backend registered at the time
	 * cleancache_init_fs or cleancache_init_shared_fs is called.
	 *
	 * Since filesystems can be mounted concurrently with cleancache
	 * backend registration, we have to be careful to guarantee that all
	 * cleancache enabled filesystems that have been mounted by the time
	 * cleancache_register_ops is called have got, and all those mounted
	 * later will get, cleancache_poolid. This is assured by the following
	 * statements tied together:
	 *
	 * a) iterate_supers skips only those super blocks that have started
	 *    ->kill_sb
	 *
	 * b) if iterate_supers encounters a super block that has not finished
	 *    ->mount yet, it waits until it is finished
	 *
	 * c) cleancache_init_fs is called from ->mount and
	 *    cleancache_invalidate_fs is called from ->kill_sb
	 *
	 * d) we call iterate_supers after cleancache_ops has been set
	 *
	 * From a) it follows that if iterate_supers skips a super block, then
	 * either the super block is already dead, in which case we do not need
	 * to bother initializing cleancache for it, or it was mounted after we
	 * initiated iterate_supers. In the latter case, it must have seen
	 * cleancache_ops set according to d) and initialized cleancache from
	 * ->mount by itself according to c). This proves that we call
	 * ->init_fs at least once for each active super block.
	 *
	 * From b) and c) it follows that if iterate_supers encounters a super
	 * block that has already started ->init_fs, it will wait until ->mount
	 * and hence ->init_fs has finished, then check cleancache_poolid, see
	 * that it has already been set and therefore do nothing. This proves
	 * that we call ->init_fs no more than once for each super block.
	 *
	 * Combined together, the last two paragraphs prove the function's
	 * correctness.
	 *
	 * Note that various cleancache callbacks may proceed before this
	 * function is called or even concurrently with it, but since
	 * CLEANCACHE_NO_BACKEND is negative, they will all result in a noop
	 * until the corresponding ->init_fs has been actually called and
	 * cleancache_ops has been set.
	 */
	iterate_supers(cleancache_register_ops_sb, NULL);
	return 0;
}
EXPORT_SYMBOL(cleancache_register_ops);

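/*
 * Usage sketch (illustrative only, not part of this file): a backend,
 * typically built as a module, fills in a struct cleancache_ops and hands
 * it to cleancache_register_ops() once at init time.  The my_backend_*
 * names below are hypothetical placeholders for real backend callbacks:
 *
 *	static const struct cleancache_ops my_backend_ops = {
 *		.init_fs		= my_backend_init_fs,
 *		.init_shared_fs		= my_backend_init_shared_fs,
 *		.get_page		= my_backend_get_page,
 *		.put_page		= my_backend_put_page,
 *		.invalidate_page	= my_backend_invalidate_page,
 *		.invalidate_inode	= my_backend_invalidate_inode,
 *		.invalidate_fs		= my_backend_invalidate_fs,
 *	};
 *
 *	static int __init my_backend_init(void)
 *	{
 *		return cleancache_register_ops(&my_backend_ops);
 *	}
 *	module_init(my_backend_init);
 *
 * Only one backend can ever be registered; a second registration fails
 * with -EBUSY, as enforced by the cmpxchg above.
 */
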
/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
	int pool_id = CLEANCACHE_NO_BACKEND;

	if (cleancache_ops) {
		pool_id = cleancache_ops->init_fs(PAGE_SIZE);
		if (pool_id < 0)
			pool_id = CLEANCACHE_NO_POOL;
	}
	sb->cleancache_poolid = pool_id;
}
EXPORT_SYMBOL(__cleancache_init_fs);

/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(struct super_block *sb)
{
	int pool_id = CLEANCACHE_NO_BACKEND_SHARED;

	if (cleancache_ops) {
		pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
		if (pool_id < 0)
			pool_id = CLEANCACHE_NO_POOL;
	}
	sb->cleancache_poolid = pool_id;
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);

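/*
 * Usage sketch (illustrative): a local filesystem enables cleancache from
 * its ->mount/fill_super path via the cleancache_init_fs() wrapper in
 * <linux/cleancache.h>, while a clustered filesystem that shares a pool
 * across nodes uses cleancache_init_shared_fs() instead, e.g.:
 *
 *	static int my_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		...
 *		cleancache_init_fs(sb);
 *		return 0;
 *	}
 *
 * my_fill_super is a hypothetical placeholder; ext4 and ocfs2 are examples
 * of filesystems carrying such hooks in their mount paths.
 */
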
/*
 * If the filesystem uses exportable filehandles, use the filehandle as
 * the key, else use the inode number.
 */
static int cleancache_get_key(struct inode *inode,
			      struct cleancache_filekey *key)
{
	int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
	struct super_block *sb = inode->i_sb;

	key->u.ino = inode->i_ino;
	if (sb->s_export_op != NULL) {
		fhfn = sb->s_export_op->encode_fh;
		if (fhfn) {
			len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
			if (len <= FILEID_ROOT || len == FILEID_INVALID)
				return -1;
			if (maxlen > CLEANCACHE_KEY_MAX)
				return -1;
		}
	}
	return 0;
}

/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * If the get fails, the page frame is left unchanged and -1 is returned.
 * Page must be locked by caller.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
int __cleancache_get_page(struct page *page)
{
	int ret = -1;
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_failed_gets++;
		goto out;
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id < 0)
		goto out;

	if (cleancache_get_key(page->mapping->host, &key) < 0)
		goto out;

	ret = cleancache_ops->get_page(pool_id, key, page->index, page);
	if (ret == 0)
		cleancache_succ_gets++;
	else
		cleancache_failed_gets++;
out:
	return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);

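/*
 * Usage sketch (illustrative): a filesystem read path consults cleancache
 * before issuing real I/O and, on a hit, marks the page up to date instead
 * of reading it from disk.  Roughly, via the cleancache_get_page() wrapper
 * in <linux/cleancache.h>:
 *
 *	if (!PageUptodate(page) && cleancache_get_page(page) == 0) {
 *		SetPageUptodate(page);
 *		unlock_page(page);
 *		return 0;		// hit: no block I/O needed
 *	}
 *	// miss: fall through and submit the read as usual
 *
 * fs/mpage.c carries a hook of this shape in its readpage path.
 */
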
/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's inode
 * and page index.  Page must be locked.  Note that a put_page
 * always "succeeds", though a subsequent get_page may succeed or fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_put_page(struct page *page)
{
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_puts++;
		return;
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id >= 0 &&
		cleancache_get_key(page->mapping->host, &key) >= 0) {
		cleancache_ops->put_page(pool_id, key, page->index, page);
		cleancache_puts++;
	}
}
EXPORT_SYMBOL(__cleancache_put_page);

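/*
 * Usage sketch (illustrative): the page cache removal path in mm/filemap.c
 * decides between "put" and "invalidate" when a page leaves the page cache.
 * A clean, up-to-date page is handed to cleancache; anything else must be
 * invalidated so no stale copy survives.  Roughly:
 *
 *	if (PageUptodate(page) && PageMappedToDisk(page))
 *		cleancache_put_page(page);
 *	else
 *		cleancache_invalidate_page(mapping, page);
 *
 * Both calls go through the wrappers in <linux/cleancache.h>, which are
 * no-ops when cleancache is not enabled.
 */
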
/*
 * Invalidate any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_invalidate_page(struct address_space *mapping,
					struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (pool_id >= 0) {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (cleancache_get_key(mapping->host, &key) >= 0) {
			cleancache_ops->invalidate_page(pool_id,
					key, page->index);
			cleancache_invalidates++;
		}
	}
}
EXPORT_SYMBOL(__cleancache_invalidate_page);

/*
 * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_invalidate_inode(struct address_space *mapping)
{
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
		cleancache_ops->invalidate_inode(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);

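/*
 * Usage sketch (illustrative): truncation of an inode's pages in
 * mm/truncate.c drops any cleancache copies for that inode as a whole,
 * roughly:
 *
 *	void truncate_inode_pages_range(struct address_space *mapping, ...)
 *	{
 *		cleancache_invalidate_inode(mapping);
 *		... remove the pages from the page cache ...
 *	}
 *
 * The cleancache_invalidate_inode() wrapper in <linux/cleancache.h>
 * forwards here only when cleancache support is built in.
 */
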
/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs.
 */
void __cleancache_invalidate_fs(struct super_block *sb)
{
	int pool_id;

	pool_id = sb->cleancache_poolid;
	sb->cleancache_poolid = CLEANCACHE_NO_POOL;

	if (cleancache_ops && pool_id >= 0)
		cleancache_ops->invalidate_fs(pool_id);
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);

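/*
 * Usage sketch (illustrative): the VFS unmount path hands the super block
 * to the cleancache_invalidate_fs() wrapper in the ->kill_sb / shutdown
 * path, so the backend can free the whole pool in one call, roughly:
 *
 *	cleancache_invalidate_fs(sb);	// somewhere in the unmount path
 *
 * After this, sb->cleancache_poolid is CLEANCACHE_NO_POOL (negative), so
 * any late get/put/invalidate callbacks for this super block become no-ops.
 */
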
static int __init init_cleancache(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("cleancache", NULL);

	debugfs_create_u64("succ_gets", 0444, root, &cleancache_succ_gets);
	debugfs_create_u64("failed_gets", 0444, root, &cleancache_failed_gets);
	debugfs_create_u64("puts", 0444, root, &cleancache_puts);
	debugfs_create_u64("invalidates", 0444, root, &cleancache_invalidates);
#endif
	return 0;
}
module_init(init_cleancache)