• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * FUSE inode io modes.
4  *
5  * Copyright (c) 2024 CTERA Networks.
6  */
7 
8 #include "fuse_i.h"
9 
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/file.h>
13 #include <linux/fs.h>
14 
15 /*
16  * Return true if need to wait for new opens in caching mode.
17  */
fuse_is_io_cache_wait(struct fuse_inode * fi)18 static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi)
19 {
20 	return READ_ONCE(fi->iocachectr) < 0 && !fuse_inode_backing(fi);
21 }
22 
/*
 * Called on cached file open() and on first mmap() of direct_io file.
 * Takes cached_io inode mode reference to be dropped on file release.
 *
 * Blocks new parallel dio writes and waits for the in-progress parallel dio
 * writes to complete.
 *
 * In this variant the function always returns 0: the passthrough-conflict
 * rejection below is compiled out.
 */
int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* There are no io modes if server does not implement open */
	if (!ff->args)
		return 0;

	spin_lock(&fi->lock);
	/*
	 * Setting the bit advises new direct-io writes to use an exclusive
	 * lock - without it the wait below might be forever.
	 */
	while (fuse_is_io_cache_wait(fi)) {
		set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
		/* Sleep without the spinlock, then retake it and re-check */
		spin_unlock(&fi->lock);
		wait_event(fi->direct_io_waitq, !fuse_is_io_cache_wait(fi));
		spin_lock(&fi->lock);
	}

	/*
	 * Check if inode entered passthrough io mode while waiting for parallel
	 * dio write completion.
	 *
	 * Deliberately disabled: Android's use case requires opening files in
	 * both passthrough and non-passthrough modes, so a cached open is
	 * allowed to coexist with an established backing file.
	 */
#if 0
	if (fuse_inode_backing(fi)) {
		clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
		spin_unlock(&fi->lock);
		return -ETXTBSY;
	}
#endif

	/* A file that already entered another io mode must not reach here */
	WARN_ON(ff->iomode == IOM_UNCACHED);
	WARN_ON(ff->iomode == IOM_PASSTHROUGH);
	if (ff->iomode == IOM_NONE) {
		ff->iomode = IOM_CACHED;
		/* First cached opener flips the inode into caching io mode */
		if (fi->iocachectr == 0)
			set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
		fi->iocachectr++;
	}
	spin_unlock(&fi->lock);
	return 0;
}
75 
fuse_file_cached_io_release(struct fuse_file * ff,struct fuse_inode * fi)76 static void fuse_file_cached_io_release(struct fuse_file *ff,
77 					struct fuse_inode *fi)
78 {
79 	spin_lock(&fi->lock);
80 	WARN_ON(fi->iocachectr <= 0);
81 	WARN_ON(ff->iomode != IOM_CACHED);
82 	ff->iomode = IOM_NONE;
83 	fi->iocachectr--;
84 	if (fi->iocachectr == 0)
85 		clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
86 	spin_unlock(&fi->lock);
87 }
88 
/* Start strictly uncached io mode where cache access is not allowed if not in passthrough mode */
int fuse_inode_uncached_io_start(struct fuse_inode *fi, struct fuse_backing *fb)
{
	struct fuse_backing *oldfb;
	int err = 0;

	spin_lock(&fi->lock);
	/* deny conflicting backing files on same fuse inode */
	oldfb = fuse_inode_backing(fi);
	if (fb && oldfb && oldfb != fb) {
		err = -EBUSY;
		goto unlock;
	}
	/* deny plain uncached io while cached files are open on the inode */
	if (!fb && fi->iocachectr > 0) {
		err = -ETXTBSY;
		goto unlock;
	}
	/*
	 * Passthrough starts (fb != NULL) are counted in iopassctr; plain
	 * uncached (dio) starts drive iocachectr negative instead.
	 */
	if (fb)
		fi->iopassctr++;
	else
		fi->iocachectr--;

	/* fuse inode holds a single refcount of backing file */
	if (fb && !oldfb) {
		oldfb = fuse_inode_backing_set(fi, fb);
		WARN_ON_ONCE(oldfb != NULL);
	} else {
		/*
		 * Same backing file already attached, or fb == NULL: drop the
		 * caller's reference (presumably fuse_backing_put(NULL) is a
		 * no-op — confirm in its definition).
		 */
		fuse_backing_put(fb);
	}
unlock:
	spin_unlock(&fi->lock);
	return err;
}
122 
123 /* Takes uncached_io inode mode reference to be dropped on file release */
fuse_file_uncached_io_open(struct inode * inode,struct fuse_file * ff,struct fuse_backing * fb)124 static int fuse_file_uncached_io_open(struct inode *inode,
125 				      struct fuse_file *ff,
126 				      struct fuse_backing *fb)
127 {
128 	struct fuse_inode *fi = get_fuse_inode(inode);
129 	int err;
130 
131 	err = fuse_inode_uncached_io_start(fi, fb);
132 	if (err)
133 		return err;
134 
135 	WARN_ON(ff->iomode != IOM_NONE);
136 	ff->iomode = IOM_PASSTHROUGH;
137 	return 0;
138 }
139 
/* Drop one uncached (dio) count; the last dropper wakes waiting cached opens */
void fuse_inode_uncached_io_end(struct fuse_inode *fi)
{
	struct fuse_backing *oldfb = NULL;

	spin_lock(&fi->lock);
	/* Only a negative (uncached) count may be ended here */
	WARN_ON(fi->iocachectr >= 0);
	fi->iocachectr++;
	if (!fi->iocachectr) {
		/* Last uncached user gone: let blocked cached opens proceed */
		wake_up(&fi->direct_io_waitq);
		/*
		 * NOTE(review): this also detaches the backing file, but in
		 * this variant the backing ref is accounted via iopassctr and
		 * is likewise dropped by fuse_inode_passthrough_io_end() —
		 * confirm these two paths cannot both run while the other's
		 * counter is still nonzero.
		 */
		oldfb = fuse_inode_backing_set(fi, NULL);
	}
	spin_unlock(&fi->lock);
	/* Drop the inode's backing refcount outside the spinlock */
	if (oldfb)
		fuse_backing_put(oldfb);
}
155 
/* Drop uncached_io reference from passthrough open */
static void fuse_file_uncached_io_release(struct fuse_file *ff,
					  struct fuse_inode *fi)
{
	/*
	 * NOTE(review): nothing in this file ever assigns IOM_UNCACHED (the
	 * passthrough open path sets IOM_PASSTHROUGH), so this WARN would fire
	 * whenever this is reached from the visible dispatch — confirm whether
	 * IOM_UNCACHED is still set anywhere else, or this path is dead.
	 */
	WARN_ON(ff->iomode != IOM_UNCACHED);
	ff->iomode = IOM_NONE;
	fuse_inode_uncached_io_end(fi);
}
164 
fuse_inode_passthrough_io_end(struct fuse_inode * fi)165 static void fuse_inode_passthrough_io_end(struct fuse_inode *fi)
166 {
167 	struct fuse_backing *oldfb = NULL;
168 
169 	spin_lock(&fi->lock);
170 	WARN_ON(fi->iopassctr == 0);
171 	fi->iopassctr--;
172 	if (!fi->iopassctr) {
173 		oldfb = fuse_inode_backing_set(fi, NULL);
174 	}
175 	spin_unlock(&fi->lock);
176 	if (oldfb)
177 		fuse_backing_put(oldfb);
178 }
179 
/* Drop passthrough reference from passthrough open */
static void fuse_file_passthrough_io_release(struct fuse_file *ff,
					  struct fuse_inode *fi)
{
	WARN_ON(ff->iomode != IOM_PASSTHROUGH);
	ff->iomode = IOM_NONE;
	fuse_inode_passthrough_io_end(fi);
}
188 
/*
 * Open flags that are allowed in combination with FOPEN_PASSTHROUGH.
 * A combination of FOPEN_PASSTHROUGH and FOPEN_DIRECT_IO means that read/write
 * operations go directly to the server, but mmap is done on the backing file.
 * FOPEN_PASSTHROUGH mode should not co-exist with any users of the fuse inode
 * page cache, so FOPEN_KEEP_CACHE is a strange and undesired combination.
 * NOTE(review): despite the line above, FOPEN_KEEP_CACHE *is* included in the
 * mask below, i.e. it is tolerated rather than rejected — presumably
 * deliberate for the mixed passthrough/caching use case; confirm intent.
 */
#define FOPEN_PASSTHROUGH_MASK \
	(FOPEN_PASSTHROUGH | FOPEN_DIRECT_IO | FOPEN_PARALLEL_DIRECT_WRITES | \
	 FOPEN_NOFLUSH | FOPEN_KEEP_CACHE)
199 
/*
 * Open a file in passthrough mode: resolve the server-supplied backing id to
 * a backing file and take an uncached/passthrough inode mode reference.
 * Returns 0 on success or a negative errno.
 */
static int fuse_file_passthrough_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_backing *fb;
	int err;

	/* Check allowed conditions for file open in passthrough mode */
	if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) || !fc->passthrough ||
	    (ff->open_flags & ~FOPEN_PASSTHROUGH_MASK))
		return -EINVAL;

	/* Look up the backing file the server registered for this open */
	fb = fuse_passthrough_open(file, inode,
				   ff->args->open_outarg.backing_id);
	if (IS_ERR(fb))
		return PTR_ERR(fb);

	/* First passthrough file open denies caching inode io mode */
	err = fuse_file_uncached_io_open(inode, ff, fb);
	if (!err)
		return 0;

	/* Entering passthrough io mode failed: undo the open, drop our ref */
	fuse_passthrough_release(ff, fb);
	fuse_backing_put(fb);

	return err;
}
227 
/*
 * Request access to submit new io to inode via open file.
 *
 * Decides the inode io mode from the server's FOPEN_* reply flags:
 * passthrough, cached, or neither (plain direct io). Returns 0 on success
 * or -EIO when the server requested an io mode the inode cannot enter.
 */
int fuse_file_io_open(struct file *file, struct inode *inode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	/*
	 * io modes are not relevant with DAX and with server that does not
	 * implement open.
	 */
	if (FUSE_IS_DAX(inode) || !ff->args)
		return 0;

	/*
	 * Server is expected to use FOPEN_PASSTHROUGH for all opens of an inode
	 * which is already open for passthrough.
	 * NOTE(review): the !fc->writeback_cache carve-out relaxes this for
	 * writeback-cache mounts — presumably part of the mixed
	 * passthrough/caching design; confirm against the disabled check in
	 * fuse_file_cached_io_open().
	 */
	err = -EINVAL;
	if (fuse_inode_backing(fi) && !(ff->open_flags & FOPEN_PASSTHROUGH) && !fc->writeback_cache)
		goto fail;

	/*
	 * FOPEN_PARALLEL_DIRECT_WRITES requires FOPEN_DIRECT_IO.
	 */
	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES;

	/*
	 * First passthrough file open denies caching inode io mode.
	 * First caching file open enters caching inode io mode.
	 *
	 * Note that if user opens a file open with O_DIRECT, but server did
	 * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT,
	 * so we put the inode in caching mode to prevent parallel dio.
	 */
	if ((ff->open_flags & FOPEN_DIRECT_IO) &&
	    !(ff->open_flags & FOPEN_PASSTHROUGH))
		return 0;

	if (ff->open_flags & FOPEN_PASSTHROUGH)
		err = fuse_file_passthrough_open(inode, file);
	else
		err = fuse_file_cached_io_open(inode, ff);
	if (err)
		goto fail;

	return 0;

fail:
	pr_debug("failed to open file in requested io mode (open_flags=0x%x, err=%i).\n",
		 ff->open_flags, err);
	/*
	 * The file open mode determines the inode io mode.
	 * Using incorrect open mode is a server mistake, which results in
	 * user visible failure of open() with EIO error.
	 */
	return -EIO;
}
288 
289 /* No more pending io and no new io possible to inode via open/mmapped file */
fuse_file_io_release(struct fuse_file * ff,struct inode * inode)290 void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
291 {
292 	struct fuse_inode *fi = get_fuse_inode(inode);
293 
294 	/*
295 	 * Last passthrough file close allows caching inode io mode.
296 	 * Last caching file close exits caching inode io mode.
297 	 */
298 	switch (ff->iomode) {
299 	case IOM_NONE:
300 		/* Nothing to do */
301 		break;
302 	case IOM_UNCACHED:
303 		fuse_file_uncached_io_release(ff, fi);
304 		break;
305 	case IOM_CACHED:
306 		fuse_file_cached_io_release(ff, fi);
307 		break;
308 	case IOM_PASSTHROUGH:
309 		fuse_file_passthrough_io_release(ff, fi);
310 		break;
311 	}
312 }
313