// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level buffered write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include "internal.h"

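/*
 * Attach a netfs group to a folio as its private data, taking a reference on
 * the group.  Unlike netfs_set_group(), this doesn't look at any private data
 * already attached to the folio.
 */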
static void __netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	if (netfs_group)
		folio_attach_private(folio, netfs_get_group(netfs_group));
}

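/*
 * Make sure a dirty folio carries the right netfs group pointer.  A group can
 * be attached over the top of a bare copy-to-cache marker and, if there's no
 * group to set, a stale copy-to-cache marker is removed.
 */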
static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	void *priv = folio_get_private(folio);

	if (unlikely(priv != netfs_group)) {
		if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
			folio_attach_private(folio, netfs_get_group(netfs_group));
		else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
			folio_detach_private(folio);
	}
}

/*
 * Grab a folio for writing and lock it.  Attempt to allocate as large a folio
 * as possible to hold as much of the remaining length as possible in one go.
 */
static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
						loff_t pos, size_t part)
{
	pgoff_t index = pos / PAGE_SIZE;
	fgf_t fgp_flags = FGP_WRITEBEGIN;

	if (mapping_large_folio_support(mapping))
		fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);

	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}

/*
 * Update i_size and estimate the update to i_blocks to reflect the additional
 * data written into the pagecache until we can find out from the server what
 * the values actually are.
 */
static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
				loff_t i_size, loff_t pos, size_t copied)
{
	blkcnt_t add;
	size_t gap;

	if (ctx->ops->update_i_size) {
		ctx->ops->update_i_size(inode, pos);
		return;
	}

	spin_lock(&inode->i_lock);
	i_size_write(inode, pos);
#if IS_ENABLED(CONFIG_FSCACHE)
	fscache_update_cookie(ctx->cache, NULL, &pos);
#endif

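	/* Estimate the new block count: count only whole new sectors beyond
	 * the sector containing the old EOF, capped at the number of sectors
	 * spanned by the new file size.
	 */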
	gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
	if (copied > gap) {
		add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);

		inode->i_blocks = min_t(blkcnt_t,
					DIV_ROUND_UP(pos, SECTOR_SIZE),
					inode->i_blocks + add);
	}
	spin_unlock(&inode->i_lock);
}

/**
 * netfs_perform_write - Copy data into the pagecache.
 * @iocb: The operation parameters
 * @iter: The source buffer
 * @netfs_group: Grouping for dirty pages (e.g. ceph snaps).
 *
 * Copy data into pagecache pages attached to the inode specified by @iocb.
 * The caller must hold appropriate inode locks.
 *
 * Dirty pages that aren't up to date are tagged with a netfs_folio struct to
 * indicate the modified range.  Dirty pages may also be tagged with a
 * netfs-specific grouping such that data from an old group gets flushed before
 * a new one is started.
 */
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct netfs_inode *ctx = netfs_inode(inode);
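	/* The writeback control spans the entire write and is only consumed
	 * if the write is performed write-through (see below).
	 */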
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.for_sync	= true,
		.nr_to_write	= LONG_MAX,
		.range_start	= iocb->ki_pos,
		.range_end	= iocb->ki_pos + iter->count,
	};
	struct netfs_io_request *wreq = NULL;
	struct folio *folio = NULL, *writethrough = NULL;
	unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
	ssize_t written = 0, ret, ret2;
	loff_t i_size, pos = iocb->ki_pos;
	size_t max_chunk = mapping_max_folio_size(mapping);
	bool maybe_trouble = false;

	if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
		     iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
	    ) {
		wbc_attach_fdatawrite_inode(&wbc, mapping->host);

		ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
		if (ret < 0) {
			wbc_detach_inode(&wbc);
			goto out;
		}

		wreq = netfs_begin_writethrough(iocb, iter->count);
		if (IS_ERR(wreq)) {
			wbc_detach_inode(&wbc);
			ret = PTR_ERR(wreq);
			wreq = NULL;
			goto out;
		}
		if (!is_sync_kiocb(iocb))
			wreq->iocb = iocb;
		netfs_stat(&netfs_n_wh_writethrough);
	} else {
		netfs_stat(&netfs_n_wh_buffered_write);
	}

	do {
		struct netfs_folio *finfo;
		struct netfs_group *group;
		unsigned long long fpos;
		size_t flen;
		size_t offset;	/* Offset into pagecache folio */
		size_t part;	/* Bytes to write to folio */
		size_t copied;	/* Bytes copied from user */

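		/* Make an initial estimate of the chunk we can modify, capped
		 * at the largest folio size the mapping will give us; this is
		 * refined once a folio has actually been obtained.
		 */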
		offset = pos & (max_chunk - 1);
		part = min(max_chunk - offset, iov_iter_count(iter));

		/* Bring in the user pages that we will copy from _first_ lest
		 * we hit a nasty deadlock on copying from the same page as
		 * we're writing to, without it being marked uptodate.
		 *
		 * Not only is this an optimisation, but it is also required to
		 * check that the address is actually valid, when atomic
		 * usercopies are used below.
		 *
		 * We rely on the page being held onto long enough by the LRU
		 * that we can grab it below if this causes it to be read.
		 */
		ret = -EFAULT;
		if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
			break;

		folio = netfs_grab_folio_for_write(mapping, pos, part);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

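		/* Recalculate the write parameters against the folio we
		 * actually got, which may differ in size from the one asked
		 * for.
		 */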
		flen = folio_size(folio);
		fpos = folio_pos(folio);
		offset = pos - fpos;
		part = min_t(size_t, flen - offset, part);

		/* Wait for writeback to complete.  The writeback engine owns
		 * the info in folio->private and may change it until it
		 * removes the WB mark.
		 */
		if (folio_get_private(folio) &&
		    folio_wait_writeback_killable(folio)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		if (signal_pending(current)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		/* Decide how we should modify a folio.  We might be attempting
		 * to do write-streaming, in which case we don't want to do a
		 * local RMW cycle if we can avoid it.  If we're doing local
		 * caching or content crypto, we award that priority over
		 * avoiding RMW.  If the file is open readably, then we also
		 * assume that we may want to read what we wrote.
		 */
		finfo = netfs_folio_info(folio);
		group = netfs_folio_group(folio);

		if (unlikely(group != netfs_group) &&
		    group != NETFS_FOLIO_COPY_TO_CACHE)
			goto flush_content;

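		/* If the folio is already uptodate, we can just modify the
		 * data in place and mark it dirty.
		 */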
		if (folio_test_uptodate(folio)) {
			if (mapping_writably_mapped(mapping))
				flush_dcache_folio(folio);
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			netfs_set_group(folio, netfs_group);
			trace_netfs_folio(folio, netfs_folio_is_uptodate);
			goto copied;
		}

		/* If the page is above the zero-point then we assume that the
		 * server would just return a block of zeros or a short read if
		 * we try to read it.
		 */
		if (fpos >= ctx->zero_point) {
			zero_user_segment(&folio->page, 0, offset);
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			zero_user_segment(&folio->page, offset + copied, flen);
			__netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			trace_netfs_folio(folio, netfs_modify_and_clear);
			goto copied;
		}

		/* See if we can write a whole folio in one go. */
		if (!maybe_trouble && offset == 0 && part >= flen) {
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			if (unlikely(copied < part)) {
				maybe_trouble = true;
				iov_iter_revert(iter, copied);
				copied = 0;
				folio_unlock(folio);
				goto retry;
			}
			__netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			trace_netfs_folio(folio, netfs_whole_folio_modify);
			goto copied;
		}

		/* We don't want to do a streaming write on a file that could
		 * temporarily lose its caching service (because the backing
		 * store got culled), and we don't really want one on a file
		 * that's open for reading either, as ->read_folio() would
		 * then have to be able to flush it.
		 */
		if ((file->f_mode & FMODE_READ) ||
		    netfs_is_cache_enabled(ctx)) {
			if (finfo) {
				netfs_stat(&netfs_n_wh_wstream_conflict);
				goto flush_content;
			}
			ret = netfs_prefetch_for_write(file, folio, offset, part);
			if (ret < 0) {
				_debug("prefetch = %zd", ret);
				goto error_folio_unlock;
			}
			/* Note that copy-to-cache may have been set. */

			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			netfs_set_group(folio, netfs_group);
			trace_netfs_folio(folio, netfs_just_prefetch);
			goto copied;
		}

		if (!finfo) {
			ret = -EIO;
			if (WARN_ON(folio_get_private(folio)))
				goto error_folio_unlock;
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			if (offset == 0 && copied == flen) {
				__netfs_set_group(folio, netfs_group);
				folio_mark_uptodate(folio);
				trace_netfs_folio(folio, netfs_streaming_filled_page);
				goto copied;
			}

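			/* Only part of a non-uptodate folio was modified, so
			 * record the dirty region in a netfs_folio record so
			 * that writeback knows which bytes to send.
			 */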
			finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
			if (!finfo) {
				iov_iter_revert(iter, copied);
				ret = -ENOMEM;
				goto error_folio_unlock;
			}
			finfo->netfs_group = netfs_get_group(netfs_group);
			finfo->dirty_offset = offset;
			finfo->dirty_len = copied;
			folio_attach_private(folio, (void *)((unsigned long)finfo |
							     NETFS_FOLIO_INFO));
			trace_netfs_folio(folio, netfs_streaming_write);
			goto copied;
		}

		/* We can continue a streaming write only if it continues on
		 * from the previous write.  If it overlaps, we must flush lest
		 * we suffer a partial copy and disjoint dirty regions.
		 */
		if (offset == finfo->dirty_offset + finfo->dirty_len) {
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			finfo->dirty_len += copied;
			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
				if (finfo->netfs_group)
					folio_change_private(folio, finfo->netfs_group);
				else
					folio_detach_private(folio);
				folio_mark_uptodate(folio);
				kfree(finfo);
				trace_netfs_folio(folio, netfs_streaming_cont_filled_page);
			} else {
				trace_netfs_folio(folio, netfs_streaming_write_cont);
			}
			goto copied;
		}

		/* Incompatible write; flush the folio and try again. */
	flush_content:
		trace_netfs_folio(folio, netfs_flush_content);
		folio_unlock(folio);
		folio_put(folio);
		ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1);
		if (ret < 0)
			goto out;
		continue;

	copied:
		flush_dcache_folio(folio);

		/* Update the inode size if we moved the EOF marker */
		pos += copied;
		i_size = i_size_read(inode);
		if (pos > i_size)
			netfs_update_i_size(ctx, inode, i_size, pos, copied);
		written += copied;

		if (likely(!wreq)) {
			folio_mark_dirty(folio);
			folio_unlock(folio);
		} else {
			netfs_advance_writethrough(wreq, &wbc, folio, copied,
						   offset + copied == flen,
						   &writethrough);
			/* Folio unlocked */
		}
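	/* The retry label is also jumped to, with copied == 0, if an atomic
	 * copy of a whole folio came up short: the folio was unlocked and the
	 * iterator reverted above, so just drop the folio and go round again.
	 */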
	retry:
		folio_put(folio);
		folio = NULL;

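		/* Let the VM throttle us if we've dirtied too much. */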
		ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
		if (unlikely(ret < 0))
			break;

		cond_resched();
	} while (iov_iter_count(iter));

out:
	if (likely(written)) {
		/* Set indication that ctime and mtime got updated in case
		 * close is deferred.
		 */
		set_bit(NETFS_ICTX_MODIFIED_ATTR, &ctx->flags);
		if (unlikely(ctx->ops->post_modify))
			ctx->ops->post_modify(inode);
	}

	if (unlikely(wreq)) {
		ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
		wbc_detach_inode(&wbc);
		if (ret2 == -EIOCBQUEUED)
			return ret2;
		if (ret == 0)
			ret = ret2;
	}

	iocb->ki_pos += written;
	_leave(" = %zd [%zd]", written, ret);
	return written ? written : ret;

copy_failed:
	ret = -EFAULT;
error_folio_unlock:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
EXPORT_SYMBOL(netfs_perform_write);

/**
 * netfs_buffered_write_iter_locked - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 * @netfs_group: Grouping for dirty pages (e.g. ceph snaps).
 *
 * This function does all the work needed for actually writing data to a
 * file.  It does all basic checks, removes SUID from the file, updates
 * modification times and performs the buffered write.
 *
 * The caller must hold appropriate locks around this function and have called
 * generic_write_checks() already.  The caller is also responsible for doing
 * any necessary syncing afterwards.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * The caller has to handle it.  This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	trace_netfs_write_iter(iocb, from);

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	ret = file_update_time(file);
	if (ret)
		return ret;

	return netfs_perform_write(iocb, from, netfs_group);
}
EXPORT_SYMBOL(netfs_buffered_write_iter_locked);

/**
 * netfs_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Perform a write to a file, writing into the pagecache if possible and doing
 * an unbuffered write instead if not.
 *
 * Return:
 * * Negative error code if no data has been written at all or if
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));

	if (!iov_iter_count(from))
		return 0;

	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_write_iter(iocb, from);

	ret = netfs_start_io_write(inode);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
	netfs_end_io_write(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(netfs_file_write_iter);

/*
 * Notification that a previously read-only page is about to become writable.
 * Note that the caller indicates a single page of a multipage folio.
 */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
{
	struct netfs_group *group;
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file);
	struct netfs_inode *ictx = netfs_inode(inode);
	vm_fault_t ret = VM_FAULT_RETRY;
	int err;

	_enter("%lx", folio->index);

	sb_start_pagefault(inode->i_sb);

	if (folio_lock_killable(folio) < 0)
		goto out;
	if (folio->mapping != mapping) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (folio_wait_writeback_killable(folio)) {
		ret = VM_FAULT_LOCKED;
		goto out;
	}

	/* Can we see a streaming write here? */
	if (WARN_ON(!folio_test_uptodate(folio))) {
		ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
		goto out;
	}

	group = netfs_folio_group(folio);
	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
		folio_unlock(folio);
		err = filemap_fdatawrite_range(mapping,
					       folio_pos(folio),
					       folio_pos(folio) + folio_size(folio));
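		/* Translate the writeback result into a fault disposition:
		 * get the fault retried on success, otherwise fail it.
		 */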
		switch (err) {
		case 0:
			ret = VM_FAULT_RETRY;
			goto out;
		case -ENOMEM:
			ret = VM_FAULT_OOM;
			goto out;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out;
		}
	}

	if (folio_test_dirty(folio))
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
	else
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
	netfs_set_group(folio, netfs_group);
	file_update_time(file);
	set_bit(NETFS_ICTX_MODIFIED_ATTR, &ictx->flags);
	if (ictx->ops->post_modify)
		ictx->ops->post_modify(inode);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(netfs_page_mkwrite);