// SPDX-License-Identifier: GPL-2.0-or-later
/* Unbuffered and direct write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/uio.h>
#include "internal.h"

static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
{
	struct inode *inode = wreq->inode;
	unsigned long long end = wreq->start + wreq->transferred;

	if (wreq->error || end <= i_size_read(inode))
		return;

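	/* Another writer may have extended the file since we sampled i_size
	 * above, so recheck under i_lock before publishing the new size.
	 */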
	spin_lock(&inode->i_lock);
	if (end > i_size_read(inode)) {
		if (wreq->netfs_ops->update_i_size)
			wreq->netfs_ops->update_i_size(inode, end);
		else
			i_size_write(inode, end);
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Perform an unbuffered write where we may have to do an RMW operation on an
 * encrypted file.  This can also be used for direct I/O writes.
 */
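/* The caller is expected to hold locks against conflicting I/O;
 * netfs_unbuffered_write_iter() below arranges that via
 * netfs_start_io_direct() before calling in here.
 */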
ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
					   struct netfs_group *netfs_group)
{
	struct netfs_io_request *wreq;
	unsigned long long start = iocb->ki_pos;
	unsigned long long end = start + iov_iter_count(iter);
	ssize_t ret, n;
	size_t len = iov_iter_count(iter);
	bool async = !is_sync_kiocb(iocb);

	_enter("");

	/* We're going to need a bounce buffer if what we transmit is going to
	 * be different in some way to the source buffer, e.g. because it gets
	 * encrypted/compressed or because it needs expanding to a block size.
	 */
	// TODO

	_debug("uw %llx-%llx", start, end);

	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
				      iocb->ki_flags & IOCB_DIRECT ?
				      NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
	if (IS_ERR(wreq))
		return PTR_ERR(wreq);

	wreq->io_streams[0].avail = true;
	trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ?
				 netfs_write_trace_dio_write :
				 netfs_write_trace_unbuffered_write));

	{
		/* If this is an async op and we're not using a bounce buffer,
		 * we have to save the source buffer as the iterator is only
		 * good until we return.  In such a case, extract an iterator
		 * to represent as much of the output buffer as we can
		 * manage.  Note that the extraction might not be able to
		 * allocate a sufficiently large bvec array and may shorten the
		 * request.
		 */
		if (user_backed_iter(iter)) {
			n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
			if (n < 0) {
				ret = n;
				goto out;
			}
			wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
			wreq->direct_bv_count = n;
			wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
		} else {
			/* If this is a kernel-generated async DIO request,
			 * assume that any resources the iterator points to
			 * (eg. a bio_vec array) will persist till the end of
			 * the op.
			 */
			wreq->iter = *iter;
		}

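		/* The write machinery consumes wreq->io_iter as subrequests
		 * are issued, so give it its own copy and preserve the
		 * original in wreq->iter.
		 */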
		wreq->io_iter = wreq->iter;
	}

	__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);

	/* Copy the data into the bounce buffer and encrypt it. */
	// TODO

	/* Dispatch the write. */
	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
	if (async)
		wreq->iocb = iocb;
	wreq->len = iov_iter_count(&wreq->io_iter);
	wreq->cleanup = netfs_cleanup_dio_write;
	ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
	if (ret < 0) {
		_debug("begin = %zd", ret);
		goto out;
	}

	if (!async) {
		trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
			    TASK_UNINTERRUPTIBLE);
		smp_rmb(); /* Read error/transferred after RIP flag */
		ret = wreq->error;
		if (ret == 0) {
			ret = wreq->transferred;
			iocb->ki_pos += ret;
		}
	} else {
		ret = -EIOCBQUEUED;
	}

out:
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked);

/**
 * netfs_unbuffered_write_iter - Unbuffered write to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Do an unbuffered write to a file, writing the data directly to the server
 * and not lodging the data in the pagecache.
 *
 * Return:
 * * Negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;
	loff_t pos = iocb->ki_pos;
	unsigned long long end = pos + iov_iter_count(from) - 1;

	_enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));

	if (!iov_iter_count(from))
		return 0;

	trace_netfs_write_iter(iocb, from);
	netfs_stat(&netfs_n_wh_dio_write);

	ret = netfs_start_io_direct(inode);
	if (ret < 0)
		return ret;
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(file);
	if (ret < 0)
		goto out;
	ret = file_update_time(file);
	if (ret < 0)
		goto out;
	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* We could block if there are any pages in the range. */
		ret = -EAGAIN;
		if (filemap_range_has_page(mapping, pos, end))
			if (filemap_invalidate_inode(inode, true, pos, end))
				goto out;
	} else {
		ret = filemap_write_and_wait_range(mapping, pos, end);
		if (ret < 0)
			goto out;
	}

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached pages from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	ret = filemap_invalidate_inode(inode, true, pos, end);
	if (ret < 0)
		goto out;
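	/* The write is about to populate this range, so data beyond the old
	 * zero_point can no longer be assumed to be zeroes; push the marker
	 * out past the end of the write.
	 */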
	end = iocb->ki_pos + iov_iter_count(from);
	if (end > ictx->zero_point)
		ictx->zero_point = end;

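	/* Invalidate any locally cached data for this file; the authoritative
	 * copy on the server is about to change.
	 */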
	fscache_invalidate(netfs_i_cookie(ictx), NULL, i_size_read(inode),
			   FSCACHE_INVAL_DIO_WRITE);
	ret = netfs_unbuffered_write_iter_locked(iocb, from, NULL);
out:
	netfs_end_io_direct(inode);
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter);
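
/* Illustrative only: a minimal sketch of how a network filesystem might
 * route O_DIRECT writes to the helper above from its ->write_iter method.
 * The "myfs" names are hypothetical; netfs_file_write_iter() is the netfs
 * buffered-write entry point.
 */
#if 0
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	/* Direct I/O bypasses the pagecache entirely. */
	if (iocb->ki_flags & IOCB_DIRECT)
		return netfs_unbuffered_write_iter(iocb, from);

	/* Everything else goes through the buffered netfs write path. */
	return netfs_file_write_iter(iocb, from);
}
#endif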