1 /*
2  * Copyright (C) 2010 Red Hat, Inc.
3  * Copyright (c) 2016 Christoph Hellwig.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 #include <linux/module.h>
15 #include <linux/compiler.h>
16 #include <linux/fs.h>
17 #include <linux/iomap.h>
18 #include <linux/uaccess.h>
19 #include <linux/gfp.h>
20 #include <linux/mm.h>
21 #include <linux/swap.h>
22 #include <linux/pagemap.h>
23 #include <linux/file.h>
24 #include <linux/uio.h>
25 #include <linux/backing-dev.h>
26 #include <linux/buffer_head.h>
27 #include <linux/task_io_accounting_ops.h>
28 #include <linux/dax.h>
29 #include <linux/sched/signal.h>
30 
31 #include "internal.h"
32 
33 /*
34  * Execute an iomap write on a segment of the mapping that spans a
35  * contiguous range of pages that have identical block mapping state.
36  *
37  * This avoids the need to map pages individually, do individual allocations
38  * for each page and most importantly avoid the need for filesystem specific
39  * locking per page. Instead, all the operations are amortised over the entire
40  * range of pages. It is assumed that the filesystems will lock whatever
41  * resources they require in the iomap_begin call, and release them in the
42  * iomap_end call.
43  */
44 loff_t
45 iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
46 		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
47 {
48 	struct iomap iomap = { 0 };
49 	loff_t written = 0, ret;
50 
51 	/*
52 	 * Need to map a range from start position for length bytes. This can
53 	 * span multiple pages - it is only guaranteed to return a range of a
54 	 * single type of pages (e.g. all into a hole, all mapped or all
55 	 * unwritten). Failure at this point has nothing to undo.
56 	 *
57 	 * If allocation is required for this range, reserve the space now so
58 	 * that the allocation is guaranteed to succeed later on. Once we copy
59 	 * the data into the page cache pages, then we cannot fail otherwise we
60 	 * expose transient stale data. If the reserve fails, we can safely
61 	 * back out at this point as there is nothing to undo.
62 	 */
63 	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
64 	if (ret)
65 		return ret;
66 	if (WARN_ON(iomap.offset > pos))
67 		return -EIO;
68 
69 	/*
70 	 * Cut down the length to the one actually provided by the filesystem,
71 	 * as it might not be able to give us the whole size that we requested.
72 	 */
73 	if (iomap.offset + iomap.length < pos + length)
74 		length = iomap.offset + iomap.length - pos;
75 
76 	/*
77 	 * Now that we have guaranteed that the space allocation will succeed,
78 	 * we can do the copy-in page by page without having to worry about
79 	 * failures exposing transient data.
80 	 */
81 	written = actor(inode, pos, length, data, &iomap);
82 
83 	/*
84 	 * Now the data has been copied, commit the range we've copied.  This
85 	 * should not fail unless the filesystem has had a fatal error.
86 	 */
87 	if (ops->iomap_end) {
88 		ret = ops->iomap_end(inode, pos, length,
89 				     written > 0 ? written : 0,
90 				     flags, &iomap);
91 	}
92 
93 	return written ? written : ret;
94 }
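
/*
 * The ops side of the contract, for reference (a minimal sketch with
 * hypothetical "myfs_" names, not part of this file): a filesystem supplies
 *
 *	const struct iomap_ops myfs_iomap_ops = {
 *		.iomap_begin	= myfs_iomap_begin,
 *		.iomap_end	= myfs_iomap_end,
 *	};
 *
 * where ->iomap_begin fills in the struct iomap describing the extent that
 * backs the requested range (allocating or reserving space if needed) and
 * ->iomap_end releases whatever ->iomap_begin set up.
 */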
95 
96 static void
97 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
98 {
99 	loff_t i_size = i_size_read(inode);
100 
101 	/*
102 	 * Only truncate newly allocated pages beyond EOF, even if the
103 	 * write started inside the existing inode size.
104 	 */
105 	if (pos + len > i_size)
106 		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
107 }
108 
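/*
 * Grab and lock the pagecache page covering pos and prepare its buffers from
 * the iomap returned by ->iomap_begin (reading in partially overwritten
 * blocks as needed).  On failure the page is released and any pagecache this
 * write instantiated beyond EOF is truncated away again.
 */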
109 static int
110 iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
111 		struct page **pagep, struct iomap *iomap)
112 {
113 	pgoff_t index = pos >> PAGE_SHIFT;
114 	struct page *page;
115 	int status = 0;
116 
117 	BUG_ON(pos + len > iomap->offset + iomap->length);
118 
119 	if (fatal_signal_pending(current))
120 		return -EINTR;
121 
122 	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
123 	if (!page)
124 		return -ENOMEM;
125 
126 	status = __block_write_begin_int(page, pos, len, NULL, iomap);
127 	if (unlikely(status)) {
128 		unlock_page(page);
129 		put_page(page);
130 		page = NULL;
131 
132 		iomap_write_failed(inode, pos, len);
133 	}
134 
135 	*pagep = page;
136 	return status;
137 }
138 
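/*
 * Commit the copied data: generic_write_end() marks the page dirty, updates
 * i_size if the write extended the file, and unlocks and releases the page.
 * A short commit (ret < len) means not all of the data made it into the
 * page, so trim back any pagecache beyond EOF that the failed part of the
 * write created.
 */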
139 static int
140 iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
141 		unsigned copied, struct page *page)
142 {
143 	int ret;
144 
145 	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
146 			copied, page, NULL);
147 	if (ret < len)
148 		iomap_write_failed(inode, pos, len);
149 	return ret;
150 }
151 
152 static loff_t
153 iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
154 		struct iomap *iomap)
155 {
156 	struct iov_iter *i = data;
157 	long status = 0;
158 	ssize_t written = 0;
159 	unsigned int flags = AOP_FLAG_NOFS;
160 
161 	do {
162 		struct page *page;
163 		unsigned long offset;	/* Offset into pagecache page */
164 		unsigned long bytes;	/* Bytes to write to page */
165 		size_t copied;		/* Bytes copied from user */
166 
167 		offset = (pos & (PAGE_SIZE - 1));
168 		bytes = min_t(unsigned long, PAGE_SIZE - offset,
169 						iov_iter_count(i));
170 again:
171 		if (bytes > length)
172 			bytes = length;
173 
174 		/*
175 		 * Bring in the user page that we will copy from _first_.
176 		 * Otherwise there's a nasty deadlock on copying from the
177 		 * same page as we're writing to, without it being marked
178 		 * up-to-date.
179 		 *
180 		 * Not only is this an optimisation, but it is also required
181 		 * to check that the address is actually valid, when atomic
182 		 * usercopies are used, below.
183 		 */
184 		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
185 			status = -EFAULT;
186 			break;
187 		}
188 
189 		status = iomap_write_begin(inode, pos, bytes, flags, &page,
190 				iomap);
191 		if (unlikely(status))
192 			break;
193 
194 		if (mapping_writably_mapped(inode->i_mapping))
195 			flush_dcache_page(page);
196 
197 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
198 
199 		flush_dcache_page(page);
200 
201 		status = iomap_write_end(inode, pos, bytes, copied, page);
202 		if (unlikely(status < 0))
203 			break;
204 		copied = status;
205 
206 		cond_resched();
207 
208 		iov_iter_advance(i, copied);
209 		if (unlikely(copied == 0)) {
210 			/*
211 			 * If we were unable to copy any data at all, we must
212 			 * fall back to a single segment length write.
213 			 *
214 			 * If we didn't fallback here, we could livelock
215 			 * because not all segments in the iov can be copied at
216 			 * once without a pagefault.
217 			 */
218 			bytes = min_t(unsigned long, PAGE_SIZE - offset,
219 						iov_iter_single_seg_count(i));
220 			goto again;
221 		}
222 		pos += copied;
223 		written += copied;
224 		length -= copied;
225 
226 		balance_dirty_pages_ratelimited(inode->i_mapping);
227 	} while (iov_iter_count(i) && length);
228 
229 	return written ? written : status;
230 }
231 
232 ssize_t
233 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
234 		const struct iomap_ops *ops)
235 {
236 	struct inode *inode = iocb->ki_filp->f_mapping->host;
237 	loff_t pos = iocb->ki_pos, ret = 0, written = 0;
238 
239 	while (iov_iter_count(iter)) {
240 		ret = iomap_apply(inode, pos, iov_iter_count(iter),
241 				IOMAP_WRITE, ops, iter, iomap_write_actor);
242 		if (ret <= 0)
243 			break;
244 		pos += ret;
245 		written += ret;
246 	}
247 
248 	return written ? written : ret;
249 }
250 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
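
/*
 * Typical use (an illustrative sketch; the "myfs_" names are hypothetical):
 * a filesystem's ->write_iter takes its inode locks, runs the usual
 * generic_write_checks()/file_remove_privs() style preparation, and then
 * simply calls
 *
 *	ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
 *
 * relying on ->iomap_begin/->iomap_end for all block allocation and locking.
 */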
251 
252 static struct page *
253 __iomap_read_page(struct inode *inode, loff_t offset)
254 {
255 	struct address_space *mapping = inode->i_mapping;
256 	struct page *page;
257 
258 	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
259 	if (IS_ERR(page))
260 		return page;
261 	if (!PageUptodate(page)) {
262 		put_page(page);
263 		return ERR_PTR(-EIO);
264 	}
265 	return page;
266 }
267 
268 static loff_t
269 iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
270 		struct iomap *iomap)
271 {
272 	long status = 0;
273 	ssize_t written = 0;
274 
275 	do {
276 		struct page *page, *rpage;
277 		unsigned long offset;	/* Offset into pagecache page */
278 		unsigned long bytes;	/* Bytes to write to page */
279 
280 		offset = (pos & (PAGE_SIZE - 1));
281 		bytes = min_t(loff_t, PAGE_SIZE - offset, length);
282 
283 		rpage = __iomap_read_page(inode, pos);
284 		if (IS_ERR(rpage))
285 			return PTR_ERR(rpage);
286 
287 		status = iomap_write_begin(inode, pos, bytes,
288 					   AOP_FLAG_NOFS, &page, iomap);
289 		put_page(rpage);
290 		if (unlikely(status))
291 			return status;
292 
293 		WARN_ON_ONCE(!PageUptodate(page));
294 
295 		status = iomap_write_end(inode, pos, bytes, bytes, page);
296 		if (unlikely(status <= 0)) {
297 			if (WARN_ON_ONCE(status == 0))
298 				return -EIO;
299 			return status;
300 		}
301 
302 		cond_resched();
303 
304 		pos += status;
305 		written += status;
306 		length -= status;
307 
308 		balance_dirty_pages_ratelimited(inode->i_mapping);
309 	} while (length);
310 
311 	return written;
312 }
313 
314 int
315 iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
316 		const struct iomap_ops *ops)
317 {
318 	loff_t ret;
319 
320 	while (len) {
321 		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
322 				iomap_dirty_actor);
323 		if (ret <= 0)
324 			return ret;
325 		pos += ret;
326 		len -= ret;
327 	}
328 
329 	return 0;
330 }
331 EXPORT_SYMBOL_GPL(iomap_file_dirty);
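
/*
 * iomap_file_dirty() reads each page in the range and immediately runs it
 * back through write_begin/write_end, redirtying it so that writeback goes
 * through the blocks described by ->iomap_begin (used, for example, to
 * unshare reflinked blocks by rewriting them in place).
 */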
332 
333 static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
334 		unsigned bytes, struct iomap *iomap)
335 {
336 	struct page *page;
337 	int status;
338 
339 	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
340 				   iomap);
341 	if (status)
342 		return status;
343 
344 	zero_user(page, offset, bytes);
345 	mark_page_accessed(page);
346 
347 	return iomap_write_end(inode, pos, bytes, bytes, page);
348 }
349 
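/*
 * Zero a sub-page range directly on the DAX device, bypassing the page
 * cache.  iomap->blkno is expressed in 512-byte units, hence the ">> 9"
 * when converting the byte offset into the extent to a sector.
 */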
350 static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
351 		struct iomap *iomap)
352 {
353 	sector_t sector = iomap->blkno +
354 		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);
355 
356 	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
357 			offset, bytes);
358 }
359 
360 static loff_t
361 iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
362 		void *data, struct iomap *iomap)
363 {
364 	bool *did_zero = data;
365 	loff_t written = 0;
366 	int status;
367 
368 	/* already zeroed?  we're done. */
369 	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
370 		return count;
371 
372 	do {
373 		unsigned offset, bytes;
374 
375 		offset = pos & (PAGE_SIZE - 1); /* Within page */
376 		bytes = min_t(loff_t, PAGE_SIZE - offset, count);
377 
378 		if (IS_DAX(inode))
379 			status = iomap_dax_zero(pos, offset, bytes, iomap);
380 		else
381 			status = iomap_zero(inode, pos, offset, bytes, iomap);
382 		if (status < 0)
383 			return status;
384 
385 		pos += bytes;
386 		count -= bytes;
387 		written += bytes;
388 		if (did_zero)
389 			*did_zero = true;
390 	} while (count > 0);
391 
392 	return written;
393 }
394 
395 int
396 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
397 		const struct iomap_ops *ops)
398 {
399 	loff_t ret;
400 
401 	while (len > 0) {
402 		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
403 				ops, did_zero, iomap_zero_range_actor);
404 		if (ret <= 0)
405 			return ret;
406 
407 		pos += ret;
408 		len -= ret;
409 	}
410 
411 	return 0;
412 }
413 EXPORT_SYMBOL_GPL(iomap_zero_range);
414 
415 int
416 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
417 		const struct iomap_ops *ops)
418 {
419 	unsigned int blocksize = i_blocksize(inode);
420 	unsigned int off = pos & (blocksize - 1);
421 
422 	/* Block boundary? Nothing to do */
423 	if (!off)
424 		return 0;
425 	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
426 }
427 EXPORT_SYMBOL_GPL(iomap_truncate_page);
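
/*
 * Illustrative caller (a sketch; "myfs_iomap_ops" is hypothetical): a
 * filesystem truncate path zeroes the partial tail block at the new EOF with
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero, &myfs_iomap_ops);
 *
 * so that stale data past the new size never becomes visible.
 */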
428 
429 static loff_t
430 iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
431 		void *data, struct iomap *iomap)
432 {
433 	struct page *page = data;
434 	int ret;
435 
436 	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
437 	if (ret)
438 		return ret;
439 
440 	block_commit_write(page, 0, length);
441 	return length;
442 }
443 
444 int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
445 {
446 	struct page *page = vmf->page;
447 	struct inode *inode = file_inode(vmf->vma->vm_file);
448 	unsigned long length;
449 	loff_t offset, size;
450 	ssize_t ret;
451 
452 	lock_page(page);
453 	size = i_size_read(inode);
454 	if ((page->mapping != inode->i_mapping) ||
455 	    (page_offset(page) > size)) {
456 		/* We overload EFAULT to mean page got truncated */
457 		ret = -EFAULT;
458 		goto out_unlock;
459 	}
460 
461 	/* page is wholly or partially inside EOF */
462 	if (((page->index + 1) << PAGE_SHIFT) > size)
463 		length = size & ~PAGE_MASK;
464 	else
465 		length = PAGE_SIZE;
466 
467 	offset = page_offset(page);
468 	while (length > 0) {
469 		ret = iomap_apply(inode, offset, length,
470 				IOMAP_WRITE | IOMAP_FAULT, ops, page,
471 				iomap_page_mkwrite_actor);
472 		if (unlikely(ret <= 0))
473 			goto out_unlock;
474 		offset += ret;
475 		length -= ret;
476 	}
477 
478 	set_page_dirty(page);
479 	wait_for_stable_page(page);
480 	return VM_FAULT_LOCKED;
481 out_unlock:
482 	unlock_page(page);
483 	return block_page_mkwrite_return(ret);
484 }
485 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
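
/*
 * Illustrative wiring into ->page_mkwrite (a sketch; the "myfs_" names are
 * hypothetical):
 *
 *	int myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		... take filesystem-specific locks ...
 *		ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *		... drop locks ...
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */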
486 
487 struct fiemap_ctx {
488 	struct fiemap_extent_info *fi;
489 	struct iomap prev;
490 };
491 
492 static int iomap_to_fiemap(struct fiemap_extent_info *fi,
493 		struct iomap *iomap, u32 flags)
494 {
495 	switch (iomap->type) {
496 	case IOMAP_HOLE:
497 		/* skip holes */
498 		return 0;
499 	case IOMAP_DELALLOC:
500 		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
501 		break;
502 	case IOMAP_UNWRITTEN:
503 		flags |= FIEMAP_EXTENT_UNWRITTEN;
504 		break;
505 	case IOMAP_MAPPED:
506 		break;
507 	}
508 
509 	if (iomap->flags & IOMAP_F_MERGED)
510 		flags |= FIEMAP_EXTENT_MERGED;
511 	if (iomap->flags & IOMAP_F_SHARED)
512 		flags |= FIEMAP_EXTENT_SHARED;
513 
514 	return fiemap_fill_next_extent(fi, iomap->offset,
515 			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
516 			iomap->length, flags);
517 
518 }
519 
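/*
 * Extents are reported one call behind: each invocation emits the extent
 * remembered in ctx->prev and stashes the current one, so that iomap_fiemap()
 * below can mark the final extent with FIEMAP_EXTENT_LAST once the walk is
 * finished.
 */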
520 static loff_t
521 iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
522 		struct iomap *iomap)
523 {
524 	struct fiemap_ctx *ctx = data;
525 	loff_t ret = length;
526 
527 	if (iomap->type == IOMAP_HOLE)
528 		return length;
529 
530 	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
531 	ctx->prev = *iomap;
532 	switch (ret) {
533 	case 0:		/* success */
534 		return length;
535 	case 1:		/* extent array full */
536 		return 0;
537 	default:
538 		return ret;
539 	}
540 }
541 
542 int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
543 		loff_t start, loff_t len, const struct iomap_ops *ops)
544 {
545 	struct fiemap_ctx ctx;
546 	loff_t ret;
547 
548 	memset(&ctx, 0, sizeof(ctx));
549 	ctx.fi = fi;
550 	ctx.prev.type = IOMAP_HOLE;
551 
552 	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
553 	if (ret)
554 		return ret;
555 
556 	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
557 		ret = filemap_write_and_wait(inode->i_mapping);
558 		if (ret)
559 			return ret;
560 	}
561 
562 	while (len > 0) {
563 		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
564 				iomap_fiemap_actor);
565 		/* inode with no (attribute) mapping will give ENOENT */
566 		if (ret == -ENOENT)
567 			break;
568 		if (ret < 0)
569 			return ret;
570 		if (ret == 0)
571 			break;
572 
573 		start += ret;
574 		len -= ret;
575 	}
576 
577 	if (ctx.prev.type != IOMAP_HOLE) {
578 		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
579 		if (ret < 0)
580 			return ret;
581 	}
582 
583 	return 0;
584 }
585 EXPORT_SYMBOL_GPL(iomap_fiemap);
586 
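/*
 * Convention for the seek actors below: returning 0 stops the iomap_apply()
 * loop because the result has already been stored through the data pointer;
 * returning the full length means "nothing found in this mapping, keep
 * scanning".  Unwritten extents need the extra page cache check since dirty
 * data over them exists only in memory.
 */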
587 static loff_t
588 iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
589 		      void *data, struct iomap *iomap)
590 {
591 	switch (iomap->type) {
592 	case IOMAP_UNWRITTEN:
593 		offset = page_cache_seek_hole_data(inode, offset, length,
594 						   SEEK_HOLE);
595 		if (offset < 0)
596 			return length;
597 		/* fall through */
598 	case IOMAP_HOLE:
599 		*(loff_t *)data = offset;
600 		return 0;
601 	default:
602 		return length;
603 	}
604 }
605 
606 loff_t
607 iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
608 {
609 	loff_t size = i_size_read(inode);
610 	loff_t length = size - offset;
611 	loff_t ret;
612 
613 	/* Nothing to be found before or beyond the end of the file. */
614 	if (offset < 0 || offset >= size)
615 		return -ENXIO;
616 
617 	while (length > 0) {
618 		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
619 				  &offset, iomap_seek_hole_actor);
620 		if (ret < 0)
621 			return ret;
622 		if (ret == 0)
623 			break;
624 
625 		offset += ret;
626 		length -= ret;
627 	}
628 
629 	return offset;
630 }
631 EXPORT_SYMBOL_GPL(iomap_seek_hole);
632 
633 static loff_t
634 iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
635 		      void *data, struct iomap *iomap)
636 {
637 	switch (iomap->type) {
638 	case IOMAP_HOLE:
639 		return length;
640 	case IOMAP_UNWRITTEN:
641 		offset = page_cache_seek_hole_data(inode, offset, length,
642 						   SEEK_DATA);
643 		if (offset < 0)
644 			return length;
645 		/*FALLTHRU*/
646 	default:
647 		*(loff_t *)data = offset;
648 		return 0;
649 	}
650 }
651 
652 loff_t
653 iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
654 {
655 	loff_t size = i_size_read(inode);
656 	loff_t length = size - offset;
657 	loff_t ret;
658 
659 	/* Nothing to be found before or beyond the end of the file. */
660 	if (offset < 0 || offset >= size)
661 		return -ENXIO;
662 
663 	while (length > 0) {
664 		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
665 				  &offset, iomap_seek_data_actor);
666 		if (ret < 0)
667 			return ret;
668 		if (ret == 0)
669 			break;
670 
671 		offset += ret;
672 		length -= ret;
673 	}
674 
675 	if (length <= 0)
676 		return -ENXIO;
677 	return offset;
678 }
679 EXPORT_SYMBOL_GPL(iomap_seek_data);
680 
681 /*
682  * Private flags for iomap_dio, must not overlap with the public ones in
683  * iomap.h:
684  */
685 #define IOMAP_DIO_WRITE		(1 << 30)
686 #define IOMAP_DIO_DIRTY		(1 << 31)
687 
688 struct iomap_dio {
689 	struct kiocb		*iocb;
690 	iomap_dio_end_io_t	*end_io;
691 	loff_t			i_size;
692 	loff_t			size;
693 	atomic_t		ref;
694 	unsigned		flags;
695 	int			error;
696 	bool			wait_for_completion;
697 
698 	union {
699 		/* used during submission and for synchronous completion: */
700 		struct {
701 			struct iov_iter		*iter;
702 			struct task_struct	*waiter;
703 			struct request_queue	*last_queue;
704 			blk_qc_t		cookie;
705 		} submit;
706 
707 		/* used for aio completion: */
708 		struct {
709 			struct work_struct	work;
710 		} aio;
711 	};
712 };
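
/*
 * Reference counting: iomap_dio_rw() holds one reference across submission
 * and every submitted bio takes another.  When the last reference is dropped
 * the request is completed: inline or via the aio completion workqueue from
 * iomap_dio_bio_end_io() for async I/O, or by iomap_dio_rw() itself (after
 * being woken) for synchronous I/O.
 */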
713 
714 static ssize_t iomap_dio_complete(struct iomap_dio *dio)
715 {
716 	struct kiocb *iocb = dio->iocb;
717 	struct inode *inode = file_inode(iocb->ki_filp);
718 	loff_t offset = iocb->ki_pos;
719 	ssize_t ret;
720 
721 	if (dio->end_io) {
722 		ret = dio->end_io(iocb,
723 				dio->error ? dio->error : dio->size,
724 				dio->flags);
725 	} else {
726 		ret = dio->error;
727 	}
728 
729 	if (likely(!ret)) {
730 		ret = dio->size;
731 		/* check for short read */
732 		if (offset + ret > dio->i_size &&
733 		    !(dio->flags & IOMAP_DIO_WRITE))
734 			ret = dio->i_size - offset;
735 		iocb->ki_pos += ret;
736 	}
737 
738 	/*
739 	 * Try again to invalidate clean pages which might have been cached by
740 	 * non-direct readahead, or faulted in by get_user_pages() if the source
741 	 * of the write was an mmap'ed region of the file we're writing.  Either
742 	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
743 	 * this invalidation fails, tough, the write still worked...
744 	 *
745 	 * And this page cache invalidation has to be after dio->end_io(), as
746 	 * some filesystems convert unwritten extents to real allocations in
747 	 * end_io() when necessary, otherwise a racing buffer read would cache
748 	 * zeros from unwritten extents.
749 	 */
750 	if (!dio->error &&
751 	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
752 		int err;
753 		err = invalidate_inode_pages2_range(inode->i_mapping,
754 				offset >> PAGE_SHIFT,
755 				(offset + dio->size - 1) >> PAGE_SHIFT);
756 		if (err)
757 			dio_warn_stale_pagecache(iocb->ki_filp);
758 	}
759 
760 	inode_dio_end(file_inode(iocb->ki_filp));
761 	kfree(dio);
762 
763 	return ret;
764 }
765 
766 static void iomap_dio_complete_work(struct work_struct *work)
767 {
768 	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
769 	struct kiocb *iocb = dio->iocb;
770 	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
771 	ssize_t ret;
772 
773 	ret = iomap_dio_complete(dio);
774 	if (is_write && ret > 0)
775 		ret = generic_write_sync(iocb, ret);
776 	iocb->ki_complete(iocb, ret, 0);
777 }
778 
779 /*
780  * Set an error in the dio if none is set yet.  We have to use cmpxchg
781  * as the submission context and the completion context(s) can race to
782  * update the error.
783  */
784 static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
785 {
786 	cmpxchg(&dio->error, 0, ret);
787 }
788 
789 static void iomap_dio_bio_end_io(struct bio *bio)
790 {
791 	struct iomap_dio *dio = bio->bi_private;
792 	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
793 
794 	if (bio->bi_status)
795 		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
796 
797 	if (atomic_dec_and_test(&dio->ref)) {
798 		if (dio->wait_for_completion) {
799 			struct task_struct *waiter = dio->submit.waiter;
800 			WRITE_ONCE(dio->submit.waiter, NULL);
801 			wake_up_process(waiter);
802 		} else if (dio->flags & IOMAP_DIO_WRITE) {
803 			struct inode *inode = file_inode(dio->iocb->ki_filp);
804 
805 			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
806 			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
807 		} else {
808 			iomap_dio_complete_work(&dio->aio.work);
809 		}
810 	}
811 
812 	if (should_dirty) {
813 		bio_check_pages_dirty(bio);
814 	} else {
815 		struct bio_vec *bvec;
816 		int i;
817 
818 		bio_for_each_segment_all(bvec, bio, i)
819 			put_page(bvec->bv_page);
820 		bio_put(bio);
821 	}
822 }
823 
824 static blk_qc_t
825 iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
826 		unsigned len)
827 {
828 	struct page *page = ZERO_PAGE(0);
829 	struct bio *bio;
830 
831 	bio = bio_alloc(GFP_KERNEL, 1);
832 	bio_set_dev(bio, iomap->bdev);
833 	bio->bi_iter.bi_sector =
834 		iomap->blkno + ((pos - iomap->offset) >> 9);
835 	bio->bi_private = dio;
836 	bio->bi_end_io = iomap_dio_bio_end_io;
837 
838 	get_page(page);
839 	if (bio_add_page(bio, page, len, 0) != len)
840 		BUG();
841 	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
842 
843 	atomic_inc(&dio->ref);
844 	return submit_bio(bio);
845 }
846 
847 static loff_t
848 iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
849 		void *data, struct iomap *iomap)
850 {
851 	struct iomap_dio *dio = data;
852 	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
853 	unsigned int fs_block_size = i_blocksize(inode), pad;
854 	unsigned int align = iov_iter_alignment(dio->submit.iter);
855 	struct iov_iter iter;
856 	struct bio *bio;
857 	bool need_zeroout = false;
858 	int nr_pages, ret;
859 
860 	if ((pos | length | align) & ((1 << blkbits) - 1))
861 		return -EINVAL;
862 
863 	switch (iomap->type) {
864 	case IOMAP_HOLE:
865 		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
866 			return -EIO;
867 		/*FALLTHRU*/
868 	case IOMAP_UNWRITTEN:
869 		if (!(dio->flags & IOMAP_DIO_WRITE)) {
870 			iov_iter_zero(length, dio->submit.iter);
871 			dio->size += length;
872 			return length;
873 		}
874 		dio->flags |= IOMAP_DIO_UNWRITTEN;
875 		need_zeroout = true;
876 		break;
877 	case IOMAP_MAPPED:
878 		if (iomap->flags & IOMAP_F_SHARED)
879 			dio->flags |= IOMAP_DIO_COW;
880 		if (iomap->flags & IOMAP_F_NEW)
881 			need_zeroout = true;
882 		break;
883 	default:
884 		WARN_ON_ONCE(1);
885 		return -EIO;
886 	}
887 
888 	/*
889 	 * Operate on a partial iter trimmed to the extent we were called for.
890 	 * We'll update the iter in the dio once we're done with this extent.
891 	 */
892 	iter = *dio->submit.iter;
893 	iov_iter_truncate(&iter, length);
894 
895 	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
896 	if (nr_pages <= 0)
897 		return nr_pages;
898 
899 	if (need_zeroout) {
900 		/* zero out from the start of the block to the write offset */
901 		pad = pos & (fs_block_size - 1);
902 		if (pad)
903 			iomap_dio_zero(dio, iomap, pos - pad, pad);
904 	}
905 
906 	do {
907 		if (dio->error)
908 			return 0;
909 
910 		bio = bio_alloc(GFP_KERNEL, nr_pages);
911 		bio_set_dev(bio, iomap->bdev);
912 		bio->bi_iter.bi_sector =
913 			iomap->blkno + ((pos - iomap->offset) >> 9);
914 		bio->bi_write_hint = dio->iocb->ki_hint;
915 		bio->bi_private = dio;
916 		bio->bi_end_io = iomap_dio_bio_end_io;
917 
918 		ret = bio_iov_iter_get_pages(bio, &iter);
919 		if (unlikely(ret)) {
920 			bio_put(bio);
921 			return ret;
922 		}
923 
924 		if (dio->flags & IOMAP_DIO_WRITE) {
925 			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
926 			task_io_account_write(bio->bi_iter.bi_size);
927 		} else {
928 			bio_set_op_attrs(bio, REQ_OP_READ, 0);
929 			if (dio->flags & IOMAP_DIO_DIRTY)
930 				bio_set_pages_dirty(bio);
931 		}
932 
933 		dio->size += bio->bi_iter.bi_size;
934 		pos += bio->bi_iter.bi_size;
935 
936 		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
937 
938 		atomic_inc(&dio->ref);
939 
940 		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
941 		dio->submit.cookie = submit_bio(bio);
942 	} while (nr_pages);
943 
944 	/*
945 	 * We need to zeroout the tail of a sub-block write if the extent type
946 	 * requires zeroing or the write extends beyond EOF. If we don't zero
947 	 * the block tail in the latter case, we can expose stale data via mmap
948 	 * reads of the EOF block.
949 	 */
950 	if (need_zeroout ||
951 	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
952 		/* zero out from the end of the write to the end of the block */
953 		pad = pos & (fs_block_size - 1);
954 		if (pad)
955 			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
956 	}
957 
958 	iov_iter_advance(dio->submit.iter, length);
959 	return length;
960 }
961 
962 ssize_t
963 iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
964 		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
965 {
966 	struct address_space *mapping = iocb->ki_filp->f_mapping;
967 	struct inode *inode = file_inode(iocb->ki_filp);
968 	size_t count = iov_iter_count(iter);
969 	loff_t pos = iocb->ki_pos, start = pos;
970 	loff_t end = iocb->ki_pos + count - 1, ret = 0;
971 	unsigned int flags = IOMAP_DIRECT;
972 	struct blk_plug plug;
973 	struct iomap_dio *dio;
974 
975 	lockdep_assert_held(&inode->i_rwsem);
976 
977 	if (!count)
978 		return 0;
979 
980 	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
981 	if (!dio)
982 		return -ENOMEM;
983 
984 	dio->iocb = iocb;
985 	atomic_set(&dio->ref, 1);
986 	dio->size = 0;
987 	dio->i_size = i_size_read(inode);
988 	dio->end_io = end_io;
989 	dio->error = 0;
990 	dio->flags = 0;
991 	dio->wait_for_completion = is_sync_kiocb(iocb);
992 
993 	dio->submit.iter = iter;
994 	dio->submit.waiter = current;
995 	dio->submit.cookie = BLK_QC_T_NONE;
996 	dio->submit.last_queue = NULL;
997 
998 	if (iov_iter_rw(iter) == READ) {
999 		if (pos >= dio->i_size)
1000 			goto out_free_dio;
1001 
1002 		if (iter->type == ITER_IOVEC)
1003 			dio->flags |= IOMAP_DIO_DIRTY;
1004 	} else {
1005 		dio->flags |= IOMAP_DIO_WRITE;
1006 		flags |= IOMAP_WRITE;
1007 	}
1008 
1009 	if (iocb->ki_flags & IOCB_NOWAIT) {
1010 		if (filemap_range_has_page(mapping, start, end)) {
1011 			ret = -EAGAIN;
1012 			goto out_free_dio;
1013 		}
1014 		flags |= IOMAP_NOWAIT;
1015 	}
1016 
1017 	ret = filemap_write_and_wait_range(mapping, start, end);
1018 	if (ret)
1019 		goto out_free_dio;
1020 
1021 	/*
1022 	 * Try to invalidate cache pages for the range we're direct
1023 	 * writing.  If this invalidation fails, tough, the write will
1024 	 * still work, but racing two incompatible write paths is a
1025 	 * pretty crazy thing to do, so we don't support it 100%.
1026 	 */
1027 	ret = invalidate_inode_pages2_range(mapping,
1028 			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
1029 	if (ret)
1030 		dio_warn_stale_pagecache(iocb->ki_filp);
1031 	ret = 0;
1032 
1033 	if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
1034 	    !inode->i_sb->s_dio_done_wq) {
1035 		ret = sb_init_dio_done_wq(inode->i_sb);
1036 		if (ret < 0)
1037 			goto out_free_dio;
1038 	}
1039 
1040 	inode_dio_begin(inode);
1041 
1042 	blk_start_plug(&plug);
1043 	do {
1044 		ret = iomap_apply(inode, pos, count, flags, ops, dio,
1045 				iomap_dio_actor);
1046 		if (ret <= 0) {
1047 			/* magic error code to fall back to buffered I/O */
1048 			if (ret == -ENOTBLK) {
1049 				dio->wait_for_completion = true;
1050 				ret = 0;
1051 			}
1052 			break;
1053 		}
1054 		pos += ret;
1055 
1056 		if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
1057 			/*
1058 			 * We only report that we've read data up to i_size.
1059 			 * Revert iter to a state corresponding to that as
1060 			 * some callers (such as splice code) rely on it.
1061 			 */
1062 			iov_iter_revert(iter, pos - dio->i_size);
1063 			break;
1064 		}
1065 	} while ((count = iov_iter_count(iter)) > 0);
1066 	blk_finish_plug(&plug);
1067 
1068 	if (ret < 0)
1069 		iomap_dio_set_error(dio, ret);
1070 
1071 	if (!atomic_dec_and_test(&dio->ref)) {
1072 		if (!dio->wait_for_completion)
1073 			return -EIOCBQUEUED;
1074 
1075 		for (;;) {
1076 			set_current_state(TASK_UNINTERRUPTIBLE);
1077 			if (!READ_ONCE(dio->submit.waiter))
1078 				break;
1079 
1080 			if (!(iocb->ki_flags & IOCB_HIPRI) ||
1081 			    !dio->submit.last_queue ||
1082 			    !blk_mq_poll(dio->submit.last_queue,
1083 					 dio->submit.cookie))
1084 				io_schedule();
1085 		}
1086 		__set_current_state(TASK_RUNNING);
1087 	}
1088 
1089 	ret = iomap_dio_complete(dio);
1090 
1091 	return ret;
1092 
1093 out_free_dio:
1094 	kfree(dio);
1095 	return ret;
1096 }
1097 EXPORT_SYMBOL_GPL(iomap_dio_rw);
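
/*
 * Typical use (an illustrative sketch; the "myfs_" names are hypothetical):
 * the caller must hold i_rwsem, and end_io may be NULL if no post-I/O work
 * such as unwritten extent conversion is needed:
 *
 *	inode_lock_shared(inode);
 *	ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
 *	inode_unlock_shared(inode);
 *
 * A write path would pass its own end_io callback (e.g. a hypothetical
 * myfs_dio_write_end_io) to convert unwritten extents or update the on-disk
 * size once the I/O completes.
 */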
1098