1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2010 Red Hat, Inc.
4 * Copyright (C) 2016-2019 Christoph Hellwig.
5 */
6 #include <linux/module.h>
7 #include <linux/compiler.h>
8 #include <linux/fs.h>
9 #include <linux/iomap.h>
10 #include <linux/pagemap.h>
11 #include <linux/uio.h>
12 #include <linux/buffer_head.h>
13 #include <linux/dax.h>
14 #include <linux/writeback.h>
15 #include <linux/list_sort.h>
16 #include <linux/swap.h>
17 #include <linux/bio.h>
18 #include <linux/sched/signal.h>
19 #include <linux/migrate.h>
20 #include "trace.h"
21
22 #include "../internal.h"
23
24 /*
25 * Structure allocated for each page or THP when block size < page size
26 * to track sub-page uptodate status and I/O completions.
27 */
28 struct iomap_page {
29 atomic_t read_bytes_pending;
30 atomic_t write_bytes_pending;
31 spinlock_t uptodate_lock;
32 unsigned long uptodate[];
33 };
34
35 static inline struct iomap_page *to_iomap_page(struct page *page)
36 {
37 /*
38 * per-block data is stored in the head page. Callers should
39 * not be dealing with tail pages, and if they are, they can
40 * call thp_head() first.
41 */
42 VM_BUG_ON_PGFLAGS(PageTail(page), page);
43
44 if (page_has_private(page))
45 return (struct iomap_page *)page_private(page);
46 return NULL;
47 }
48
49 static struct bio_set iomap_ioend_bioset;
50
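/*
 * Allocate an iomap_page to track per-block uptodate state and attach it to
 * the page, unless the page covers only a single block or already has one.
 */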
51 static struct iomap_page *
52 iomap_page_create(struct inode *inode, struct page *page)
53 {
54 struct iomap_page *iop = to_iomap_page(page);
55 unsigned int nr_blocks = i_blocks_per_page(inode, page);
56
57 if (iop || nr_blocks <= 1)
58 return iop;
59
60 iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
61 GFP_NOFS | __GFP_NOFAIL);
62 spin_lock_init(&iop->uptodate_lock);
63 if (PageUptodate(page))
64 bitmap_fill(iop->uptodate, nr_blocks);
65 attach_page_private(page, iop);
66 return iop;
67 }
68
69 static void
70 iomap_page_release(struct page *page)
71 {
72 struct iomap_page *iop = detach_page_private(page);
73 unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
74
75 if (!iop)
76 return;
77 WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
78 WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
79 WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
80 PageUptodate(page));
81 kfree(iop);
82 }
83
84 /*
85 * Calculate the range inside the page that we actually need to read.
86 */
87 static void
88 iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
89 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
90 {
91 loff_t orig_pos = *pos;
92 loff_t isize = i_size_read(inode);
93 unsigned block_bits = inode->i_blkbits;
94 unsigned block_size = (1 << block_bits);
95 unsigned poff = offset_in_page(*pos);
96 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
97 unsigned first = poff >> block_bits;
98 unsigned last = (poff + plen - 1) >> block_bits;
99
100 /*
101 * If the block size is smaller than the page size, we need to check the
102 * per-block uptodate status and adjust the offset and length if needed
103 * to avoid reading in already uptodate ranges.
104 */
105 if (iop) {
106 unsigned int i;
107
108 /* move forward for each leading block marked uptodate */
109 for (i = first; i <= last; i++) {
110 if (!test_bit(i, iop->uptodate))
111 break;
112 *pos += block_size;
113 poff += block_size;
114 plen -= block_size;
115 first++;
116 }
117
118 /* truncate len if we find any trailing uptodate block(s) */
119 for ( ; i <= last; i++) {
120 if (test_bit(i, iop->uptodate)) {
121 plen -= (last - i + 1) * block_size;
122 last = i - 1;
123 break;
124 }
125 }
126 }
127
128 /*
129 * If the extent spans the block that contains the i_size, we need to
130 * handle both halves separately so that we properly zero data in the
131 * page cache for blocks that are entirely outside of i_size.
132 */
133 if (orig_pos <= isize && orig_pos + length > isize) {
134 unsigned end = offset_in_page(isize - 1) >> block_bits;
135
136 if (first <= end && last > end)
137 plen -= (last - end) * block_size;
138 }
139
140 *offp = poff;
141 *lenp = plen;
142 }
143
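/*
 * Mark the blocks covering [off, off + len) uptodate in the per-page bitmap,
 * and set PageUptodate once every block in the page is uptodate.
 */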
144 static void
145 iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
146 {
147 struct iomap_page *iop = to_iomap_page(page);
148 struct inode *inode = page->mapping->host;
149 unsigned first = off >> inode->i_blkbits;
150 unsigned last = (off + len - 1) >> inode->i_blkbits;
151 unsigned long flags;
152
153 spin_lock_irqsave(&iop->uptodate_lock, flags);
154 bitmap_set(iop->uptodate, first, last - first + 1);
155 if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
156 SetPageUptodate(page);
157 spin_unlock_irqrestore(&iop->uptodate_lock, flags);
158 }
159
160 static void
161 iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
162 {
163 if (PageError(page))
164 return;
165
166 if (page_has_private(page))
167 iomap_iop_set_range_uptodate(page, off, len);
168 else
169 SetPageUptodate(page);
170 }
171
172 static void
173 iomap_read_page_end_io(struct bio_vec *bvec, int error)
174 {
175 struct page *page = bvec->bv_page;
176 struct iomap_page *iop = to_iomap_page(page);
177
178 if (unlikely(error)) {
179 ClearPageUptodate(page);
180 SetPageError(page);
181 } else {
182 iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
183 }
184
185 if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
186 unlock_page(page);
187 }
188
189 static void
190 iomap_read_end_io(struct bio *bio)
191 {
192 int error = blk_status_to_errno(bio->bi_status);
193 struct bio_vec *bvec;
194 struct bvec_iter_all iter_all;
195
196 bio_for_each_segment_all(bvec, bio, iter_all)
197 iomap_read_page_end_io(bvec, error);
198 bio_put(bio);
199 }
200
201 struct iomap_readpage_ctx {
202 struct page *cur_page;
203 bool cur_page_in_bio;
204 struct bio *bio;
205 struct readahead_control *rac;
206 };
207
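/*
 * Copy the inline data from the iomap into the page, zero the rest of the
 * page past it, and mark the copied range uptodate.
 */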
208 static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
209 struct page *page)
210 {
211 const struct iomap *iomap = iomap_iter_srcmap(iter);
212 size_t size = i_size_read(iter->inode) - iomap->offset;
213 size_t poff = offset_in_page(iomap->offset);
214 void *addr;
215
216 if (PageUptodate(page))
217 return PAGE_SIZE - poff;
218
219 if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
220 return -EIO;
221 if (WARN_ON_ONCE(size > PAGE_SIZE -
222 offset_in_page(iomap->inline_data)))
223 return -EIO;
224 if (WARN_ON_ONCE(size > iomap->length))
225 return -EIO;
226 if (poff > 0)
227 iomap_page_create(iter->inode, page);
228
229 addr = kmap_local_page(page) + poff;
230 memcpy(addr, iomap->inline_data, size);
231 memset(addr + size, 0, PAGE_SIZE - poff - size);
232 kunmap_local(addr);
233 iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
234 return PAGE_SIZE - poff;
235 }
236
237 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
238 loff_t pos)
239 {
240 const struct iomap *srcmap = iomap_iter_srcmap(iter);
241
242 return srcmap->type != IOMAP_MAPPED ||
243 (srcmap->flags & IOMAP_F_NEW) ||
244 pos >= i_size_read(iter->inode);
245 }
246
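/*
 * Read one page's worth of the current mapping: inline data is copied in,
 * holes and post-EOF ranges are zeroed, and mapped blocks are added to a
 * read bio.
 */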
247 static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
248 struct iomap_readpage_ctx *ctx, loff_t offset)
249 {
250 const struct iomap *iomap = &iter->iomap;
251 loff_t pos = iter->pos + offset;
252 loff_t length = iomap_length(iter) - offset;
253 struct page *page = ctx->cur_page;
254 struct iomap_page *iop;
255 loff_t orig_pos = pos;
256 unsigned poff, plen;
257 sector_t sector;
258
259 if (iomap->type == IOMAP_INLINE) {
260 loff_t ret = iomap_read_inline_data(iter, page);
261
262 if (ret < 0)
263 return ret;
264 return 0;
265 }
266
267 /* zero post-eof blocks as the page may be mapped */
268 iop = iomap_page_create(iter->inode, page);
269 iomap_adjust_read_range(iter->inode, iop, &pos, length, &poff, &plen);
270 if (plen == 0)
271 goto done;
272
273 if (iomap_block_needs_zeroing(iter, pos)) {
274 zero_user(page, poff, plen);
275 iomap_set_range_uptodate(page, poff, plen);
276 goto done;
277 }
278
279 ctx->cur_page_in_bio = true;
280 if (iop)
281 atomic_add(plen, &iop->read_bytes_pending);
282
283 sector = iomap_sector(iomap, pos);
284 if (!ctx->bio ||
285 bio_end_sector(ctx->bio) != sector ||
286 bio_add_page(ctx->bio, page, plen, poff) != plen) {
287 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
288 gfp_t orig_gfp = gfp;
289 unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
290
291 if (ctx->bio)
292 submit_bio(ctx->bio);
293
294 if (ctx->rac) /* same as readahead_gfp_mask */
295 gfp |= __GFP_NORETRY | __GFP_NOWARN;
296 ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
297 /*
298 * If the bio_alloc fails, try it again for a single page to
299 * avoid having to deal with partial page reads. This emulates
300 * what do_mpage_readpage does.
301 */
302 if (!ctx->bio)
303 ctx->bio = bio_alloc(orig_gfp, 1);
304 ctx->bio->bi_opf = REQ_OP_READ;
305 if (ctx->rac)
306 ctx->bio->bi_opf |= REQ_RAHEAD;
307 ctx->bio->bi_iter.bi_sector = sector;
308 bio_set_dev(ctx->bio, iomap->bdev);
309 ctx->bio->bi_end_io = iomap_read_end_io;
310 __bio_add_page(ctx->bio, page, plen, poff);
311 }
312 done:
313 /*
314 * Move the caller beyond our range so that it keeps making progress.
315 * For that, we have to include any leading non-uptodate ranges, but
316 * we can skip trailing ones as they will be handled in the next
317 * iteration.
318 */
319 return pos - orig_pos + plen;
320 }
321
322 int
323 iomap_readpage(struct page *page, const struct iomap_ops *ops)
324 {
325 struct iomap_iter iter = {
326 .inode = page->mapping->host,
327 .pos = page_offset(page),
328 .len = PAGE_SIZE,
329 };
330 struct iomap_readpage_ctx ctx = {
331 .cur_page = page,
332 };
333 int ret;
334
335 trace_iomap_readpage(page->mapping->host, 1);
336
337 while ((ret = iomap_iter(&iter, ops)) > 0)
338 iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
339
340 if (ret < 0)
341 SetPageError(page);
342
343 if (ctx.bio) {
344 submit_bio(ctx.bio);
345 WARN_ON_ONCE(!ctx.cur_page_in_bio);
346 } else {
347 WARN_ON_ONCE(ctx.cur_page_in_bio);
348 unlock_page(page);
349 }
350
351 /*
352 * Just like mpage_readahead and block_read_full_page, we always
353 * return 0 and just mark the page as PageError on errors. This
354 * should be cleaned up throughout the stack eventually.
355 */
356 return 0;
357 }
358 EXPORT_SYMBOL_GPL(iomap_readpage);
359
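/*
 * Walk the length of the current mapping, pulling pages off the readahead
 * control as we cross page boundaries and reading each one with
 * iomap_readpage_iter().
 */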
360 static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
361 struct iomap_readpage_ctx *ctx)
362 {
363 loff_t length = iomap_length(iter);
364 loff_t done, ret;
365
366 for (done = 0; done < length; done += ret) {
367 if (ctx->cur_page && offset_in_page(iter->pos + done) == 0) {
368 if (!ctx->cur_page_in_bio)
369 unlock_page(ctx->cur_page);
370 put_page(ctx->cur_page);
371 ctx->cur_page = NULL;
372 }
373 if (!ctx->cur_page) {
374 ctx->cur_page = readahead_page(ctx->rac);
375 ctx->cur_page_in_bio = false;
376 }
377 ret = iomap_readpage_iter(iter, ctx, done);
378 if (ret <= 0)
379 return ret;
380 }
381
382 return done;
383 }
384
385 /**
386 * iomap_readahead - Attempt to read pages from a file.
387 * @rac: Describes the pages to be read.
388 * @ops: The operations vector for the filesystem.
389 *
390 * This function is for filesystems to call to implement their readahead
391 * address_space operation.
392 *
393 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
394 * blocks from disc), and may wait for it. The caller may be trying to
395 * access a different page, and so sleeping excessively should be avoided.
396 * It may allocate memory, but should avoid costly allocations. This
397 * function is called with memalloc_nofs set, so allocations will not cause
398 * the filesystem to be reentered.
399 */
400 void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
401 {
402 struct iomap_iter iter = {
403 .inode = rac->mapping->host,
404 .pos = readahead_pos(rac),
405 .len = readahead_length(rac),
406 };
407 struct iomap_readpage_ctx ctx = {
408 .rac = rac,
409 };
410
411 trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
412
413 while (iomap_iter(&iter, ops) > 0)
414 iter.processed = iomap_readahead_iter(&iter, &ctx);
415
416 if (ctx.bio)
417 submit_bio(ctx.bio);
418 if (ctx.cur_page) {
419 if (!ctx.cur_page_in_bio)
420 unlock_page(ctx.cur_page);
421 put_page(ctx.cur_page);
422 }
423 }
424 EXPORT_SYMBOL_GPL(iomap_readahead);
425
426 /*
427 * iomap_is_partially_uptodate checks whether blocks within a page are
428 * uptodate or not.
429 *
430 * Returns true if all blocks which correspond to a file portion
431 * we want to read within the page are uptodate.
432 */
433 int
434 iomap_is_partially_uptodate(struct page *page, unsigned long from,
435 unsigned long count)
436 {
437 struct iomap_page *iop = to_iomap_page(page);
438 struct inode *inode = page->mapping->host;
439 unsigned len, first, last;
440 unsigned i;
441
442 /* Limit range to one page */
443 len = min_t(unsigned, PAGE_SIZE - from, count);
444
445 /* First and last blocks in range within page */
446 first = from >> inode->i_blkbits;
447 last = (from + len - 1) >> inode->i_blkbits;
448
449 if (iop) {
450 for (i = first; i <= last; i++)
451 if (!test_bit(i, iop->uptodate))
452 return 0;
453 return 1;
454 }
455
456 return 0;
457 }
458 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
459
460 int
461 iomap_releasepage(struct page *page, gfp_t gfp_mask)
462 {
463 trace_iomap_releasepage(page->mapping->host, page_offset(page),
464 PAGE_SIZE);
465
466 /*
467 * mm accommodates an old ext3 case where clean pages might not have had
468 * the dirty bit cleared. Thus, it can send actual dirty pages to
469 * ->releasepage() via shrink_active_list(); skip those here.
470 */
471 if (PageDirty(page) || PageWriteback(page))
472 return 0;
473 iomap_page_release(page);
474 return 1;
475 }
476 EXPORT_SYMBOL_GPL(iomap_releasepage);
477
478 void
479 iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
480 {
481 trace_iomap_invalidatepage(page->mapping->host, offset, len);
482
483 /*
484 * If we're invalidating the entire page, clear the dirty state from it
485 * and release it to avoid unnecessary buildup of the LRU.
486 */
487 if (offset == 0 && len == PAGE_SIZE) {
488 WARN_ON_ONCE(PageWriteback(page));
489 cancel_dirty_page(page);
490 iomap_page_release(page);
491 }
492 }
493 EXPORT_SYMBOL_GPL(iomap_invalidatepage);
494
495 #ifdef CONFIG_MIGRATION
496 int
497 iomap_migrate_page(struct address_space *mapping, struct page *newpage,
498 struct page *page, enum migrate_mode mode)
499 {
500 int ret;
501
502 ret = migrate_page_move_mapping(mapping, newpage, page, 0);
503 if (ret != MIGRATEPAGE_SUCCESS)
504 return ret;
505
506 if (page_has_private(page))
507 attach_page_private(newpage, detach_page_private(page));
508
509 if (mode != MIGRATE_SYNC_NO_COPY)
510 migrate_page_copy(newpage, page);
511 else
512 migrate_page_states(newpage, page);
513 return MIGRATEPAGE_SUCCESS;
514 }
515 EXPORT_SYMBOL_GPL(iomap_migrate_page);
516 #endif /* CONFIG_MIGRATION */
517
518 static void
519 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
520 {
521 loff_t i_size = i_size_read(inode);
522
523 /*
524 * Only truncate newly allocated pages beyond EOF, even if the
525 * write started inside the existing inode size.
526 */
527 if (pos + len > i_size)
528 truncate_pagecache_range(inode, max(pos, i_size),
529 pos + len - 1);
530 }
531
532 static int
533 iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
534 unsigned plen, const struct iomap *iomap)
535 {
536 struct bio_vec bvec;
537 struct bio bio;
538
539 bio_init(&bio, &bvec, 1);
540 bio.bi_opf = REQ_OP_READ;
541 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
542 bio_set_dev(&bio, iomap->bdev);
543 __bio_add_page(&bio, page, plen, poff);
544 return submit_bio_wait(&bio);
545 }
546
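/*
 * Bring the blocks touched by a write uptodate before the copy: zero new and
 * post-EOF blocks, and read existing data synchronously for any block that is
 * only partially overwritten.
 */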
547 static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
548 unsigned len, struct page *page)
549 {
550 const struct iomap *srcmap = iomap_iter_srcmap(iter);
551 struct iomap_page *iop = iomap_page_create(iter->inode, page);
552 loff_t block_size = i_blocksize(iter->inode);
553 loff_t block_start = round_down(pos, block_size);
554 loff_t block_end = round_up(pos + len, block_size);
555 unsigned from = offset_in_page(pos), to = from + len, poff, plen;
556
557 if (PageUptodate(page))
558 return 0;
559 ClearPageError(page);
560
561 do {
562 iomap_adjust_read_range(iter->inode, iop, &block_start,
563 block_end - block_start, &poff, &plen);
564 if (plen == 0)
565 break;
566
567 if (!(iter->flags & IOMAP_UNSHARE) &&
568 (from <= poff || from >= poff + plen) &&
569 (to <= poff || to >= poff + plen))
570 continue;
571
572 if (iomap_block_needs_zeroing(iter, block_start)) {
573 if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
574 return -EIO;
575 zero_user_segments(page, poff, from, to, poff + plen);
576 } else {
577 int status = iomap_read_page_sync(block_start, page,
578 poff, plen, srcmap);
579 if (status)
580 return status;
581 }
582 iomap_set_range_uptodate(page, poff, plen);
583 } while ((block_start += plen) < block_end);
584
585 return 0;
586 }
587
588 static int iomap_write_begin_inline(const struct iomap_iter *iter,
589 struct page *page)
590 {
591 int ret;
592
593 /* needs more work for the tailpacking case; disable for now */
594 if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
595 return -EIO;
596 ret = iomap_read_inline_data(iter, page);
597 if (ret < 0)
598 return ret;
599 return 0;
600 }
601
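/*
 * Grab and lock the page cache page for a buffered write and prepare its
 * blocks via the inline, buffer_head or native iomap path.
 */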
602 static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
603 unsigned len, struct page **pagep)
604 {
605 const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
606 const struct iomap *srcmap = iomap_iter_srcmap(iter);
607 struct page *page;
608 int status = 0;
609
610 BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
611 if (srcmap != &iter->iomap)
612 BUG_ON(pos + len > srcmap->offset + srcmap->length);
613
614 if (fatal_signal_pending(current))
615 return -EINTR;
616
617 if (page_ops && page_ops->page_prepare) {
618 status = page_ops->page_prepare(iter->inode, pos, len);
619 if (status)
620 return status;
621 }
622
623 page = grab_cache_page_write_begin(iter->inode->i_mapping,
624 pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
625 if (!page) {
626 status = -ENOMEM;
627 goto out_no_page;
628 }
629
630 if (srcmap->type == IOMAP_INLINE)
631 status = iomap_write_begin_inline(iter, page);
632 else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
633 status = __block_write_begin_int(page, pos, len, NULL, srcmap);
634 else
635 status = __iomap_write_begin(iter, pos, len, page);
636
637 if (unlikely(status))
638 goto out_unlock;
639
640 *pagep = page;
641 return 0;
642
643 out_unlock:
644 unlock_page(page);
645 put_page(page);
646 iomap_write_failed(iter->inode, pos, len);
647
648 out_no_page:
649 if (page_ops && page_ops->page_done)
650 page_ops->page_done(iter->inode, pos, 0, NULL);
651 return status;
652 }
653
654 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
655 size_t copied, struct page *page)
656 {
657 flush_dcache_page(page);
658
659 /*
660 * The blocks that were entirely written will now be uptodate, so we
661 * don't have to worry about a readpage reading them and overwriting a
662 * partial write. However, if we've encountered a short write and only
663 * partially written into a block, it will not be marked uptodate, so a
664 * readpage might come in and destroy our partial write.
665 *
666 * Do the simplest thing and just treat any short write to a
667 * non-uptodate page as a zero-length write, and force the caller to
668 * redo the whole thing.
669 */
670 if (unlikely(copied < len && !PageUptodate(page)))
671 return 0;
672 iomap_set_range_uptodate(page, offset_in_page(pos), len);
673 __set_page_dirty_nobuffers(page);
674 return copied;
675 }
676
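/*
 * Copy the written data from the page back into the inline data area of the
 * iomap and mark the inode dirty.
 */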
677 static size_t iomap_write_end_inline(const struct iomap_iter *iter,
678 struct page *page, loff_t pos, size_t copied)
679 {
680 const struct iomap *iomap = &iter->iomap;
681 void *addr;
682
683 WARN_ON_ONCE(!PageUptodate(page));
684 BUG_ON(!iomap_inline_data_valid(iomap));
685
686 flush_dcache_page(page);
687 addr = kmap_local_page(page) + pos;
688 memcpy(iomap_inline_data(iomap, pos), addr, copied);
689 kunmap_local(addr);
690
691 mark_inode_dirty(iter->inode);
692 return copied;
693 }
694
695 /* Returns the number of bytes copied. May be 0. Cannot be an errno. */
696 static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
697 size_t copied, struct page *page)
698 {
699 const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
700 const struct iomap *srcmap = iomap_iter_srcmap(iter);
701 loff_t old_size = iter->inode->i_size;
702 size_t ret;
703
704 if (srcmap->type == IOMAP_INLINE) {
705 ret = iomap_write_end_inline(iter, page, pos, copied);
706 } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
707 ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
708 copied, page, NULL);
709 } else {
710 ret = __iomap_write_end(iter->inode, pos, len, copied, page);
711 }
712
713 /*
714 * Update the in-memory inode size after copying the data into the page
715 * cache. It's up to the file system to write the updated size to disk,
716 * preferably after I/O completion so that no stale data is exposed.
717 */
718 if (pos + ret > old_size) {
719 i_size_write(iter->inode, pos + ret);
720 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
721 }
722 unlock_page(page);
723
724 if (old_size < pos)
725 pagecache_isize_extended(iter->inode, old_size, pos);
726 if (page_ops && page_ops->page_done)
727 page_ops->page_done(iter->inode, pos, ret, page);
728 put_page(page);
729
730 if (ret < len)
731 iomap_write_failed(iter->inode, pos, len);
732 return ret;
733 }
734
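/*
 * Copy data from the user iterator into the page cache one page at a time,
 * retrying short copies and balancing dirty pages as we go.
 */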
735 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
736 {
737 loff_t length = iomap_length(iter);
738 loff_t pos = iter->pos;
739 ssize_t written = 0;
740 long status = 0;
741
742 do {
743 struct page *page;
744 unsigned long offset; /* Offset into pagecache page */
745 unsigned long bytes; /* Bytes to write to page */
746 size_t copied; /* Bytes copied from user */
747
748 offset = offset_in_page(pos);
749 bytes = min_t(unsigned long, PAGE_SIZE - offset,
750 iov_iter_count(i));
751 again:
752 if (bytes > length)
753 bytes = length;
754
755 /*
756 * Bring in the user page that we'll copy from _first_.
757 * Otherwise there's a nasty deadlock on copying from the
758 * same page as we're writing to, without it being marked
759 * up-to-date.
760 */
761 if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
762 status = -EFAULT;
763 break;
764 }
765
766 status = iomap_write_begin(iter, pos, bytes, &page);
767 if (unlikely(status))
768 break;
769
770 if (mapping_writably_mapped(iter->inode->i_mapping))
771 flush_dcache_page(page);
772
773 copied = copy_page_from_iter_atomic(page, offset, bytes, i);
774
775 status = iomap_write_end(iter, pos, bytes, copied, page);
776
777 if (unlikely(copied != status))
778 iov_iter_revert(i, copied - status);
779
780 cond_resched();
781 if (unlikely(status == 0)) {
782 /*
783 * A short copy made iomap_write_end() reject the
784 * thing entirely. Might be memory poisoning
785 * halfway through, might be a race with munmap,
786 * might be severe memory pressure.
787 */
788 if (copied)
789 bytes = copied;
790 goto again;
791 }
792 pos += status;
793 written += status;
794 length -= status;
795
796 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
797 } while (iov_iter_count(i) && length);
798
799 return written ? written : status;
800 }
801
802 ssize_t
803 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
804 const struct iomap_ops *ops)
805 {
806 struct iomap_iter iter = {
807 .inode = iocb->ki_filp->f_mapping->host,
808 .pos = iocb->ki_pos,
809 .len = iov_iter_count(i),
810 .flags = IOMAP_WRITE,
811 };
812 int ret;
813
814 while ((ret = iomap_iter(&iter, ops)) > 0)
815 iter.processed = iomap_write_iter(&iter, i);
816 if (iter.pos == iocb->ki_pos)
817 return ret;
818 return iter.pos - iocb->ki_pos;
819 }
820 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
821
822 static loff_t iomap_unshare_iter(struct iomap_iter *iter)
823 {
824 struct iomap *iomap = &iter->iomap;
825 const struct iomap *srcmap = iomap_iter_srcmap(iter);
826 loff_t pos = iter->pos;
827 loff_t length = iomap_length(iter);
828 long status = 0;
829 loff_t written = 0;
830
831 /* don't bother with blocks that are not shared to start with */
832 if (!(iomap->flags & IOMAP_F_SHARED))
833 return length;
834 /* don't bother with holes or unwritten extents */
835 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
836 return length;
837
838 do {
839 unsigned long offset = offset_in_page(pos);
840 unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
841 struct page *page;
842
843 status = iomap_write_begin(iter, pos, bytes, &page);
844 if (unlikely(status))
845 return status;
846
847 status = iomap_write_end(iter, pos, bytes, bytes, page);
848 if (WARN_ON_ONCE(status == 0))
849 return -EIO;
850
851 cond_resched();
852
853 pos += status;
854 written += status;
855 length -= status;
856
857 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
858 } while (length);
859
860 return written;
861 }
862
863 int
864 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
865 const struct iomap_ops *ops)
866 {
867 struct iomap_iter iter = {
868 .inode = inode,
869 .pos = pos,
870 .len = len,
871 .flags = IOMAP_WRITE | IOMAP_UNSHARE,
872 };
873 int ret;
874
875 while ((ret = iomap_iter(&iter, ops)) > 0)
876 iter.processed = iomap_unshare_iter(&iter);
877 return ret;
878 }
879 EXPORT_SYMBOL_GPL(iomap_file_unshare);
880
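/* Zero a sub-page range through the page cache. */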
881 static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
882 {
883 struct page *page;
884 int status;
885 unsigned offset = offset_in_page(pos);
886 unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
887
888 status = iomap_write_begin(iter, pos, bytes, &page);
889 if (status)
890 return status;
891
892 zero_user(page, offset, bytes);
893 mark_page_accessed(page);
894
895 return iomap_write_end(iter, pos, bytes, bytes, page);
896 }
897
898 static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
899 {
900 struct iomap *iomap = &iter->iomap;
901 const struct iomap *srcmap = iomap_iter_srcmap(iter);
902 loff_t pos = iter->pos;
903 loff_t length = iomap_length(iter);
904 loff_t written = 0;
905
906 /* already zeroed? we're done. */
907 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
908 return length;
909
910 do {
911 s64 bytes;
912
913 if (IS_DAX(iter->inode))
914 bytes = dax_iomap_zero(pos, length, iomap);
915 else
916 bytes = __iomap_zero_iter(iter, pos, length);
917 if (bytes < 0)
918 return bytes;
919
920 pos += bytes;
921 length -= bytes;
922 written += bytes;
923 if (did_zero)
924 *did_zero = true;
925 } while (length > 0);
926
927 return written;
928 }
929
930 int
931 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
932 const struct iomap_ops *ops)
933 {
934 struct iomap_iter iter = {
935 .inode = inode,
936 .pos = pos,
937 .len = len,
938 .flags = IOMAP_ZERO,
939 };
940 int ret;
941
942 while ((ret = iomap_iter(&iter, ops)) > 0)
943 iter.processed = iomap_zero_iter(&iter, did_zero);
944 return ret;
945 }
946 EXPORT_SYMBOL_GPL(iomap_zero_range);
947
948 int
949 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
950 const struct iomap_ops *ops)
951 {
952 unsigned int blocksize = i_blocksize(inode);
953 unsigned int off = pos & (blocksize - 1);
954
955 /* Block boundary? Nothing to do */
956 if (!off)
957 return 0;
958 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
959 }
960 EXPORT_SYMBOL_GPL(iomap_truncate_page);
961
962 static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
963 struct page *page)
964 {
965 loff_t length = iomap_length(iter);
966 int ret;
967
968 if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
969 ret = __block_write_begin_int(page, iter->pos, length, NULL,
970 &iter->iomap);
971 if (ret)
972 return ret;
973 block_commit_write(page, 0, length);
974 } else {
975 WARN_ON_ONCE(!PageUptodate(page));
976 set_page_dirty(page);
977 }
978
979 return length;
980 }
981
982 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
983 {
984 struct iomap_iter iter = {
985 .inode = file_inode(vmf->vma->vm_file),
986 .flags = IOMAP_WRITE | IOMAP_FAULT,
987 };
988 struct page *page = vmf->page;
989 ssize_t ret;
990
991 lock_page(page);
992 ret = page_mkwrite_check_truncate(page, iter.inode);
993 if (ret < 0)
994 goto out_unlock;
995 iter.pos = page_offset(page);
996 iter.len = ret;
997 while ((ret = iomap_iter(&iter, ops)) > 0)
998 iter.processed = iomap_page_mkwrite_iter(&iter, page);
999
1000 if (ret < 0)
1001 goto out_unlock;
1002 wait_for_stable_page(page);
1003 return VM_FAULT_LOCKED;
1004 out_unlock:
1005 unlock_page(page);
1006 return block_page_mkwrite_return(ret);
1007 }
1008 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
1009
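/*
 * Finish writeback for a single page: record any error and end page
 * writeback once all outstanding bytes for the page have completed.
 */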
1010 static void
1011 iomap_finish_page_writeback(struct inode *inode, struct page *page,
1012 int error, unsigned int len)
1013 {
1014 struct iomap_page *iop = to_iomap_page(page);
1015
1016 if (error) {
1017 SetPageError(page);
1018 mapping_set_error(inode->i_mapping, error);
1019 }
1020
1021 WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
1022 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
1023
1024 if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
1025 end_page_writeback(page);
1026 }
1027
1028 /*
1029 * We're now finished for good with this ioend structure. Update the page
1030 * state, release holds on bios, and finally free up memory. Do not use the
1031 * ioend after this.
1032 */
1033 static void
1034 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1035 {
1036 struct inode *inode = ioend->io_inode;
1037 struct bio *bio = &ioend->io_inline_bio;
1038 struct bio *last = ioend->io_bio, *next;
1039 u64 start = bio->bi_iter.bi_sector;
1040 loff_t offset = ioend->io_offset;
1041 bool quiet = bio_flagged(bio, BIO_QUIET);
1042
1043 for (bio = &ioend->io_inline_bio; bio; bio = next) {
1044 struct bio_vec *bv;
1045 struct bvec_iter_all iter_all;
1046
1047 /*
1048 * For the last bio, bi_private points to the ioend, so we
1049 * need to explicitly end the iteration here.
1050 */
1051 if (bio == last)
1052 next = NULL;
1053 else
1054 next = bio->bi_private;
1055
1056 /* walk each page on bio, ending page IO on them */
1057 bio_for_each_segment_all(bv, bio, iter_all)
1058 iomap_finish_page_writeback(inode, bv->bv_page, error,
1059 bv->bv_len);
1060 bio_put(bio);
1061 }
1062 /* The ioend has been freed by bio_put() */
1063
1064 if (unlikely(error && !quiet)) {
1065 printk_ratelimited(KERN_ERR
1066 "%s: writeback error on inode %lu, offset %lld, sector %llu",
1067 inode->i_sb->s_id, inode->i_ino, offset, start);
1068 }
1069 }
1070
1071 void
1072 iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1073 {
1074 struct list_head tmp;
1075
1076 list_replace_init(&ioend->io_list, &tmp);
1077 iomap_finish_ioend(ioend, error);
1078
1079 while (!list_empty(&tmp)) {
1080 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1081 list_del_init(&ioend->io_list);
1082 iomap_finish_ioend(ioend, error);
1083 }
1084 }
1085 EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1086
1087 /*
1088 * We can merge two adjacent ioends if they have the same set of work to do.
1089 */
1090 static bool
1091 iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1092 {
1093 if (ioend->io_bio->bi_status != next->io_bio->bi_status)
1094 return false;
1095 if ((ioend->io_flags & IOMAP_F_SHARED) ^
1096 (next->io_flags & IOMAP_F_SHARED))
1097 return false;
1098 if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1099 (next->io_type == IOMAP_UNWRITTEN))
1100 return false;
1101 if (ioend->io_offset + ioend->io_size != next->io_offset)
1102 return false;
1103 return true;
1104 }
1105
1106 void
1107 iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1108 {
1109 struct iomap_ioend *next;
1110
1111 INIT_LIST_HEAD(&ioend->io_list);
1112
1113 while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1114 io_list))) {
1115 if (!iomap_ioend_can_merge(ioend, next))
1116 break;
1117 list_move_tail(&next->io_list, &ioend->io_list);
1118 ioend->io_size += next->io_size;
1119 }
1120 }
1121 EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1122
1123 static int
1124 iomap_ioend_compare(void *priv, const struct list_head *a,
1125 const struct list_head *b)
1126 {
1127 struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1128 struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1129
1130 if (ia->io_offset < ib->io_offset)
1131 return -1;
1132 if (ia->io_offset > ib->io_offset)
1133 return 1;
1134 return 0;
1135 }
1136
1137 void
1138 iomap_sort_ioends(struct list_head *ioend_list)
1139 {
1140 list_sort(NULL, ioend_list, iomap_ioend_compare);
1141 }
1142 EXPORT_SYMBOL_GPL(iomap_sort_ioends);
1143
1144 static void iomap_writepage_end_bio(struct bio *bio)
1145 {
1146 struct iomap_ioend *ioend = bio->bi_private;
1147
1148 iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1149 }
1150
1151 /*
1152 * Submit the final bio for an ioend.
1153 *
1154 * If @error is non-zero, it means that we have a situation where some part of
1155 * the submission process has failed after we've marked pages for writeback
1156 * and unlocked them. In this situation, we need to fail the bio instead of
1157 * submitting it. This typically only happens on a filesystem shutdown.
1158 */
1159 static int
1160 iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1161 int error)
1162 {
1163 ioend->io_bio->bi_private = ioend;
1164 ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1165
1166 if (wpc->ops->prepare_ioend)
1167 error = wpc->ops->prepare_ioend(ioend, error);
1168 if (error) {
1169 /*
1170 * If we're failing the IO now, just mark the ioend with an
1171 * error and finish it. This will run IO completion immediately
1172 * as there is only one reference to the ioend at this point in
1173 * time.
1174 */
1175 ioend->io_bio->bi_status = errno_to_blk_status(error);
1176 bio_endio(ioend->io_bio);
1177 return error;
1178 }
1179
1180 submit_bio(ioend->io_bio);
1181 return 0;
1182 }
1183
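/*
 * Allocate a new ioend, together with its embedded bio, for writeback
 * starting at @offset/@sector.
 */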
1184 static struct iomap_ioend *
1185 iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
1186 loff_t offset, sector_t sector, struct writeback_control *wbc)
1187 {
1188 struct iomap_ioend *ioend;
1189 struct bio *bio;
1190
1191 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
1192 bio_set_dev(bio, wpc->iomap.bdev);
1193 bio->bi_iter.bi_sector = sector;
1194 bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
1195 bio->bi_write_hint = inode->i_write_hint;
1196 wbc_init_bio(wbc, bio);
1197
1198 ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1199 INIT_LIST_HEAD(&ioend->io_list);
1200 ioend->io_type = wpc->iomap.type;
1201 ioend->io_flags = wpc->iomap.flags;
1202 ioend->io_inode = inode;
1203 ioend->io_size = 0;
1204 ioend->io_offset = offset;
1205 ioend->io_bio = bio;
1206 return ioend;
1207 }
1208
1209 /*
1210 * Allocate a new bio, and chain the old bio to the new one.
1211 *
1212 * Note that we have to perform the chaining in this unintuitive order
1213 * so that the bi_private linkage is set up in the right direction for the
1214 * traversal in iomap_finish_ioend().
1215 */
1216 static struct bio *
1217 iomap_chain_bio(struct bio *prev)
1218 {
1219 struct bio *new;
1220
1221 new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
1222 bio_copy_dev(new, prev); /* also copies over blkcg information */
1223 new->bi_iter.bi_sector = bio_end_sector(prev);
1224 new->bi_opf = prev->bi_opf;
1225 new->bi_write_hint = prev->bi_write_hint;
1226
1227 bio_chain(prev, new);
1228 bio_get(prev); /* for iomap_finish_ioend */
1229 submit_bio(prev);
1230 return new;
1231 }
1232
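/*
 * Check whether the block at @offset/@sector is contiguous with, and of the
 * same type and shared state as, the current cached ioend.
 */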
1233 static bool
1234 iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
1235 sector_t sector)
1236 {
1237 if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1238 (wpc->ioend->io_flags & IOMAP_F_SHARED))
1239 return false;
1240 if (wpc->iomap.type != wpc->ioend->io_type)
1241 return false;
1242 if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1243 return false;
1244 if (sector != bio_end_sector(wpc->ioend->io_bio))
1245 return false;
1246 return true;
1247 }
1248
1249 /*
1250 * Test to see if we have an existing ioend structure that we could append to
1251 * first; otherwise finish off the current ioend and start another.
1252 */
1253 static void
1254 iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
1255 struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
1256 struct writeback_control *wbc, struct list_head *iolist)
1257 {
1258 sector_t sector = iomap_sector(&wpc->iomap, offset);
1259 unsigned len = i_blocksize(inode);
1260 unsigned poff = offset & (PAGE_SIZE - 1);
1261
1262 if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
1263 if (wpc->ioend)
1264 list_add(&wpc->ioend->io_list, iolist);
1265 wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
1266 }
1267
1268 if (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len) {
1269 wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
1270 __bio_add_page(wpc->ioend->io_bio, page, len, poff);
1271 }
1272
1273 if (iop)
1274 atomic_add(len, &iop->write_bytes_pending);
1275 wpc->ioend->io_size += len;
1276 wbc_account_cgroup_owner(wbc, page, len);
1277 }
1278
1279 /*
1280 * We implement an immediate ioend submission policy here to avoid needing to
1281 * chain multiple ioends and hence nest mempool allocations which can violate
1282 * the forward progress guarantees we need to provide. The current ioend we're
1283 * adding blocks to is cached in the writepage context, and if the new block
1284 * doesn't append to the cached ioend, it will create a new ioend and cache that
1285 * instead.
1286 *
1287 * If a new ioend is created and cached, the old ioend is returned and queued
1288 * locally for submission once the entire page is processed or an error has been
1289 * detected. While ioends are submitted immediately after they are completed,
1290 * batching optimisations are provided by higher level block plugging.
1291 *
1292 * At the end of a writeback pass, there will be a cached ioend remaining on the
1293 * writepage context that the caller will need to submit.
1294 */
1295 static int
1296 iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1297 struct writeback_control *wbc, struct inode *inode,
1298 struct page *page, u64 end_offset)
1299 {
1300 struct iomap_page *iop = iomap_page_create(inode, page);
1301 struct iomap_ioend *ioend, *next;
1302 unsigned len = i_blocksize(inode);
1303 u64 file_offset; /* file offset of page */
1304 int error = 0, count = 0, i;
1305 LIST_HEAD(submit_list);
1306
1307 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
1308
1309 /*
1310 * Walk through the page to find areas to write back. If we run off the
1311 * end of the current map or find the current map invalid, grab a new
1312 * one.
1313 */
1314 for (i = 0, file_offset = page_offset(page);
1315 i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
1316 i++, file_offset += len) {
1317 if (iop && !test_bit(i, iop->uptodate))
1318 continue;
1319
1320 error = wpc->ops->map_blocks(wpc, inode, file_offset);
1321 if (error)
1322 break;
1323 if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
1324 continue;
1325 if (wpc->iomap.type == IOMAP_HOLE)
1326 continue;
1327 iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
1328 &submit_list);
1329 count++;
1330 }
1331
1332 WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
1333 WARN_ON_ONCE(!PageLocked(page));
1334 WARN_ON_ONCE(PageWriteback(page));
1335 WARN_ON_ONCE(PageDirty(page));
1336
1337 /*
1338 * We cannot cancel the ioend directly here on error. We may have
1339 * already set other pages under writeback and hence we have to run I/O
1340 * completion to mark the error state of the pages under writeback
1341 * appropriately.
1342 */
1343 if (unlikely(error)) {
1344 /*
1345 * Let the filesystem know what portion of the current page
1346 * failed to map. If the page hasn't been added to ioend, it
1347 * won't be affected by I/O completion and we must unlock it
1348 * now.
1349 */
1350 if (wpc->ops->discard_page)
1351 wpc->ops->discard_page(page, file_offset);
1352 if (!count) {
1353 ClearPageUptodate(page);
1354 unlock_page(page);
1355 goto done;
1356 }
1357 }
1358
1359 set_page_writeback(page);
1360 unlock_page(page);
1361
1362 /*
1363 * Preserve the original error if there was one; catch
1364 * submission errors here and propagate into subsequent ioend
1365 * submissions.
1366 */
1367 list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
1368 int error2;
1369
1370 list_del_init(&ioend->io_list);
1371 error2 = iomap_submit_ioend(wpc, ioend, error);
1372 if (error2 && !error)
1373 error = error2;
1374 }
1375
1376 /*
1377 * We can end up here with no error and nothing to write only if we race
1378 * with a partial page truncate on a sub-page block sized filesystem.
1379 */
1380 if (!count)
1381 end_page_writeback(page);
1382 done:
1383 mapping_set_error(page->mapping, error);
1384 return error;
1385 }
1386
1387 /*
1388 * Write out a dirty page.
1389 *
1390 * For delalloc space on the page, we need to allocate space and flush it.
1391 * For unwritten space on the page, we need to start the conversion to
1392 * regular allocated space.
1393 */
1394 static int
1395 iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
1396 {
1397 struct iomap_writepage_ctx *wpc = data;
1398 struct inode *inode = page->mapping->host;
1399 pgoff_t end_index;
1400 u64 end_offset;
1401 loff_t offset;
1402
1403 trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
1404
1405 /*
1406 * Refuse to write the page out if we're called from reclaim context.
1407 *
1408 * This avoids stack overflows when called from deeply used stacks in
1409 * random callers for direct reclaim or memcg reclaim. We explicitly
1410 * allow reclaim from kswapd as the stack usage there is relatively low.
1411 *
1412 * This should never happen except in the case of a VM regression so
1413 * warn about it.
1414 */
1415 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1416 PF_MEMALLOC))
1417 goto redirty;
1418
1419 /*
1420 * Is this page beyond the end of the file?
1421 *
1422 * The page index is less than the end_index, adjust the end_offset
1423 * to the highest offset that this page should represent.
1424 * -----------------------------------------------------
1425 * | file mapping | <EOF> |
1426 * -----------------------------------------------------
1427 * | Page ... | Page N-2 | Page N-1 | Page N | |
1428 * ^--------------------------------^----------|--------
1429 * | desired writeback range | see else |
1430 * ---------------------------------^------------------|
1431 */
1432 offset = i_size_read(inode);
1433 end_index = offset >> PAGE_SHIFT;
1434 if (page->index < end_index)
1435 end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
1436 else {
1437 /*
1438 * Check whether the page to write out is beyond or straddles
1439 * i_size or not.
1440 * -------------------------------------------------------
1441 * | file mapping | <EOF> |
1442 * -------------------------------------------------------
1443 * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
1444 * ^--------------------------------^-----------|---------
1445 * | | Straddles |
1446 * ---------------------------------^-----------|--------|
1447 */
1448 unsigned offset_into_page = offset & (PAGE_SIZE - 1);
1449
1450 /*
1451 * Skip the page if it's fully outside i_size, e.g. due to a
1452 * truncate operation that's in progress. We must redirty the
1453 * page so that reclaim stops reclaiming it. Otherwise
1454 * iomap_vm_releasepage() is called on it and gets confused.
1455 *
1456 * Note that the end_index is unsigned long. If the given
1457 * offset is greater than 16TB on a 32-bit system then if we
1458 * checked if the page is fully outside i_size with
1459 * "if (page->index >= end_index + 1)", "end_index + 1" would
1460 * overflow and evaluate to 0. Hence this page would be
1461 * redirtied and written out repeatedly, which would result in
1462 * an infinite loop; the user program performing this operation
1463 * would hang. Instead, we can detect this situation by
1464 * checking if the page is totally beyond i_size or if its
1465 * offset is just equal to the EOF.
1466 */
1467 if (page->index > end_index ||
1468 (page->index == end_index && offset_into_page == 0))
1469 goto redirty;
1470
1471 /*
1472 * The page straddles i_size. It must be zeroed out on each
1473 * and every writepage invocation because it may be mmapped.
1474 * "A file is mapped in multiples of the page size. For a file
1475 * that is not a multiple of the page size, the remaining
1476 * memory is zeroed when mapped, and writes to that region are
1477 * not written out to the file."
1478 */
1479 zero_user_segment(page, offset_into_page, PAGE_SIZE);
1480
1481 /* Adjust the end_offset to the end of file */
1482 end_offset = offset;
1483 }
1484
1485 return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
1486
1487 redirty:
1488 redirty_page_for_writepage(wbc, page);
1489 unlock_page(page);
1490 return 0;
1491 }
1492
1493 int
1494 iomap_writepage(struct page *page, struct writeback_control *wbc,
1495 struct iomap_writepage_ctx *wpc,
1496 const struct iomap_writeback_ops *ops)
1497 {
1498 int ret;
1499
1500 wpc->ops = ops;
1501 ret = iomap_do_writepage(page, wbc, wpc);
1502 if (!wpc->ioend)
1503 return ret;
1504 return iomap_submit_ioend(wpc, wpc->ioend, ret);
1505 }
1506 EXPORT_SYMBOL_GPL(iomap_writepage);
1507
1508 int
1509 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
1510 struct iomap_writepage_ctx *wpc,
1511 const struct iomap_writeback_ops *ops)
1512 {
1513 int ret;
1514
1515 wpc->ops = ops;
1516 ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
1517 if (!wpc->ioend)
1518 return ret;
1519 return iomap_submit_ioend(wpc, wpc->ioend, ret);
1520 }
1521 EXPORT_SYMBOL_GPL(iomap_writepages);
1522
1523 static int __init iomap_init(void)
1524 {
1525 return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
1526 offsetof(struct iomap_ioend, io_inline_bio),
1527 BIOSET_NEED_BVECS);
1528 }
1529 fs_initcall(iomap_init);
1530