// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>

#include "../internal.h"

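/*
 * Attach a struct iomap_page to @page so that per-block uptodate state and
 * in-flight read/write counts can be tracked when the block size is smaller
 * than the page size.  Pages on filesystems with block size == PAGE_SIZE
 * need no such tracking and are left without private data.
 */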
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);
	unsigned int nr_blocks = PAGE_SIZE / i_blocksize(inode);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	spin_lock_init(&iop->uptodate_lock);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
	if (PageUptodate(page))
		bitmap_fill(iop->uptodate, nr_blocks);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data have
	 * their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}

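/*
 * Detach and free the struct iomap_page attached to @page, dropping the
 * extra page reference taken in iomap_page_create().
 */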
static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

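/*
 * Mark the blocks covered by [off, off + len) uptodate in the iomap_page
 * bitmap, and mark the whole page uptodate once every block in it is.
 */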
static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	bool uptodate = true;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
		if (i >= first && i <= last)
			set_bit(i, iop->uptodate);
		else if (!test_bit(i, iop->uptodate))
			uptodate = false;
	}

	if (uptodate)
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	if (PageError(page))
		return;

	if (page_has_private(page))
		iomap_iop_set_range_uptodate(page, off, len);
	else
		SetPageUptodate(page);
}

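/*
 * Read completion: the page is unlocked once the last outstanding read
 * against it (tracked by iop->read_count) has finished.
 */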
static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};

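/*
 * Copy an inline extent into the page and zero the remainder of it; inline
 * data always lives in the first page of the file.
 */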
static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

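/*
 * Read in the blocks of a single page that are not already uptodate,
 * batching contiguous ranges into bios that complete in iomap_read_end_io().
 */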
static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
		is_contig = true;

	if (is_contig &&
	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
		if (!same_page && iop)
			atomic_inc(&iop->read_count);
		goto done;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done.  Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(), skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

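/*
 * Synchronously read one block-aligned range of a page before a partial
 * overwrite, or, if the range has no data on disk (hole, unwritten, or
 * beyond EOF), zero the parts of it that the caller is not about to write.
 */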
static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

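/*
 * Prepare a page for a buffered write without buffer_heads: any block that
 * the write only partially overwrites and that is not yet uptodate is read
 * in (or zeroed for holes) before the copy happens.
 */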
static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;
	ClearPageError(page);

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

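/*
 * Copy data from the iov_iter into the page cache one page at a time, using
 * iomap_write_begin/iomap_write_end, and fall back to a single-segment copy
 * if a page fault prevents any progress.
 */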
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

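/*
 * Zero a sub-page range, either through the page cache (iomap_zero) or
 * directly on the DAX device (iomap_dax_zero).
 */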
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

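/*
 * Called from iomap_page_mkwrite() for each mapping that backs the faulted
 * page: set up the per-page bookkeeping (buffer_heads or an iomap_page) and
 * dirty the page so the newly writable range gets written back.
 */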
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	offset = page_offset(page);
	if (page->mapping != inode->i_mapping || offset > size) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (offset > size - PAGE_SIZE)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
1092