// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"

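/*
 * Writeback context passed down to ->map_blocks: caches the last mapping
 * stored in ->iomap along with the data and COW fork sequence numbers that
 * were sampled when it was created, so it can be revalidated cheaply.
 */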
struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

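/*
 * Allocate a file size update transaction and extend the on-disk file size
 * if the completed write went past the old EOF.
 */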
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

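/*
 * Complete a deferred file size update using the transaction stashed in
 * ioend->io_private, or cancel that transaction if the I/O failed.
 */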
STATIC int
xfs_setfilesize_ioend(
	struct iomap_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_private;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	xfs_trans_set_context(tp);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up all COW blocks and underlying data fork delalloc blocks on
	 * I/O error. The delalloc punch is required because this ioend was
	 * mapped to blocks in the COW fork and the associated pages are no
	 * longer dirty. If we don't remove delalloc blocks here, they become
	 * stale and can corrupt free space accounting on unmount.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED) {
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			xfs_bmap_punch_delalloc_range(ip,
						      XFS_B_TO_FSBT(mp, offset),
						      XFS_B_TO_FSB(mp, size));
		}
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);

	if (!error && xfs_ioend_is_append(ioend))
		error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
done:
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}

/*
 * If the ioend to be merged has a preallocated transaction for file
 * size updates, we need to ensure the ioend it is merged into also
 * has one.  If it already has one we can simply cancel the transaction
 * as it is guaranteed to be clean.
 */
static void
xfs_ioend_merge_private(
	struct iomap_ioend	*ioend,
	struct iomap_ioend	*next)
{
	if (!ioend->io_private) {
		ioend->io_private = next->io_private;
		next->io_private = NULL;
	} else {
		xfs_setfilesize_ioend(next, -ECANCELED);
	}
}

/* Finish all pending io completions. */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct iomap_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	iomap_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
		xfs_end_ioend(ioend);
	}
}

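/*
 * Deferred completion is only needed for appending writes, unwritten extent
 * conversion and COW remapping; other ioends need no transactional work and
 * can be completed by the generic iomap code.
 */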
static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
{
	return xfs_ioend_is_append(ioend) ||
		ioend->io_type == IOMAP_UNWRITTEN ||
		(ioend->io_flags & IOMAP_F_SHARED);
}

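/*
 * Bio completion handler: queue the ioend on the inode's completion list and
 * schedule the per-inode completion work if the list was previously empty.
 */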
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct iomap_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
					 &ip->i_ioend_work));
	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the current
 * mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct iomap_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	loff_t				offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data fork
	 * because concurrent changes could have invalidated the extent. Check
	 * the COW fork because concurrent changes since the last time we
	 * checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

/*
 * Pass in a delalloc extent and convert it to real extents, return the real
 * extent that maps offset_fsb in wpc->iomap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset)
{
	int			error;
	unsigned		*seq;

	if (whichfork == XFS_COW_FORK)
		seq = &XFS_WPC(wpc)->cow_seq;
	else
		seq = &XFS_WPC(wpc)->data_seq;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into wpc->iomap.  Allocate in a loop because it
	 * may take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
				&wpc->iomap, seq);
		if (error)
			return error;
	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

	return 0;
}

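/*
 * Look up (and, for delalloc, allocate) the extent backing @offset and cache
 * it in wpc->iomap for the writeback code, preferring a COW fork mapping over
 * a data fork one when both cover the offset.
 */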
static int
xfs_map_blocks(
	struct iomap_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb;
	int			whichfork;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page.  Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.  If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	cow_fsb = NULLFILEOFF;
	whichfork = XFS_DATA_FORK;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use it
	 * directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one.  This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the returned real extent might be larger than the
	 * original delalloc one.  Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}

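/*
 * Submission-time hook: convert the CoW extents backing the ioend to regular
 * extents, and install the XFS completion handler for ioends that need
 * transactional work (size updates, unwritten conversion, CoW remapping) at
 * I/O completion time.
 */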
static int
xfs_prepare_ioend(
	struct iomap_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	memalloc_nofs_restore(nofs_flag);

	if (xfs_ioend_needs_workqueue(ioend))
		ioend->io_bio->bi_end_io = xfs_end_bio;
	return status;
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.  Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why we
 * see an ENOSPC in writeback).
 */
static void
xfs_discard_page(
	struct page		*page,
	loff_t			fileoff)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		pageoff = offset_in_page(fileoff);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, fileoff);
	xfs_fileoff_t		pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert_ratelimited(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, fileoff);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			i_blocks_per_page(inode, page) - pageoff_fsb);
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
}

static const struct iomap_writeback_ops xfs_writeback_ops = {
	.map_blocks		= xfs_map_blocks,
	.prepare_ioend		= xfs_prepare_ioend,
	.discard_page		= xfs_discard_page,
};

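/*
 * Write back a single page through the generic iomap writeback code, unless
 * we are already inside a transaction context, in which case writeback is
 * deferred to avoid recursing into the filesystem.
 */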
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	if (WARN_ON_ONCE(current->journal_info)) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	/*
	 * Writing back data in a transaction context can result in recursive
	 * transactions. This is bad, so issue a warning and get out of here.
	 */
	if (WARN_ON_ONCE(current->journal_info))
		return 0;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}

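/*
 * The read paths are handled entirely by the generic iomap code using the
 * XFS read iomap operations.
 */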
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return iomap_readpage(page, &xfs_read_iomap_ops);
}

STATIC void
xfs_vm_readahead(
	struct readahead_control	*rac)
{
	iomap_readahead(rac, &xfs_read_iomap_ops);
}

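/*
 * Activate a swap file on XFS: point the swap code at the data device and
 * let the generic iomap swapfile code build the extent list.
 */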
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
	return iomap_swapfile_activate(sis, swap_file, span,
			&xfs_read_iomap_ops);
}

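/* Address space operations for regular (non-DAX) XFS inodes. */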
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readahead		= xfs_vm_readahead,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate  = iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

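/*
 * Address space operations for DAX inodes.  Data I/O bypasses the page cache,
 * so only writeback flushing and swapfile activation need XFS involvement.
 */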
const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};
652