// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"

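/*
 * Per-writeback context: caches the current mapping in ctx.iomap along with
 * the data and COW fork sequence numbers it was sampled at, so that
 * xfs_imap_valid() can cheaply revalidate the mapping for later blocks.
 */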
struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

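/*
 * Reserve log space up front for an on-disk file size update, and stash the
 * transaction in ioend->io_private so that I/O completion can commit it.
 */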
STATIC int
xfs_setfilesize_trans_alloc(
	struct iomap_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_private = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	xfs_trans_clear_context(tp);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

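/*
 * Allocate a size-update transaction and update the on-disk file size, for
 * callers that do not already have a preallocated transaction.
 */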
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

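/*
 * Commit the preallocated size-update transaction attached to an ioend, or
 * cancel it if the I/O failed.
 */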
STATIC int
xfs_setfilesize_ioend(
	struct iomap_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_private;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	xfs_trans_set_context(tp);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up all COW blocks and underlying data fork delalloc blocks on
	 * I/O error.  The delalloc punch is required because this ioend was
	 * mapped to blocks in the COW fork and the associated pages are no
	 * longer dirty.  If we don't remove delalloc blocks here, they become
	 * stale and can corrupt free space accounting on unmount.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED) {
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			xfs_bmap_punch_delalloc_range(ip,
					XFS_B_TO_FSBT(mp, offset),
					XFS_B_TO_FSB(mp, size));
		}
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);

done:
	if (ioend->io_private)
		error = xfs_setfilesize_ioend(ioend, error);
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}

/*
 * If the to-be-merged ioend has a preallocated transaction for file size
 * updates we need to ensure the ioend it is merged into also has one.  If
 * it already has one we can simply cancel the transaction as it is
 * guaranteed to be clean.
 */
static void
xfs_ioend_merge_private(
	struct iomap_ioend	*ioend,
	struct iomap_ioend	*next)
{
	if (!ioend->io_private) {
		ioend->io_private = next->io_private;
		next->io_private = NULL;
	} else {
		xfs_setfilesize_ioend(next, -ECANCELED);
	}
}

/* Finish all pending io completions. */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct iomap_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	iomap_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
		xfs_end_ioend(ioend);
	}
}

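/*
 * Completions that update the file size, convert unwritten extents, or end
 * a COW operation cannot run in bio completion context and are deferred to
 * the per-inode completion workqueue instead.
 */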
static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
{
	return ioend->io_private ||
		ioend->io_type == IOMAP_UNWRITTEN ||
		(ioend->io_flags & IOMAP_F_SHARED);
}

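/*
 * Bio completion handler: batch the ioend onto the inode's completion list,
 * and only queue the work item when the list transitions from empty so that
 * a single work item drains all pending ioends for the inode.
 */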
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct iomap_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	unsigned long		flags;

	ASSERT(xfs_ioend_needs_workqueue(ioend));

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
					 &ip->i_ioend_work));
	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping.  Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	loff_t			offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset.  Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping.  Check the sequence number of the data
	 * fork because concurrent changes could have invalidated the extent.
	 * Check the COW fork because concurrent changes since the last time
	 * we checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

/*
 * Pass in a delalloc extent and convert it to real extents, return the real
 * extent that maps offset_fsb in wpc->iomap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset)
{
	int			error;
	unsigned		*seq;

	if (whichfork == XFS_COW_FORK)
		seq = &XFS_WPC(wpc)->cow_seq;
	else
		seq = &XFS_WPC(wpc)->data_seq;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into wpc->iomap.  Allocate in a loop because it
	 * may take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
				&wpc->iomap, seq);
		if (error)
			return error;
	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

	return 0;
}

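/*
 * ->map_blocks hook for buffered writeback: find (or allocate) the extent
 * backing the block at @offset and cache it in wpc->iomap for reuse by
 * subsequent blocks.
 */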
static int
xfs_map_blocks(
	struct iomap_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb;
	int			whichfork;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page.  Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.  If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	cow_fsb = NULLFILEOFF;
	whichfork = XFS_DATA_FORK;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use it
	 * directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap.  Revalidate now that we may have updated
	 * ->cow_seq.  If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one.  This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the returned real extent might be larger than the
	 * original delalloc one.  Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}

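/*
 * Pre-submission fixups for an ioend: convert COW extents, reserve log space
 * for a possible file size update, and route the completion to a workqueue
 * when it cannot run in bio completion context.
 */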
static int
xfs_prepare_ioend(
	struct iomap_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ((ioend->io_flags & IOMAP_F_SHARED) ||
	     ioend->io_type != IOMAP_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_private)
		status = xfs_setfilesize_trans_alloc(ioend);

	memalloc_nofs_restore(nofs_flag);

	if (xfs_ioend_needs_workqueue(ioend))
		ioend->io_bio->bi_end_io = xfs_end_bio;
	return status;
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.  Because
 * they are delalloc, we can do this without needing a transaction.  Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see an ENOSPC in writeback).
 */
static void
xfs_discard_page(
	struct page		*page,
	loff_t			fileoff)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		pageoff = offset_in_page(fileoff);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, fileoff);
	xfs_fileoff_t		pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert_ratelimited(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, fileoff);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			i_blocks_per_page(inode, page) - pageoff_fsb);
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
}

static const struct iomap_writeback_ops xfs_writeback_ops = {
	.map_blocks		= xfs_map_blocks,
	.prepare_ioend		= xfs_prepare_ioend,
	.discard_page		= xfs_discard_page,
};

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

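	/*
	 * Writing back data in a transaction context can result in recursive
	 * transactions; warn, redirty the page, and bail out.
	 */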
	if (WARN_ON_ONCE(current->journal_info)) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	/*
	 * Writing back data in a transaction context can result in recursive
	 * transactions.  This is bad, so issue a warning and get out of here.
	 */
	if (WARN_ON_ONCE(current->journal_info))
		return 0;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

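/*
 * For DAX inodes there are no dirty pagecache pages to write back; flush the
 * dirty DAX mappings through the backing DAX device instead.
 */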
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return iomap_readpage(page, &xfs_read_iomap_ops);
}

STATIC void
xfs_vm_readahead(
	struct readahead_control	*rac)
{
	iomap_readahead(rac, &xfs_read_iomap_ops);
}

static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
	return iomap_swapfile_activate(sis, swap_file, span,
			&xfs_read_iomap_ops);
}

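/*
 * Address space operations for regular, pagecache-backed XFS inodes; DAX
 * inodes use the reduced xfs_dax_aops table below.
 */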
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readahead		= xfs_vm_readahead,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};