// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_bit.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"
#include "xfs_iomap.h"
#include "xfs_sb.h"
#include "xfs_ag_resv.h"

/*
 * Copy on Write of Shared Blocks
 *
 * XFS must preserve "the usual" file semantics even when two files share
 * the same physical blocks. This means that a write to one file must not
 * alter the blocks in a different file; the way that we'll do that is
 * through the use of a copy-on-write mechanism. At a high level, that
 * means that when we want to write to a shared block, we allocate a new
 * block, write the data to the new block, and if that succeeds we map the
 * new block into the file.
 *
 * XFS provides a "delayed allocation" mechanism that defers the allocation
 * of disk blocks to dirty-but-not-yet-mapped file blocks as long as
 * possible. This reduces fragmentation by enabling the filesystem to ask
 * for bigger chunks less often, which is exactly what we want for CoW.
 *
 * The delalloc mechanism begins when the kernel wants to make a block
 * writable (write_begin or page_mkwrite). If the offset is not mapped, we
 * create a delalloc mapping, which is a regular in-core extent, but without
 * a real startblock. (For delalloc mappings, the startblock encodes both
 * a flag that this is a delalloc mapping, and a worst-case estimate of how
 * many blocks might be required to put the mapping into the BMBT.) delalloc
 * mappings are a reservation against the free space in the filesystem;
 * adjacent mappings can also be combined into fewer larger mappings.
 *
 * As an optimization, the CoW extent size hint (cowextsz) creates
 * outsized aligned delalloc reservations in the hope of landing out of
 * order nearby CoW writes in a single extent on disk, thereby reducing
 * fragmentation and improving future performance.
 *
 * D: --RRRRRRSSSRRRRRRRR--- (data fork)
 * C: ------DDDDDDD--------- (CoW fork)
 *
 * When dirty pages are being written out (typically in writepage), the
 * delalloc reservations are converted into unwritten mappings by
 * allocating blocks and replacing the delalloc mapping with real ones.
 * A delalloc mapping can be replaced by several unwritten ones if the
 * free space is fragmented.
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUUUUUU---------
 *
 * We want to adapt the delalloc mechanism for copy-on-write, since the
 * write paths are similar. The first two steps (creating the reservation
 * and allocating the blocks) are exactly the same as delalloc except that
 * the mappings must be stored in a separate CoW fork because we do not want
 * to disturb the mapping in the data fork until we're sure that the write
 * succeeded. IO completion in this case is the process of removing the old
 * mapping from the data fork and moving the new mapping from the CoW fork to
 * the data fork. This will be discussed shortly.
 *
 * For now, unaligned directio writes will be bounced back to the page cache.
 * Block-aligned directio writes will use the same mechanism as buffered
 * writes.
 *
 * Just prior to submitting the actual disk write requests, we convert
 * the extents representing the range of the file actually being written
 * (as opposed to extra pieces created for the cowextsize hint) to real
 * extents. This will become important in the next step:
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUrrUUU---------
 *
 * CoW remapping must be done after the data block write completes,
 * because we don't want to destroy the old data fork map until we're sure
 * the new block has been written. Since the new mappings are kept in a
 * separate fork, we can simply iterate these mappings to find the ones
 * that cover the file blocks that we just CoW'd. For each extent, simply
 * unmap the corresponding range in the data fork, map the new range into
 * the data fork, and remove the extent from the CoW fork. Because of
 * the presence of the cowextsize hint, however, we must be careful
 * only to remap the blocks that we've actually written out -- we must
 * never remap delalloc reservations nor CoW staging blocks that have
 * yet to be written. This corresponds exactly to the real extents in
 * the CoW fork:
 *
 * D: --RRRRRRrrSRRRRRRRR---
 * C: ------UU--UUU---------
 *
 * Since the remapping operation can be applied to an arbitrary file
 * range, we record the need for the remap step as a flag in the ioend
 * instead of declaring a new IO type. This is required for direct io
 * because we only have one ioend for the whole dio, and we have to be able
 * to remember the presence of unwritten blocks and CoW blocks with a single
 * ioend structure. Better yet, the more ground we can cover with one
 * ioend, the better.
 */

/*
 * Given an AG extent, find the lowest-numbered run of shared blocks
 * within that range and return the range in fbno/flen. If
 * find_end_of_shared is true, return the longest contiguous extent of
 * shared blocks. If there are no shared extents, fbno and flen will
 * be set to NULLAGBLOCK and 0, respectively.
 */
int
xfs_reflink_find_shared(
        struct xfs_mount *mp,
        struct xfs_trans *tp,
        xfs_agnumber_t agno,
        xfs_agblock_t agbno,
        xfs_extlen_t aglen,
        xfs_agblock_t *fbno,
        xfs_extlen_t *flen,
        bool find_end_of_shared)
{
        struct xfs_buf *agbp;
        struct xfs_btree_cur *cur;
        int error;

        error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
        if (error)
                return error;

        cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);

        error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
                        find_end_of_shared);

        xfs_btree_del_cursor(cur, error);

        xfs_trans_brelse(tp, agbp);
        return error;
}

/*
 * Trim the mapping to the next block where there's a change in the
 * shared/unshared status. More specifically, this means that we
 * find the lowest-numbered extent of shared blocks that coincides with
 * the given block mapping. If the shared extent overlaps the start of
 * the mapping, trim the mapping to the end of the shared extent. If
 * the shared region intersects the mapping, trim the mapping to the
 * start of the shared extent. If there are no shared regions that
 * overlap, just return the original extent.
 */
int
xfs_reflink_trim_around_shared(
        struct xfs_inode *ip,
        struct xfs_bmbt_irec *irec,
        bool *shared)
{
        xfs_agnumber_t agno;
        xfs_agblock_t agbno;
        xfs_extlen_t aglen;
        xfs_agblock_t fbno;
        xfs_extlen_t flen;
        int error = 0;

        /* Holes, unwritten, and delalloc extents cannot be shared */
        if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_written_extent(irec)) {
                *shared = false;
                return 0;
        }

        trace_xfs_reflink_trim_around_shared(ip, irec);

        agno = XFS_FSB_TO_AGNO(ip->i_mount, irec->br_startblock);
        agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
        aglen = irec->br_blockcount;

        error = xfs_reflink_find_shared(ip->i_mount, NULL, agno, agbno,
                        aglen, &fbno, &flen, true);
        if (error)
                return error;

        *shared = false;
        if (fbno == NULLAGBLOCK) {
                /* No shared blocks at all. */
                return 0;
        } else if (fbno == agbno) {
                /*
                 * The start of this extent is shared. Truncate the
                 * mapping at the end of the shared region so that a
                 * subsequent iteration starts at the start of the
                 * unshared region.
                 */
                irec->br_blockcount = flen;
                *shared = true;
                return 0;
        } else {
                /*
                 * There's a shared extent midway through this extent.
                 * Truncate the mapping at the start of the shared
                 * extent so that a subsequent iteration starts at the
                 * start of the shared region.
                 */
                irec->br_blockcount = fbno - agbno;
                return 0;
        }
}

int
xfs_bmap_trim_cow(
        struct xfs_inode *ip,
        struct xfs_bmbt_irec *imap,
        bool *shared)
{
        /* We can't update any real extents in always COW mode. */
        if (xfs_is_always_cow_inode(ip) &&
            !isnullstartblock(imap->br_startblock)) {
                *shared = true;
                return 0;
        }

        /* Trim the mapping to the nearest shared extent boundary. */
        return xfs_reflink_trim_around_shared(ip, imap, shared);
}

static int
xfs_reflink_convert_cow_locked(
        struct xfs_inode *ip,
        xfs_fileoff_t offset_fsb,
        xfs_filblks_t count_fsb)
{
        struct xfs_iext_cursor icur;
        struct xfs_bmbt_irec got;
        struct xfs_btree_cur *dummy_cur = NULL;
        int dummy_logflags;
        int error = 0;

        if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
                return 0;

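        /*
         * Walk the CoW fork mappings in the range and flip each unwritten
         * extent to written (XFS_EXT_NORM) in place.  The dummy cursor and
         * logflags only exist to satisfy xfs_bmap_add_extent_unwritten_real();
         * the CoW fork is in-core only, so there is no transaction here to
         * log against.
         */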
        do {
                if (got.br_startoff >= offset_fsb + count_fsb)
                        break;
                if (got.br_state == XFS_EXT_NORM)
                        continue;
                if (WARN_ON_ONCE(isnullstartblock(got.br_startblock)))
                        return -EIO;

                xfs_trim_extent(&got, offset_fsb, count_fsb);
                if (!got.br_blockcount)
                        continue;

                got.br_state = XFS_EXT_NORM;
                error = xfs_bmap_add_extent_unwritten_real(NULL, ip,
                                XFS_COW_FORK, &icur, &dummy_cur, &got,
                                &dummy_logflags);
                if (error)
                        return error;
        } while (xfs_iext_next_extent(ip->i_cowfp, &icur, &got));

        return error;
}

/* Convert all of the unwritten CoW extents in a file's range to real ones. */
int
xfs_reflink_convert_cow(
        struct xfs_inode *ip,
        xfs_off_t offset,
        xfs_off_t count)
{
        struct xfs_mount *mp = ip->i_mount;
        xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
        xfs_filblks_t count_fsb = end_fsb - offset_fsb;
        int error;

        ASSERT(count != 0);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Find the extent that maps the given range in the COW fork. Even if the
 * extent is not shared we might have a preallocation for it in the COW fork.
 * If so, use it rather than trigger a new allocation.
 */
static int
xfs_find_trim_cow_extent(
        struct xfs_inode *ip,
        struct xfs_bmbt_irec *imap,
        struct xfs_bmbt_irec *cmap,
        bool *shared,
        bool *found)
{
        xfs_fileoff_t offset_fsb = imap->br_startoff;
        xfs_filblks_t count_fsb = imap->br_blockcount;
        struct xfs_iext_cursor icur;

        *found = false;

        /*
         * If we don't find an overlapping extent, trim the range we need to
         * allocate to fit the hole we found.
         */
        if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, cmap))
                cmap->br_startoff = offset_fsb + count_fsb;
        if (cmap->br_startoff > offset_fsb) {
                xfs_trim_extent(imap, imap->br_startoff,
                                cmap->br_startoff - imap->br_startoff);
                return xfs_bmap_trim_cow(ip, imap, shared);
        }

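        /*
         * An existing CoW fork mapping covers the start of this range, so
         * report it as shared.  If it is still a delalloc reservation, trim
         * @imap to it and let the caller allocate real blocks; otherwise
         * reuse the real staging extent below.
         */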
        *shared = true;
        if (isnullstartblock(cmap->br_startblock)) {
                xfs_trim_extent(imap, cmap->br_startoff, cmap->br_blockcount);
                return 0;
        }

        /* real extent found - no need to allocate */
        xfs_trim_extent(cmap, offset_fsb, count_fsb);
        *found = true;
        return 0;
}

/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
        struct xfs_inode *ip,
        struct xfs_bmbt_irec *imap,
        struct xfs_bmbt_irec *cmap,
        bool *shared,
        uint *lockmode,
        bool convert_now)
{
        struct xfs_mount *mp = ip->i_mount;
        xfs_fileoff_t offset_fsb = imap->br_startoff;
        xfs_filblks_t count_fsb = imap->br_blockcount;
        struct xfs_trans *tp;
        int nimaps, error = 0;
        bool found;
        xfs_filblks_t resaligned;
        xfs_extlen_t resblks = 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        if (!ip->i_cowfp) {
                ASSERT(!xfs_is_reflink_inode(ip));
                xfs_ifork_init_cow(ip);
        }

        error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
        if (error || !*shared)
                return error;
        if (found)
                goto convert;

        resaligned = xfs_aligned_fsb_count(imap->br_startoff,
                        imap->br_blockcount, xfs_get_cowextsz_hint(ip));
        resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

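        /*
         * Drop the ILOCK while reserving a transaction: xfs_trans_alloc()
         * can block waiting for log space, and we must not wait for it while
         * holding the inode lock.  The CoW fork is re-checked below because
         * another thread may have raced in while the inode was unlocked.
         */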
        xfs_iunlock(ip, *lockmode);
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
        *lockmode = XFS_ILOCK_EXCL;
        xfs_ilock(ip, *lockmode);

        if (error)
                return error;

        error = xfs_qm_dqattach_locked(ip, false);
        if (error)
                goto out_trans_cancel;

        /*
         * Check for an overlapping extent again now that we dropped the ilock.
         */
        error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
        if (error || !*shared)
                goto out_trans_cancel;
        if (found) {
                xfs_trans_cancel(tp);
                goto convert;
        }

        error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
                        XFS_QMOPT_RES_REGBLKS);
        if (error)
                goto out_trans_cancel;

        xfs_trans_ijoin(tp, ip, 0);

        /* Allocate the entire reservation as unwritten blocks. */
        nimaps = 1;
        error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
                        XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0, cmap,
                        &nimaps);
        if (error)
                goto out_unreserve;

        xfs_inode_set_cowblocks_tag(ip);
        error = xfs_trans_commit(tp);
        if (error)
                return error;

        /*
         * Allocation succeeded but the requested range was not even partially
         * satisfied? Bail out!
         */
        if (nimaps == 0)
                return -ENOSPC;
convert:
        xfs_trim_extent(cmap, offset_fsb, count_fsb);
        /*
         * COW fork extents are supposed to remain unwritten until we're ready
         * to initiate a disk write. For direct I/O we are going to write the
         * data and need the conversion, but for buffered writes we're done.
         */
        if (!convert_now || cmap->br_state == XFS_EXT_NORM)
                return 0;
        trace_xfs_reflink_convert_cow(ip, cmap);
        return xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);

out_unreserve:
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
                        XFS_QMOPT_RES_REGBLKS);
out_trans_cancel:
        xfs_trans_cancel(tp);
        return error;
}

/*
 * Cancel CoW reservations for some block range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 *
 * Caller must have already joined the inode to the current transaction. The
 * inode will be joined to the transaction returned to the caller.
 */
int
xfs_reflink_cancel_cow_blocks(
        struct xfs_inode *ip,
        struct xfs_trans **tpp,
        xfs_fileoff_t offset_fsb,
        xfs_fileoff_t end_fsb,
        bool cancel_real)
{
        struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
        struct xfs_bmbt_irec got, del;
        struct xfs_iext_cursor icur;
        int error = 0;

        if (!xfs_inode_has_cow_data(ip))
                return 0;
        if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
                return 0;

        /* Walk backwards until we're out of the I/O range... */
        while (got.br_startoff + got.br_blockcount > offset_fsb) {
                del = got;
                xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

                /* Extent delete may have bumped ext forward */
                if (!del.br_blockcount) {
                        xfs_iext_prev(ifp, &icur);
                        goto next_extent;
                }

                trace_xfs_reflink_cancel_cow(ip, &del);

                if (isnullstartblock(del.br_startblock)) {
                        error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
                                        &icur, &got, &del);
                        if (error)
                                break;
                } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
                        ASSERT((*tpp)->t_firstblock == NULLFSBLOCK);

                        /* Free the CoW orphan record. */
                        xfs_refcount_free_cow_extent(*tpp, del.br_startblock,
                                        del.br_blockcount);

                        xfs_bmap_add_free(*tpp, del.br_startblock,
                                        del.br_blockcount, NULL);

                        /* Roll the transaction */
                        error = xfs_defer_finish(tpp);
                        if (error)
                                break;

                        /* Remove the mapping from the CoW fork. */
                        xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

                        /* Remove the quota reservation */
                        error = xfs_trans_reserve_quota_nblks(NULL, ip,
                                        -(long)del.br_blockcount, 0,
                                        XFS_QMOPT_RES_REGBLKS);
                        if (error)
                                break;
                } else {
                        /* Didn't do anything, push cursor back. */
                        xfs_iext_prev(ifp, &icur);
                }
next_extent:
                if (!xfs_iext_get_extent(ifp, &icur, &got))
                        break;
        }

        /* clear tag if cow fork is emptied */
        if (!ifp->if_bytes)
                xfs_inode_clear_cowblocks_tag(ip);
        return error;
}

/*
 * Cancel CoW reservations for some byte range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 */
int
xfs_reflink_cancel_cow_range(
        struct xfs_inode *ip,
        xfs_off_t offset,
        xfs_off_t count,
        bool cancel_real)
{
        struct xfs_trans *tp;
        xfs_fileoff_t offset_fsb;
        xfs_fileoff_t end_fsb;
        int error;

        trace_xfs_reflink_cancel_cow_range(ip, offset, count);
        ASSERT(ip->i_cowfp);

        offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
        if (count == NULLFILEOFF)
                end_fsb = NULLFILEOFF;
        else
                end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

        /* Start a rolling transaction to remove the mappings */
        error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
                        0, 0, 0, &tp);
        if (error)
                goto out;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        /* Scrape out the old CoW reservations */
        error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
                        cancel_real);
        if (error)
                goto out_cancel;

        error = xfs_trans_commit(tp);

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
        trace_xfs_reflink_cancel_cow_range_error(ip, error, _RET_IP_);
        return error;
}

/*
 * Remap part of the CoW fork into the data fork.
 *
 * We aim to remap the range starting at @offset_fsb and ending at @end_fsb
 * into the data fork; this function will remap what it can (at the end of the
 * range) and update @end_fsb appropriately. Each remap gets its own
 * transaction because we can end up merging and splitting bmbt blocks for
 * every remap operation and we'd like to keep the block reservation
 * requirements as low as possible.
 */
STATIC int
xfs_reflink_end_cow_extent(
        struct xfs_inode *ip,
        xfs_fileoff_t offset_fsb,
        xfs_fileoff_t *end_fsb)
{
        struct xfs_bmbt_irec got, del;
        struct xfs_iext_cursor icur;
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_trans *tp;
        struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
        xfs_filblks_t rlen;
        unsigned int resblks;
        int error;

        /* No COW extents? That's easy! */
        if (ifp->if_bytes == 0) {
                *end_fsb = offset_fsb;
                return 0;
        }

        resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
                        XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;

        /*
         * Lock the inode. We have to ijoin without automatic unlock because
         * the lead transaction is the refcountbt record deletion; the data
         * fork update follows as a deferred log item.
         */
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        /*
         * In the case of racing, overlapping AIO writes, no COW extents
         * might be left by the time I/O completes for the loser of the
         * race. In that case we are done.
         */
        if (!xfs_iext_lookup_extent_before(ip, ifp, end_fsb, &icur, &got) ||
            got.br_startoff + got.br_blockcount <= offset_fsb) {
                *end_fsb = offset_fsb;
                goto out_cancel;
        }

        /*
         * Structure copy @got into @del, then trim @del to the range that we
         * were asked to remap. We preserve @got for the eventual CoW fork
         * deletion; from now on @del represents the mapping that we're
         * actually remapping.
         */
        del = got;
        xfs_trim_extent(&del, offset_fsb, *end_fsb - offset_fsb);

        ASSERT(del.br_blockcount > 0);

        /*
         * Only remap real extents that contain data. With AIO, speculative
         * preallocations can leak into the range we are called upon, and we
         * need to skip them.
         */
        if (!xfs_bmap_is_written_extent(&got)) {
                *end_fsb = del.br_startoff;
                goto out_cancel;
        }

        /* Unmap the old blocks in the data fork. */
        rlen = del.br_blockcount;
        error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
        if (error)
                goto out_cancel;

        /* Trim the extent to whatever got unmapped. */
        xfs_trim_extent(&del, del.br_startoff + rlen, del.br_blockcount - rlen);
        trace_xfs_reflink_cow_remap(ip, &del);

        /* Free the CoW orphan record. */
        xfs_refcount_free_cow_extent(tp, del.br_startblock, del.br_blockcount);

        /* Map the new blocks into the data fork. */
        xfs_bmap_map_extent(tp, ip, &del);

        /* Charge this new data fork mapping to the on-disk quota. */
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
                        (long)del.br_blockcount);

        /* Remove the mapping from the CoW fork. */
        xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        if (error)
                return error;

        /* Update the caller about how much progress we made. */
        *end_fsb = del.br_startoff;
        return 0;

out_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Remap parts of a file's data fork after a successful CoW.
 */
int
xfs_reflink_end_cow(
        struct xfs_inode *ip,
        xfs_off_t offset,
        xfs_off_t count)
{
        xfs_fileoff_t offset_fsb;
        xfs_fileoff_t end_fsb;
        int error = 0;

        trace_xfs_reflink_end_cow(ip, offset, count);

        offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
        end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

        /*
         * Walk backwards until we're out of the I/O range. The loop function
         * repeatedly cycles the ILOCK to allocate one transaction per remapped
         * extent.
         *
         * If we're being called by writeback then the pages will still
         * have PageWriteback set, which prevents races with reflink remapping
         * and truncate. Reflink remapping prevents races with writeback by
         * taking the iolock and mmaplock before flushing the pages and
         * remapping, which means there won't be any further writeback or page
         * cache dirtying until the reflink completes.
         *
         * We should never have two threads issuing writeback for the same file
         * region. There are also post-eof checks in the writeback preparation
         * code so that we don't bother writing out pages that are about to be
         * truncated.
         *
         * If we're being called as part of directio write completion, the dio
         * count is still elevated, which reflink and truncate will wait for.
         * Reflink remapping takes the iolock and mmaplock and waits for
         * pending dio to finish, which should prevent any directio until the
         * remap completes. Multiple concurrent directio writes to the same
         * region are handled by end_cow processing only occurring for the
         * threads which succeed; the outcome of multiple overlapping direct
         * writes is not well defined anyway.
         *
         * It's possible that a buffered write and a direct write could collide
         * here (the buffered write stumbles in after the dio flushes and
         * invalidates the page cache and immediately queues writeback), but we
         * have never supported this 100%. If either disk write succeeds the
         * blocks will be remapped.
         */
        while (end_fsb > offset_fsb && !error)
                error = xfs_reflink_end_cow_extent(ip, offset_fsb, &end_fsb);

        if (error)
                trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
        return error;
}

/*
 * Free leftover CoW reservations that didn't get cleaned out.
 */
int
xfs_reflink_recover_cow(
        struct xfs_mount *mp)
{
        xfs_agnumber_t agno;
        int error = 0;

        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return 0;

        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
                error = xfs_refcount_recover_cow_leftovers(mp, agno);
                if (error)
                        break;
        }

        return error;
}

/*
 * Reflinking (Block) Ranges of Two Files Together
 *
 * First, ensure that the reflink flag is set on both inodes. The flag is an
 * optimization to avoid unnecessary refcount btree lookups in the write path.
 *
 * Now we can iteratively remap the range of extents (and holes) in src to the
 * corresponding ranges in dest. Let drange and srange denote the ranges of
 * logical blocks in dest and src touched by the reflink operation.
 *
 * While the length of drange is greater than zero,
 * - Read src's bmbt at the start of srange ("imap")
 * - If imap doesn't exist, make imap appear to start at the end of srange
 *   with zero length.
 * - If imap starts before srange, advance imap to start at srange.
 * - If imap goes beyond srange, truncate imap to end at the end of srange.
 * - Punch (imap start - srange start + imap len) blocks from dest at
 *   offset (drange start).
 * - If imap points to a real range of pblks,
 *   > Increase the refcount of the imap's pblks
 *   > Map imap's pblks into dest at the offset
 *     (drange start + imap start - srange start)
 * - Advance drange and srange by (imap start - srange start + imap len)
 *
 * Finally, if the reflink made dest longer, update both the in-core and
 * on-disk file sizes.
 *
 * ASCII Art Demonstration:
 *
 * Let's say we want to reflink this source file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS (src file)
 * <-------------------->
 *
 * into this destination file:
 *
 * --DDDDDDDDDDDDDDDDDDD--DDD (dest file)
 * <-------------------->
 * '-' means a hole, and 'S' and 'D' are written blocks in the src and dest.
 * Observe that the range has different logical offsets in either file.
 *
 * Consider that the first extent in the source file doesn't line up with our
 * reflink range. Unmapping and remapping are separate operations, so we can
 * unmap more blocks from the destination file than we remap.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 * <------->
 * --DDDDD---------DDDDD--DDD
 * <------->
 *
 * Now remap the source extent into the destination file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 * <------->
 * --DDDDD--SSSSSSSDDDDD--DDD
 * <------->
 *
 * Do likewise with the second hole and extent in our range. Holes in the
 * unmap range don't affect our operation.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 * <---->
 * --DDDDD--SSSSSSS-SSSSS-DDD
 * <---->
 *
 * Finally, unmap and remap part of the third extent. This will increase the
 * size of the destination file.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 * <----->
 * --DDDDD--SSSSSSS-SSSSS----SSS
 * <----->
 *
 * Once we update the destination file's i_size, we're done.
 */

/*
 * Ensure the reflink bit is set in both inodes.
 */
STATIC int
xfs_reflink_set_inode_flag(
        struct xfs_inode *src,
        struct xfs_inode *dest)
{
        struct xfs_mount *mp = src->i_mount;
        int error;
        struct xfs_trans *tp;

        if (xfs_is_reflink_inode(src) && xfs_is_reflink_inode(dest))
                return 0;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
        if (error)
                goto out_error;

        /* Lock both files against IO */
        if (src->i_ino == dest->i_ino)
                xfs_ilock(src, XFS_ILOCK_EXCL);
        else
                xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL);

        if (!xfs_is_reflink_inode(src)) {
                trace_xfs_reflink_set_inode_flag(src);
                xfs_trans_ijoin(tp, src, XFS_ILOCK_EXCL);
                src->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
                xfs_trans_log_inode(tp, src, XFS_ILOG_CORE);
                xfs_ifork_init_cow(src);
        } else
                xfs_iunlock(src, XFS_ILOCK_EXCL);

        if (src->i_ino == dest->i_ino)
                goto commit_flags;

        if (!xfs_is_reflink_inode(dest)) {
                trace_xfs_reflink_set_inode_flag(dest);
                xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);
                dest->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
                xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
                xfs_ifork_init_cow(dest);
        } else
                xfs_iunlock(dest, XFS_ILOCK_EXCL);

commit_flags:
        error = xfs_trans_commit(tp);
        if (error)
                goto out_error;
        return error;

out_error:
        trace_xfs_reflink_set_inode_flag_error(dest, error, _RET_IP_);
        return error;
}

/*
 * Update destination inode size & cowextsize hint, if necessary.
 */
int
xfs_reflink_update_dest(
        struct xfs_inode *dest,
        xfs_off_t newlen,
        xfs_extlen_t cowextsize,
        unsigned int remap_flags)
{
        struct xfs_mount *mp = dest->i_mount;
        struct xfs_trans *tp;
        int error;

        if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
                return 0;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
        if (error)
                goto out_error;

        xfs_ilock(dest, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);

        if (newlen > i_size_read(VFS_I(dest))) {
                trace_xfs_reflink_update_inode_size(dest, newlen);
                i_size_write(VFS_I(dest), newlen);
                dest->i_d.di_size = newlen;
        }

        if (cowextsize) {
                dest->i_d.di_cowextsize = cowextsize;
                dest->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
        }

        xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);

        error = xfs_trans_commit(tp);
        if (error)
                goto out_error;
        return error;

out_error:
        trace_xfs_reflink_update_inode_size_error(dest, error, _RET_IP_);
        return error;
}

/*
 * Do we have enough reserve in this AG to handle a reflink? The refcount
 * btree already reserved all the space it needs, but the rmap btree can grow
 * infinitely, so we won't allow more reflinks when the AG is down to the
 * btree reserves.
 */
static int
xfs_reflink_ag_has_free_space(
        struct xfs_mount *mp,
        xfs_agnumber_t agno)
{
        struct xfs_perag *pag;
        int error = 0;

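        /*
         * Only the rmap btree grows without bound in response to reflinks
         * (see above); if this filesystem has no rmap btree, there is
         * nothing to check.
         */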
        if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
                return 0;

        pag = xfs_perag_get(mp, agno);
        if (xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) ||
            xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA))
                error = -ENOSPC;
        xfs_perag_put(pag);
        return error;
}

/*
 * Remap the given extent into the file. The dmap blockcount will be set to
 * the number of blocks that were actually remapped.
 */
STATIC int
xfs_reflink_remap_extent(
        struct xfs_inode *ip,
        struct xfs_bmbt_irec *dmap,
        xfs_off_t new_isize)
{
        struct xfs_bmbt_irec smap;
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_trans *tp;
        xfs_off_t newlen;
        int64_t qres, qdelta;
        unsigned int resblks;
        bool smap_real;
        bool dmap_written = xfs_bmap_is_written_extent(dmap);
        int nimaps;
        int error;

        /* Start a rolling transaction to switch the mappings */
        resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
        if (error)
                goto out;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        /*
         * Read what's currently mapped in the destination file into smap.
         * If smap isn't a hole, we will have to remove it before we can add
         * dmap to the destination file.
         */
        nimaps = 1;
        error = xfs_bmapi_read(ip, dmap->br_startoff, dmap->br_blockcount,
                        &smap, &nimaps, 0);
        if (error)
                goto out_cancel;
        ASSERT(nimaps == 1 && smap.br_startoff == dmap->br_startoff);
        smap_real = xfs_bmap_is_real_extent(&smap);

        /*
         * We can only remap as many blocks as the smaller of the two extent
         * maps, because we can only remap one extent at a time.
         */
        dmap->br_blockcount = min(dmap->br_blockcount, smap.br_blockcount);
        ASSERT(dmap->br_blockcount == smap.br_blockcount);

        trace_xfs_reflink_remap_extent_dest(ip, &smap);

        /*
         * Two extents mapped to the same physical block must not have
         * different states; that's filesystem corruption. Move on to the next
         * extent if they're both holes or both the same physical extent.
         */
        if (dmap->br_startblock == smap.br_startblock) {
                if (dmap->br_state != smap.br_state)
                        error = -EFSCORRUPTED;
                goto out_cancel;
        }

        /* If both extents are unwritten, leave them alone. */
        if (dmap->br_state == XFS_EXT_UNWRITTEN &&
            smap.br_state == XFS_EXT_UNWRITTEN)
                goto out_cancel;

        /* No reflinking if the AG of the dest mapping is low on space. */
        if (dmap_written) {
                error = xfs_reflink_ag_has_free_space(mp,
                                XFS_FSB_TO_AGNO(mp, dmap->br_startblock));
                if (error)
                        goto out_cancel;
        }

        /*
         * Compute quota reservation if we think the quota block counter for
         * this file could increase.
         *
         * Adding a written extent to the extent map can cause a bmbt split,
         * and removing a mapped extent from the extent map can cause a bmbt
         * split.
         * The two operations cannot both cause a split since they operate on
         * the same index in the bmap btree, so we only need a reservation for
         * one bmbt split if either thing is happening.
         *
         * If we are mapping a written extent into the file, we need to have
         * enough quota block count reservation to handle the blocks in that
         * extent. We log only the delta to the quota block counts, so if the
         * extent we're unmapping also has blocks allocated to it, we don't
         * need a quota reservation for the extent itself.
         *
         * Note that if we're replacing a delalloc reservation with a written
         * extent, we have to take the full quota reservation because removing
         * the delalloc reservation gives the block count back to the quota
         * count. This is suboptimal, but the VFS flushed the dest range
         * before we started. That should have removed all the delalloc
         * reservations, but we code defensively.
         */
        qres = qdelta = 0;
        if (smap_real || dmap_written)
                qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
        if (!smap_real && dmap_written)
                qres += dmap->br_blockcount;
        if (qres > 0) {
                error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0,
                                XFS_QMOPT_RES_REGBLKS);
                if (error)
                        goto out_cancel;
        }

        if (smap_real) {
                /*
                 * If the extent we're unmapping is backed by storage (written
                 * or not), unmap the extent and drop its refcount.
                 */
                xfs_bmap_unmap_extent(tp, ip, &smap);
                xfs_refcount_decrease_extent(tp, &smap);
                qdelta -= smap.br_blockcount;
        } else if (smap.br_startblock == DELAYSTARTBLOCK) {
                xfs_filblks_t len = smap.br_blockcount;

                /*
                 * If the extent we're unmapping is a delalloc reservation,
                 * we can use the regular bunmapi function to release the
                 * incore state. Dropping the delalloc reservation takes care
                 * of the quota reservation for us.
                 */
                error = __xfs_bunmapi(NULL, ip, smap.br_startoff, &len, 0, 1);
                if (error)
                        goto out_cancel;
                ASSERT(len == 0);
        }

        /*
         * If the extent we're sharing is backed by written storage, increase
         * its refcount and map it into the file.
         */
        if (dmap_written) {
                xfs_refcount_increase_extent(tp, dmap);
                xfs_bmap_map_extent(tp, ip, dmap);
                qdelta += dmap->br_blockcount;
        }

        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, qdelta);

        /* Update dest isize if needed. */
        newlen = XFS_FSB_TO_B(mp, dmap->br_startoff + dmap->br_blockcount);
        newlen = min_t(xfs_off_t, newlen, new_isize);
        if (newlen > i_size_read(VFS_I(ip))) {
                trace_xfs_reflink_update_inode_size(ip, newlen);
                i_size_write(VFS_I(ip), newlen);
                ip->i_d.di_size = newlen;
                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        }

        /* Commit everything and unlock. */
        error = xfs_trans_commit(tp);
        goto out_unlock;

out_cancel:
        xfs_trans_cancel(tp);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
        if (error)
                trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
        return error;
}

/* Remap a range of one file to the other. */
int
xfs_reflink_remap_blocks(
        struct xfs_inode *src,
        loff_t pos_in,
        struct xfs_inode *dest,
        loff_t pos_out,
        loff_t remap_len,
        loff_t *remapped)
{
        struct xfs_bmbt_irec imap;
        struct xfs_mount *mp = src->i_mount;
        xfs_fileoff_t srcoff = XFS_B_TO_FSBT(mp, pos_in);
        xfs_fileoff_t destoff = XFS_B_TO_FSBT(mp, pos_out);
        xfs_filblks_t len;
        xfs_filblks_t remapped_len = 0;
        xfs_off_t new_isize = pos_out + remap_len;
        int nimaps;
        int error = 0;

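        /*
         * Clamp the remap length so that the resulting block offsets cannot
         * run past the largest supported file offset.
         */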
        len = min_t(xfs_filblks_t, XFS_B_TO_FSB(mp, remap_len),
                        XFS_MAX_FILEOFF);

        trace_xfs_reflink_remap_blocks(src, srcoff, len, dest, destoff);

        while (len > 0) {
                unsigned int lock_mode;

                /* Read extent from the source file */
                nimaps = 1;
                lock_mode = xfs_ilock_data_map_shared(src);
                error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
                xfs_iunlock(src, lock_mode);
                if (error)
                        break;
                /*
                 * The caller supposedly flushed all dirty pages in the source
                 * file range, which means that writeback should have allocated
                 * or deleted all delalloc reservations in that range. If we
                 * find one, that's a good sign that something is seriously
                 * wrong here.
                 */
                ASSERT(nimaps == 1 && imap.br_startoff == srcoff);
                if (imap.br_startblock == DELAYSTARTBLOCK) {
                        ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                        error = -EFSCORRUPTED;
                        break;
                }

                trace_xfs_reflink_remap_extent_src(src, &imap);

                /* Remap into the destination file at the given offset. */
                imap.br_startoff = destoff;
                error = xfs_reflink_remap_extent(dest, &imap, new_isize);
                if (error)
                        break;

                if (fatal_signal_pending(current)) {
                        error = -EINTR;
                        break;
                }

                /* Advance drange/srange */
                srcoff += imap.br_blockcount;
                destoff += imap.br_blockcount;
                len -= imap.br_blockcount;
                remapped_len += imap.br_blockcount;
        }

        if (error)
                trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
        *remapped = min_t(loff_t, remap_len,
                        XFS_FSB_TO_B(src->i_mount, remapped_len));
        return error;
}

/*
 * If we're reflinking to a point past the destination file's EOF, we must
 * zero any speculative post-EOF preallocations that sit between the old EOF
 * and the destination file offset.
 */
static int
xfs_reflink_zero_posteof(
        struct xfs_inode *ip,
        loff_t pos)
{
        loff_t isize = i_size_read(VFS_I(ip));

        if (pos <= isize)
                return 0;

        trace_xfs_zero_eof(ip, isize, pos - isize);
        return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
                        &xfs_buffered_write_iomap_ops);
}

/*
 * Prepare two files for range cloning. Upon a successful return both inodes
 * will have the iolock and mmaplock held, the page cache of the out file will
 * be truncated, and any leases on the out file will have been broken. This
 * function borrows heavily from xfs_file_aio_write_checks.
 *
 * The VFS allows partial EOF blocks to "match" for dedupe even though it
 * hasn't checked that the bytes beyond EOF physically match. Hence we cannot
 * use the EOF block in the source dedupe range because it's not a complete
 * block match, and so it can introduce a corruption into the file that has
 * its block replaced.
 *
 * In similar fashion, the VFS file cloning also allows partial EOF blocks to
 * be "block aligned" for the purposes of cloning entire files. However, if
 * the source file range includes the EOF block and it lands within the
 * existing EOF of the destination file, then we can expose stale data from
 * beyond the source file EOF in the destination file.
 *
 * XFS doesn't support partial block sharing, so in both cases we have to
 * check these cases ourselves. For dedupe, we can simply round the length to
 * dedupe down to the previous whole block and ignore the partial EOF block.
 * While this means we can't dedupe the last block of a file, this is an
 * acceptable tradeoff for simplicity of implementation.
 *
 * For cloning, we want to share the partial EOF block if it is also the new
 * EOF block of the destination file. If the partial EOF block lies inside the
 * existing destination EOF, then we have to abort the clone to avoid exposing
 * stale data in the destination file. Hence we reject these clone attempts
 * with -EINVAL in this case.
 */
int
xfs_reflink_remap_prep(
        struct file *file_in,
        loff_t pos_in,
        struct file *file_out,
        loff_t pos_out,
        loff_t *len,
        unsigned int remap_flags)
{
        struct inode *inode_in = file_inode(file_in);
        struct xfs_inode *src = XFS_I(inode_in);
        struct inode *inode_out = file_inode(file_out);
        struct xfs_inode *dest = XFS_I(inode_out);
        int ret;

        /* Lock both files against IO */
        ret = xfs_ilock2_io_mmap(src, dest);
        if (ret)
                return ret;

        /* Check file eligibility and prepare for block sharing. */
        ret = -EINVAL;
        /* Don't reflink realtime inodes */
        if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
                goto out_unlock;

        /* Don't share DAX file data for now. */
        if (IS_DAX(inode_in) || IS_DAX(inode_out))
                goto out_unlock;

        ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
                        len, remap_flags);
        if (ret || *len == 0)
                goto out_unlock;

        /* Attach dquots to dest inode before changing block map */
        ret = xfs_qm_dqattach(dest);
        if (ret)
                goto out_unlock;

        /*
         * Zero existing post-eof speculative preallocations in the destination
         * file.
         */
        ret = xfs_reflink_zero_posteof(dest, pos_out);
        if (ret)
                goto out_unlock;

        /* Set flags and remap blocks. */
        ret = xfs_reflink_set_inode_flag(src, dest);
        if (ret)
                goto out_unlock;

        /*
         * If pos_out > EOF, we may have dirtied blocks between EOF and
         * pos_out. In that case, we need to extend the flush and unmap to cover
         * from EOF to the end of the copy length.
         */
        if (pos_out > XFS_ISIZE(dest)) {
                loff_t flen = *len + (pos_out - XFS_ISIZE(dest));
                ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
        } else {
                ret = xfs_flush_unmap_range(dest, pos_out, *len);
        }
        if (ret)
                goto out_unlock;

        return 0;
out_unlock:
        xfs_iunlock2_io_mmap(src, dest);
        return ret;
}

/* Does this inode need the reflink flag? */
int
xfs_reflink_inode_has_shared_extents(
        struct xfs_trans *tp,
        struct xfs_inode *ip,
        bool *has_shared)
{
        struct xfs_bmbt_irec got;
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_ifork *ifp;
        xfs_agnumber_t agno;
        xfs_agblock_t agbno;
        xfs_extlen_t aglen;
        xfs_agblock_t rbno;
        xfs_extlen_t rlen;
        struct xfs_iext_cursor icur;
        bool found;
        int error;

        ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
                if (error)
                        return error;
        }

        *has_shared = false;
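        /*
         * Walk every written data fork extent and ask the refcount btree
         * whether any of its blocks are still shared.
         */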
        found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
        while (found) {
                if (isnullstartblock(got.br_startblock) ||
                    got.br_state != XFS_EXT_NORM)
                        goto next;
                agno = XFS_FSB_TO_AGNO(mp, got.br_startblock);
                agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock);
                aglen = got.br_blockcount;

                error = xfs_reflink_find_shared(mp, tp, agno, agbno, aglen,
                                &rbno, &rlen, false);
                if (error)
                        return error;
                /* Is there still a shared block here? */
                if (rbno != NULLAGBLOCK) {
                        *has_shared = true;
                        return 0;
                }
next:
                found = xfs_iext_next_extent(ifp, &icur, &got);
        }

        return 0;
}

/*
 * Clear the inode reflink flag if there are no shared extents.
 *
 * The caller is responsible for joining the inode to the transaction passed in.
 * The inode will be joined to the transaction that is returned to the caller.
 */
int
xfs_reflink_clear_inode_flag(
        struct xfs_inode *ip,
        struct xfs_trans **tpp)
{
        bool needs_flag;
        int error = 0;

        ASSERT(xfs_is_reflink_inode(ip));

        error = xfs_reflink_inode_has_shared_extents(*tpp, ip, &needs_flag);
        if (error || needs_flag)
                return error;

        /*
         * We didn't find any shared blocks so turn off the reflink flag.
         * First, get rid of any leftover CoW mappings.
         */
        error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, XFS_MAX_FILEOFF,
                        true);
        if (error)
                return error;

        /* Clear the inode flag. */
        trace_xfs_reflink_unset_inode_flag(ip);
        ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
        xfs_inode_clear_cowblocks_tag(ip);
        xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

        return error;
}

/*
 * Clear the inode reflink flag if there are no shared extents and the size
 * hasn't changed.
 */
STATIC int
xfs_reflink_try_clear_inode_flag(
        struct xfs_inode *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_trans *tp;
        int error = 0;

        /* Start a rolling transaction to remove the mappings */
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        error = xfs_reflink_clear_inode_flag(ip, &tp);
        if (error)
                goto cancel;

        error = xfs_trans_commit(tp);
        if (error)
                goto out;

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return 0;
cancel:
        xfs_trans_cancel(tp);
out:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Pre-COW all shared blocks within a given byte range of a file and turn off
 * the reflink flag if we unshare all of the file's blocks.
 */
int
xfs_reflink_unshare(
        struct xfs_inode *ip,
        xfs_off_t offset,
        xfs_off_t len)
{
        struct inode *inode = VFS_I(ip);
        int error;

        if (!xfs_is_reflink_inode(ip))
                return 0;

        trace_xfs_reflink_unshare(ip, offset, len);

        inode_dio_wait(inode);

        error = iomap_file_unshare(inode, offset, len,
                        &xfs_buffered_write_iomap_ops);
        if (error)
                goto out;

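        /*
         * Write back the range we just unshared so the new blocks are on
         * disk and the CoW mappings are remapped before we try to clear the
         * reflink flag below.
         */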
        error = filemap_write_and_wait_range(inode->i_mapping, offset,
                        offset + len - 1);
        if (error)
                goto out;

        /* Turn off the reflink flag if possible. */
        error = xfs_reflink_try_clear_inode_flag(ip);
        if (error)
                goto out;
        return 0;

out:
        trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
        return error;
}