1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_sb.h"
26 #include "xfs_mount.h"
27 #include "xfs_defer.h"
28 #include "xfs_da_format.h"
29 #include "xfs_da_btree.h"
30 #include "xfs_dir2.h"
31 #include "xfs_inode.h"
32 #include "xfs_btree.h"
33 #include "xfs_trans.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_extfree_item.h"
36 #include "xfs_alloc.h"
37 #include "xfs_bmap.h"
38 #include "xfs_bmap_util.h"
39 #include "xfs_bmap_btree.h"
40 #include "xfs_rtalloc.h"
41 #include "xfs_error.h"
42 #include "xfs_quota.h"
43 #include "xfs_trans_space.h"
44 #include "xfs_buf_item.h"
45 #include "xfs_trace.h"
46 #include "xfs_symlink.h"
47 #include "xfs_attr_leaf.h"
48 #include "xfs_filestream.h"
49 #include "xfs_rmap.h"
50 #include "xfs_ag_resv.h"
51 #include "xfs_refcount.h"
52 #include "xfs_rmap_btree.h"
53 #include "xfs_icache.h"
54
55
56 kmem_zone_t *xfs_bmap_free_item_zone;
57
58 /*
59 * Miscellaneous helper functions
60 */
61
62 /*
63 * Compute and fill in the value of the maximum depth of a bmap btree
64 * in this filesystem. Done once, during mount.
65 */
66 void
67 xfs_bmap_compute_maxlevels(
68 xfs_mount_t *mp, /* file system mount structure */
69 int whichfork) /* data or attr fork */
70 {
71 int level; /* btree level */
72 uint maxblocks; /* max blocks at this level */
73 uint maxleafents; /* max leaf entries possible */
74 int maxrootrecs; /* max records in root block */
75 int minleafrecs; /* min records in leaf block */
76 int minnoderecs; /* min records in node block */
77 int sz; /* root block size */
78
79 /*
80 * The maximum number of extents in a file, hence the maximum
81 * number of leaf entries, is controlled by the type of di_nextents
82 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
83 * (a signed 16-bit number, xfs_aextnum_t).
84 *
85 * Note that we can no longer assume that if we are in ATTR1 that
86 * the fork offset of all the inodes will be
87 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
88 * with ATTR2 and then mounted back with ATTR1, keeping the
89 * di_forkoff's fixed but probably at various positions. Therefore,
90 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
91 * of a minimum size available.
92 */
93 if (whichfork == XFS_DATA_FORK) {
94 maxleafents = MAXEXTNUM;
95 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
96 } else {
97 maxleafents = MAXAEXTNUM;
98 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
99 }
100 maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
101 minleafrecs = mp->m_bmap_dmnr[0];
102 minnoderecs = mp->m_bmap_dmnr[1];
103 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
104 for (level = 1; maxblocks > 1; level++) {
105 if (maxblocks <= maxrootrecs)
106 maxblocks = 1;
107 else
108 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
109 }
110 mp->m_bm_maxlevels[whichfork] = level;
111 }
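
/*
 * Worked example of the loop above, with purely hypothetical geometry
 * (not any real filesystem): suppose minleafrecs = 4, minnoderecs = 4,
 * maxrootrecs = 3 and maxleafents = 100.  maxblocks starts at
 * ceil(100 / 4) = 25 leaf blocks; the first pass needs ceil(25 / 4) = 7
 * node blocks, the second pass ceil(7 / 4) = 2, and the third pass fits
 * those 2 pointers in the root, so the loop exits with level = 4
 * (leaf level + two node levels + root).
 */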
112
113 STATIC int /* error */
114 xfs_bmbt_lookup_eq(
115 struct xfs_btree_cur *cur,
116 xfs_fileoff_t off,
117 xfs_fsblock_t bno,
118 xfs_filblks_t len,
119 int *stat) /* success/failure */
120 {
121 cur->bc_rec.b.br_startoff = off;
122 cur->bc_rec.b.br_startblock = bno;
123 cur->bc_rec.b.br_blockcount = len;
124 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
125 }
126
127 STATIC int /* error */
128 xfs_bmbt_lookup_ge(
129 struct xfs_btree_cur *cur,
130 xfs_fileoff_t off,
131 xfs_fsblock_t bno,
132 xfs_filblks_t len,
133 int *stat) /* success/failure */
134 {
135 cur->bc_rec.b.br_startoff = off;
136 cur->bc_rec.b.br_startblock = bno;
137 cur->bc_rec.b.br_blockcount = len;
138 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
139 }
140
141 /*
142 * Check if the inode needs to be converted to btree format.
143 */
144 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
145 {
146 return whichfork != XFS_COW_FORK &&
147 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
148 XFS_IFORK_NEXTENTS(ip, whichfork) >
149 XFS_IFORK_MAXEXT(ip, whichfork);
150 }
151
152 /*
153 * Check if the inode should be converted to extent format.
154 */
155 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
156 {
157 return whichfork != XFS_COW_FORK &&
158 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
159 XFS_IFORK_NEXTENTS(ip, whichfork) <=
160 XFS_IFORK_MAXEXT(ip, whichfork);
161 }
162
163 /*
164 * Update the record referred to by cur to the value given
165 * by [off, bno, len, state].
166 * This either works (return 0) or gets an EFSCORRUPTED error.
167 */
168 STATIC int
169 xfs_bmbt_update(
170 struct xfs_btree_cur *cur,
171 xfs_fileoff_t off,
172 xfs_fsblock_t bno,
173 xfs_filblks_t len,
174 xfs_exntst_t state)
175 {
176 union xfs_btree_rec rec;
177
178 xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
179 return xfs_btree_update(cur, &rec);
180 }
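
/*
 * A minimal sketch of how these cursor helpers are usually paired in this
 * file: position the cursor on an existing record with xfs_bmbt_lookup_eq(),
 * verify the record was found, then overwrite it with xfs_bmbt_update().
 * "got" and "new" below are illustrative xfs_bmbt_irec_t values, not real
 * variables from this function:
 *
 *	error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
 *			got.br_startblock, got.br_blockcount, &i);
 *	if (error)
 *		goto done;
 *	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 *	error = xfs_bmbt_update(cur, new.br_startoff,
 *			new.br_startblock, new.br_blockcount, new.br_state);
 */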
181
182 /*
183 * Compute the worst-case number of indirect blocks that will be used
184 * for ip's delayed extent of length "len".
185 */
186 STATIC xfs_filblks_t
187 xfs_bmap_worst_indlen(
188 xfs_inode_t *ip, /* incore inode pointer */
189 xfs_filblks_t len) /* delayed extent length */
190 {
191 int level; /* btree level number */
192 int maxrecs; /* maximum record count at this level */
193 xfs_mount_t *mp; /* mount structure */
194 xfs_filblks_t rval; /* return value */
195 xfs_filblks_t orig_len;
196
197 mp = ip->i_mount;
198
199 /* Calculate the worst-case size of the bmbt. */
200 orig_len = len;
201 maxrecs = mp->m_bmap_dmxr[0];
202 for (level = 0, rval = 0;
203 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
204 level++) {
205 len += maxrecs - 1;
206 do_div(len, maxrecs);
207 rval += len;
208 if (len == 1) {
209 rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
210 level - 1;
211 break;
212 }
213 if (level == 0)
214 maxrecs = mp->m_bmap_dmxr[1];
215 }
216
217 /* Calculate the worst-case size of the rmapbt. */
218 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
219 rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
220 mp->m_rmap_maxlevels;
221
222 return rval;
223 }
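
/*
 * Worked example with hypothetical geometry (maxrecs = 4 at every level,
 * three bmbt levels, no rmapbt): a 10-block delayed extent needs
 * ceil(10 / 4) = 3 leaf blocks, then ceil(3 / 4) = 1 block at the next
 * level; once a level collapses to a single block the remaining levels
 * cost one block each, so rval = 3 + 1 + 1 = 5 indirect blocks reserved.
 */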
224
225 /*
226 * Calculate the default attribute fork offset for newly created inodes.
227 */
228 uint
229 xfs_default_attroffset(
230 struct xfs_inode *ip)
231 {
232 struct xfs_mount *mp = ip->i_mount;
233 uint offset;
234
235 if (mp->m_sb.sb_inodesize == 256) {
236 offset = XFS_LITINO(mp, ip->i_d.di_version) -
237 XFS_BMDR_SPACE_CALC(MINABTPTRS);
238 } else {
239 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
240 }
241
242 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
243 return offset;
244 }
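
/*
 * Note that the on-disk di_forkoff field is stored in units of 8 bytes,
 * which is why callers shift the byte offset returned here right by 3
 * before assigning it (see xfs_bmap_forkoff_reset() below and
 * xfs_bmap_add_attrfork()).
 */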
245
246 /*
247 * Helper routine to reset inode di_forkoff field when switching
248 * attribute fork from local to extent format - we reset it where
249 * possible to make space available for inline data fork extents.
250 */
251 STATIC void
252 xfs_bmap_forkoff_reset(
253 xfs_inode_t *ip,
254 int whichfork)
255 {
256 if (whichfork == XFS_ATTR_FORK &&
257 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
258 ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
259 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
260 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
261
262 if (dfl_forkoff > ip->i_d.di_forkoff)
263 ip->i_d.di_forkoff = dfl_forkoff;
264 }
265 }
266
267 #ifdef DEBUG
268 STATIC struct xfs_buf *
269 xfs_bmap_get_bp(
270 struct xfs_btree_cur *cur,
271 xfs_fsblock_t bno)
272 {
273 struct xfs_log_item_desc *lidp;
274 int i;
275
276 if (!cur)
277 return NULL;
278
279 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
280 if (!cur->bc_bufs[i])
281 break;
282 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
283 return cur->bc_bufs[i];
284 }
285
286 /* Chase down all the log items to see if the bp is there */
287 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
288 struct xfs_buf_log_item *bip;
289 bip = (struct xfs_buf_log_item *)lidp->lid_item;
290 if (bip->bli_item.li_type == XFS_LI_BUF &&
291 XFS_BUF_ADDR(bip->bli_buf) == bno)
292 return bip->bli_buf;
293 }
294
295 return NULL;
296 }
297
298 STATIC void
299 xfs_check_block(
300 struct xfs_btree_block *block,
301 xfs_mount_t *mp,
302 int root,
303 short sz)
304 {
305 int i, j, dmxr;
306 __be64 *pp, *thispa; /* pointer to block address */
307 xfs_bmbt_key_t *prevp, *keyp;
308
309 ASSERT(be16_to_cpu(block->bb_level) > 0);
310
311 prevp = NULL;
312 for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
313 dmxr = mp->m_bmap_dmxr[0];
314 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
315
316 if (prevp) {
317 ASSERT(be64_to_cpu(prevp->br_startoff) <
318 be64_to_cpu(keyp->br_startoff));
319 }
320 prevp = keyp;
321
322 /*
323 * Compare the block numbers to see if there are dups.
324 */
325 if (root)
326 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
327 else
328 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
329
330 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
331 if (root)
332 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
333 else
334 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
335 if (*thispa == *pp) {
336 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
337 __func__, j, i,
338 (unsigned long long)be64_to_cpu(*thispa));
339 panic("%s: ptrs are equal in node\n",
340 __func__);
341 }
342 }
343 }
344 }
345
346 /*
347 * Check that the extents for the inode ip are in the right order in all
348 * btree leaves. This becomes prohibitively expensive for large extent count
349 * files, so don't bother with inodes that have more than 10,000 extents in
350 * them. The btree record ordering checks will still be done, so for such large
351 * bmapbt constructs that is going to catch most corruptions.
352 */
353 STATIC void
354 xfs_bmap_check_leaf_extents(
355 xfs_btree_cur_t *cur, /* btree cursor or null */
356 xfs_inode_t *ip, /* incore inode pointer */
357 int whichfork) /* data or attr fork */
358 {
359 struct xfs_btree_block *block; /* current btree block */
360 xfs_fsblock_t bno; /* block # of "block" */
361 xfs_buf_t *bp; /* buffer for "block" */
362 int error; /* error return value */
363 xfs_extnum_t i=0, j; /* index into the extents list */
364 xfs_ifork_t *ifp; /* fork structure */
365 int level; /* btree level, for checking */
366 xfs_mount_t *mp; /* file system mount structure */
367 __be64 *pp; /* pointer to block address */
368 xfs_bmbt_rec_t *ep; /* pointer to current extent */
369 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
370 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
371 int bp_release = 0;
372
373 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
374 return;
375 }
376
377 /* skip large extent count inodes */
378 if (ip->i_d.di_nextents > 10000)
379 return;
380
381 bno = NULLFSBLOCK;
382 mp = ip->i_mount;
383 ifp = XFS_IFORK_PTR(ip, whichfork);
384 block = ifp->if_broot;
385 /*
386 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
387 */
388 level = be16_to_cpu(block->bb_level);
389 ASSERT(level > 0);
390 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
391 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
392 bno = be64_to_cpu(*pp);
393
394 ASSERT(bno != NULLFSBLOCK);
395 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
396 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
397
398 /*
399 * Go down the tree until leaf level is reached, following the first
400 * pointer (leftmost) at each level.
401 */
402 while (level-- > 0) {
403 /* See if buf is in cur first */
404 bp_release = 0;
405 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
406 if (!bp) {
407 bp_release = 1;
408 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
409 XFS_BMAP_BTREE_REF,
410 &xfs_bmbt_buf_ops);
411 if (error)
412 goto error_norelse;
413 }
414 block = XFS_BUF_TO_BLOCK(bp);
415 if (level == 0)
416 break;
417
418 /*
419 * Check this block for basic sanity (increasing keys and
420 * no duplicate blocks).
421 */
422
423 xfs_check_block(block, mp, 0, 0);
424 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
425 bno = be64_to_cpu(*pp);
426 XFS_WANT_CORRUPTED_GOTO(mp,
427 XFS_FSB_SANITY_CHECK(mp, bno), error0);
428 if (bp_release) {
429 bp_release = 0;
430 xfs_trans_brelse(NULL, bp);
431 }
432 }
433
434 /*
435 * Here with bp and block set to the leftmost leaf node in the tree.
436 */
437 i = 0;
438
439 /*
440 * Loop over all leaf nodes checking that all extents are in the right order.
441 */
442 for (;;) {
443 xfs_fsblock_t nextbno;
444 xfs_extnum_t num_recs;
445
446
447 num_recs = xfs_btree_get_numrecs(block);
448
449 /*
450 * Read-ahead the next leaf block, if any.
451 */
452
453 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
454
455 /*
456 * Check all the extents to make sure they are OK.
457 * If we had a previous block, the last entry should
458 * conform with the first entry in this one.
459 */
460
461 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
462 if (i) {
463 ASSERT(xfs_bmbt_disk_get_startoff(&last) +
464 xfs_bmbt_disk_get_blockcount(&last) <=
465 xfs_bmbt_disk_get_startoff(ep));
466 }
467 for (j = 1; j < num_recs; j++) {
468 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
469 ASSERT(xfs_bmbt_disk_get_startoff(ep) +
470 xfs_bmbt_disk_get_blockcount(ep) <=
471 xfs_bmbt_disk_get_startoff(nextp));
472 ep = nextp;
473 }
474
475 last = *ep;
476 i += num_recs;
477 if (bp_release) {
478 bp_release = 0;
479 xfs_trans_brelse(NULL, bp);
480 }
481 bno = nextbno;
482 /*
483 * If we've reached the end, stop.
484 */
485 if (bno == NULLFSBLOCK)
486 break;
487
488 bp_release = 0;
489 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
490 if (!bp) {
491 bp_release = 1;
492 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
493 XFS_BMAP_BTREE_REF,
494 &xfs_bmbt_buf_ops);
495 if (error)
496 goto error_norelse;
497 }
498 block = XFS_BUF_TO_BLOCK(bp);
499 }
500
501 return;
502
503 error0:
504 xfs_warn(mp, "%s: at error0", __func__);
505 if (bp_release)
506 xfs_trans_brelse(NULL, bp);
507 error_norelse:
508 xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
509 __func__, i);
510 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
511 return;
512 }
513
514 /*
515 * Add bmap trace insert entries for all the contents of the extent records.
516 */
517 void
518 xfs_bmap_trace_exlist(
519 xfs_inode_t *ip, /* incore inode pointer */
520 xfs_extnum_t cnt, /* count of entries in the list */
521 int whichfork, /* data or attr or cow fork */
522 unsigned long caller_ip)
523 {
524 xfs_extnum_t idx; /* extent record index */
525 xfs_ifork_t *ifp; /* inode fork pointer */
526 int state = 0;
527
528 if (whichfork == XFS_ATTR_FORK)
529 state |= BMAP_ATTRFORK;
530 else if (whichfork == XFS_COW_FORK)
531 state |= BMAP_COWFORK;
532
533 ifp = XFS_IFORK_PTR(ip, whichfork);
534 ASSERT(cnt == xfs_iext_count(ifp));
535 for (idx = 0; idx < cnt; idx++)
536 trace_xfs_extlist(ip, idx, state, caller_ip);
537 }
538
539 /*
540 * Validate that the bmbt_irecs being returned from bmapi are valid
541 * given the caller's original parameters. Specifically check the
542 * ranges of the returned irecs to ensure that they only extend beyond
543 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
544 */
545 STATIC void
546 xfs_bmap_validate_ret(
547 xfs_fileoff_t bno,
548 xfs_filblks_t len,
549 int flags,
550 xfs_bmbt_irec_t *mval,
551 int nmap,
552 int ret_nmap)
553 {
554 int i; /* index to map values */
555
556 ASSERT(ret_nmap <= nmap);
557
558 for (i = 0; i < ret_nmap; i++) {
559 ASSERT(mval[i].br_blockcount > 0);
560 if (!(flags & XFS_BMAPI_ENTIRE)) {
561 ASSERT(mval[i].br_startoff >= bno);
562 ASSERT(mval[i].br_blockcount <= len);
563 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
564 bno + len);
565 } else {
566 ASSERT(mval[i].br_startoff < bno + len);
567 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
568 bno);
569 }
570 ASSERT(i == 0 ||
571 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
572 mval[i].br_startoff);
573 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
574 mval[i].br_startblock != HOLESTARTBLOCK);
575 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
576 mval[i].br_state == XFS_EXT_UNWRITTEN);
577 }
578 }
579
580 #else
581 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
582 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
583 #endif /* DEBUG */
584
585 /*
586 * bmap free list manipulation functions
587 */
588
589 /*
590 * Add the extent to the list of extents to be freed at transaction end.
591 * The list is maintained sorted (by block number).
592 */
593 void
594 xfs_bmap_add_free(
595 struct xfs_mount *mp,
596 struct xfs_defer_ops *dfops,
597 xfs_fsblock_t bno,
598 xfs_filblks_t len,
599 struct xfs_owner_info *oinfo)
600 {
601 struct xfs_extent_free_item *new; /* new element */
602 #ifdef DEBUG
603 xfs_agnumber_t agno;
604 xfs_agblock_t agbno;
605
606 ASSERT(bno != NULLFSBLOCK);
607 ASSERT(len > 0);
608 ASSERT(len <= MAXEXTLEN);
609 ASSERT(!isnullstartblock(bno));
610 agno = XFS_FSB_TO_AGNO(mp, bno);
611 agbno = XFS_FSB_TO_AGBNO(mp, bno);
612 ASSERT(agno < mp->m_sb.sb_agcount);
613 ASSERT(agbno < mp->m_sb.sb_agblocks);
614 ASSERT(len < mp->m_sb.sb_agblocks);
615 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
616 #endif
617 ASSERT(xfs_bmap_free_item_zone != NULL);
618
619 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
620 new->xefi_startblock = bno;
621 new->xefi_blockcount = (xfs_extlen_t)len;
622 if (oinfo)
623 new->xefi_oinfo = *oinfo;
624 else
625 xfs_rmap_skip_owner_update(&new->xefi_oinfo);
626 trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
627 XFS_FSB_TO_AGBNO(mp, bno), len);
628 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
629 }
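
/*
 * A sketch of the usual calling pattern (mirroring the bmbt block free in
 * xfs_bmap_btree_to_extents() below): build the owner info for the extent
 * being freed, then defer the free until transaction commit:
 *
 *	struct xfs_owner_info	oinfo;
 *
 *	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *	xfs_bmap_add_free(mp, dfops, bno, 1, &oinfo);
 */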
630
631 /*
632 * Inode fork format manipulation functions
633 */
634
635 /*
636 * Transform a btree format file with only one leaf node, where the
637 * extents list will fit in the inode, into an extents format file.
638 * Since the file extents are already in-core, all we have to do is
639 * give up the space for the btree root and pitch the leaf block.
640 */
641 STATIC int /* error */
642 xfs_bmap_btree_to_extents(
643 xfs_trans_t *tp, /* transaction pointer */
644 xfs_inode_t *ip, /* incore inode pointer */
645 xfs_btree_cur_t *cur, /* btree cursor */
646 int *logflagsp, /* inode logging flags */
647 int whichfork) /* data or attr fork */
648 {
649 /* REFERENCED */
650 struct xfs_btree_block *cblock;/* child btree block */
651 xfs_fsblock_t cbno; /* child block number */
652 xfs_buf_t *cbp; /* child block's buffer */
653 int error; /* error return value */
654 xfs_ifork_t *ifp; /* inode fork data */
655 xfs_mount_t *mp; /* mount point structure */
656 __be64 *pp; /* ptr to block address */
657 struct xfs_btree_block *rblock;/* root btree block */
658 struct xfs_owner_info oinfo;
659
660 mp = ip->i_mount;
661 ifp = XFS_IFORK_PTR(ip, whichfork);
662 ASSERT(whichfork != XFS_COW_FORK);
663 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
664 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
665 rblock = ifp->if_broot;
666 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
667 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
668 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
669 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
670 cbno = be64_to_cpu(*pp);
671 *logflagsp = 0;
672 #ifdef DEBUG
673 if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
674 return error;
675 #endif
676 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
677 &xfs_bmbt_buf_ops);
678 if (error)
679 return error;
680 cblock = XFS_BUF_TO_BLOCK(cbp);
681 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
682 return error;
683 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
684 xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
685 ip->i_d.di_nblocks--;
686 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
687 xfs_trans_binval(tp, cbp);
688 if (cur->bc_bufs[0] == cbp)
689 cur->bc_bufs[0] = NULL;
690 xfs_iroot_realloc(ip, -1, whichfork);
691 ASSERT(ifp->if_broot == NULL);
692 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
693 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
694 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
695 return 0;
696 }
697
698 /*
699 * Convert an extents-format file into a btree-format file.
700 * The new file will have a root block (in the inode) and a single child block.
701 */
702 STATIC int /* error */
703 xfs_bmap_extents_to_btree(
704 xfs_trans_t *tp, /* transaction pointer */
705 xfs_inode_t *ip, /* incore inode pointer */
706 xfs_fsblock_t *firstblock, /* first-block-allocated */
707 struct xfs_defer_ops *dfops, /* blocks freed in xaction */
708 xfs_btree_cur_t **curp, /* cursor returned to caller */
709 int wasdel, /* converting a delayed alloc */
710 int *logflagsp, /* inode logging flags */
711 int whichfork) /* data or attr fork */
712 {
713 struct xfs_btree_block *ablock; /* allocated (child) bt block */
714 xfs_buf_t *abp; /* buffer for ablock */
715 xfs_alloc_arg_t args; /* allocation arguments */
716 xfs_bmbt_rec_t *arp; /* child record pointer */
717 struct xfs_btree_block *block; /* btree root block */
718 xfs_btree_cur_t *cur; /* bmap btree cursor */
719 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
720 int error; /* error return value */
721 xfs_extnum_t i, cnt; /* extent record index */
722 xfs_ifork_t *ifp; /* inode fork pointer */
723 xfs_bmbt_key_t *kp; /* root block key pointer */
724 xfs_mount_t *mp; /* mount structure */
725 xfs_extnum_t nextents; /* number of file extents */
726 xfs_bmbt_ptr_t *pp; /* root block address pointer */
727
728 mp = ip->i_mount;
729 ASSERT(whichfork != XFS_COW_FORK);
730 ifp = XFS_IFORK_PTR(ip, whichfork);
731 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
732
733 /*
734 * Make space in the inode incore.
735 */
736 xfs_iroot_realloc(ip, 1, whichfork);
737 ifp->if_flags |= XFS_IFBROOT;
738
739 /*
740 * Fill in the root.
741 */
742 block = ifp->if_broot;
743 if (xfs_sb_version_hascrc(&mp->m_sb))
744 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
745 XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
746 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
747 else
748 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
749 XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
750 XFS_BTREE_LONG_PTRS);
751
752 /*
753 * Need a cursor. Can't allocate until bb_level is filled in.
754 */
755 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
756 cur->bc_private.b.firstblock = *firstblock;
757 cur->bc_private.b.dfops = dfops;
758 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
759 /*
760 * Convert to a btree with two levels, one record in root.
761 */
762 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
763 memset(&args, 0, sizeof(args));
764 args.tp = tp;
765 args.mp = mp;
766 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
767 args.firstblock = *firstblock;
768 if (*firstblock == NULLFSBLOCK) {
769 args.type = XFS_ALLOCTYPE_START_BNO;
770 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
771 } else if (dfops->dop_low) {
772 args.type = XFS_ALLOCTYPE_START_BNO;
773 try_another_ag:
774 args.fsbno = *firstblock;
775 } else {
776 args.type = XFS_ALLOCTYPE_NEAR_BNO;
777 args.fsbno = *firstblock;
778 }
779 args.minlen = args.maxlen = args.prod = 1;
780 args.wasdel = wasdel;
781 *logflagsp = 0;
782 if ((error = xfs_alloc_vextent(&args))) {
783 xfs_iroot_realloc(ip, -1, whichfork);
784 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
785 return error;
786 }
787
788 /*
789 * During a CoW operation, the allocation and bmbt updates occur in
790 * different transactions. The mapping code tries to put new bmbt
791 * blocks near extents being mapped, but the only way to guarantee this
792 * is if the alloc and the mapping happen in a single transaction that
793 * has a block reservation. That isn't the case here, so if we run out
794 * of space we'll try again with another AG.
795 */
796 if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
797 args.fsbno == NULLFSBLOCK &&
798 args.type == XFS_ALLOCTYPE_NEAR_BNO) {
799 args.type = XFS_ALLOCTYPE_FIRST_AG;
800 goto try_another_ag;
801 }
802 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
803 xfs_iroot_realloc(ip, -1, whichfork);
804 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
805 return -ENOSPC;
806 }
807 /*
808 * Allocation can't fail, the space was reserved.
809 */
810 ASSERT(*firstblock == NULLFSBLOCK ||
811 args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
812 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
813 cur->bc_private.b.allocated++;
814 ip->i_d.di_nblocks++;
815 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
816 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
817 /*
818 * Fill in the child block.
819 */
820 abp->b_ops = &xfs_bmbt_buf_ops;
821 ablock = XFS_BUF_TO_BLOCK(abp);
822 if (xfs_sb_version_hascrc(&mp->m_sb))
823 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
824 XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
825 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
826 else
827 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
828 XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
829 XFS_BTREE_LONG_PTRS);
830
831 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
832 nextents = xfs_iext_count(ifp);
833 for (cnt = i = 0; i < nextents; i++) {
834 ep = xfs_iext_get_ext(ifp, i);
835 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
836 arp->l0 = cpu_to_be64(ep->l0);
837 arp->l1 = cpu_to_be64(ep->l1);
838 arp++; cnt++;
839 }
840 }
841 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
842 xfs_btree_set_numrecs(ablock, cnt);
843
844 /*
845 * Fill in the root key and pointer.
846 */
847 kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
848 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
849 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
850 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
851 be16_to_cpu(block->bb_level)));
852 *pp = cpu_to_be64(args.fsbno);
853
854 /*
855 * Do all this logging at the end so that
856 * the root is at the right level.
857 */
858 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
859 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
860 ASSERT(*curp == NULL);
861 *curp = cur;
862 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
863 return 0;
864 }
865
866 /*
867 * Convert a local file to an extents file.
868 * This code is out of bounds for data forks of regular files,
869 * since the file data needs to get logged so things will stay consistent.
870 * (The bmap-level manipulations are ok, though).
871 */
872 void
873 xfs_bmap_local_to_extents_empty(
874 struct xfs_inode *ip,
875 int whichfork)
876 {
877 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
878
879 ASSERT(whichfork != XFS_COW_FORK);
880 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
881 ASSERT(ifp->if_bytes == 0);
882 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
883
884 xfs_bmap_forkoff_reset(ip, whichfork);
885 ifp->if_flags &= ~XFS_IFINLINE;
886 ifp->if_flags |= XFS_IFEXTENTS;
887 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
888 }
889
890
891 STATIC int /* error */
892 xfs_bmap_local_to_extents(
893 xfs_trans_t *tp, /* transaction pointer */
894 xfs_inode_t *ip, /* incore inode pointer */
895 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
896 xfs_extlen_t total, /* total blocks needed by transaction */
897 int *logflagsp, /* inode logging flags */
898 int whichfork,
899 void (*init_fn)(struct xfs_trans *tp,
900 struct xfs_buf *bp,
901 struct xfs_inode *ip,
902 struct xfs_ifork *ifp))
903 {
904 int error = 0;
905 int flags; /* logging flags returned */
906 xfs_ifork_t *ifp; /* inode fork pointer */
907 xfs_alloc_arg_t args; /* allocation arguments */
908 xfs_buf_t *bp; /* buffer for extent block */
909 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
910
911 /*
912 * We don't want to deal with the case of keeping inode data inline yet.
913 * So sending the data fork of a regular inode is invalid.
914 */
915 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
916 ifp = XFS_IFORK_PTR(ip, whichfork);
917 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
918
919 if (!ifp->if_bytes) {
920 xfs_bmap_local_to_extents_empty(ip, whichfork);
921 flags = XFS_ILOG_CORE;
922 goto done;
923 }
924
925 flags = 0;
926 error = 0;
927 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
928 XFS_IFINLINE);
929 memset(&args, 0, sizeof(args));
930 args.tp = tp;
931 args.mp = ip->i_mount;
932 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
933 args.firstblock = *firstblock;
934 /*
935 * Allocate a block. We know we need only one, since the
936 * file currently fits in an inode.
937 */
938 if (*firstblock == NULLFSBLOCK) {
939 try_another_ag:
940 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
941 args.type = XFS_ALLOCTYPE_START_BNO;
942 } else {
943 args.fsbno = *firstblock;
944 args.type = XFS_ALLOCTYPE_NEAR_BNO;
945 }
946 args.total = total;
947 args.minlen = args.maxlen = args.prod = 1;
948 error = xfs_alloc_vextent(&args);
949 if (error)
950 goto done;
951
952 /*
953 * During a CoW operation, the allocation and bmbt updates occur in
954 * different transactions. The mapping code tries to put new bmbt
955 * blocks near extents being mapped, but the only way to guarantee this
956 * is if the alloc and the mapping happen in a single transaction that
957 * has a block reservation. That isn't the case here, so if we run out
958 * of space we'll try again with another AG.
959 */
960 if (xfs_sb_version_hasreflink(&ip->i_mount->m_sb) &&
961 args.fsbno == NULLFSBLOCK &&
962 args.type == XFS_ALLOCTYPE_NEAR_BNO) {
963 goto try_another_ag;
964 }
965 /* Can't fail, the space was reserved. */
966 ASSERT(args.fsbno != NULLFSBLOCK);
967 ASSERT(args.len == 1);
968 *firstblock = args.fsbno;
969 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
970
971 /*
972 * Initialize the block, copy the data and log the remote buffer.
973 *
974 * The callout is responsible for logging because the remote format
975 * might differ from the local format and thus we don't know how much to
976 * log here. Note that init_fn must also set the buffer log item type
977 * correctly.
978 */
979 init_fn(tp, bp, ip, ifp);
980
981 /* account for the change in fork size */
982 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
983 xfs_bmap_local_to_extents_empty(ip, whichfork);
984 flags |= XFS_ILOG_CORE;
985
986 xfs_iext_add(ifp, 0, 1);
987 ep = xfs_iext_get_ext(ifp, 0);
988 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
989 trace_xfs_bmap_post_update(ip, 0,
990 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
991 _THIS_IP_);
992 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
993 ip->i_d.di_nblocks = 1;
994 xfs_trans_mod_dquot_byino(tp, ip,
995 XFS_TRANS_DQ_BCOUNT, 1L);
996 flags |= xfs_ilog_fext(whichfork);
997
998 done:
999 *logflagsp = flags;
1000 return error;
1001 }
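
/*
 * An example of an init_fn callout is xfs_symlink_local_to_remote(), which
 * xfs_bmap_add_attrfork_local() below passes in to copy an inline symlink
 * target into the newly allocated remote block and log it.
 */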
1002
1003 /*
1004 * Called from xfs_bmap_add_attrfork to handle btree format files.
1005 */
1006 STATIC int /* error */
1007 xfs_bmap_add_attrfork_btree(
1008 xfs_trans_t *tp, /* transaction pointer */
1009 xfs_inode_t *ip, /* incore inode pointer */
1010 xfs_fsblock_t *firstblock, /* first block allocated */
1011 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1012 int *flags) /* inode logging flags */
1013 {
1014 xfs_btree_cur_t *cur; /* btree cursor */
1015 int error; /* error return value */
1016 xfs_mount_t *mp; /* file system mount struct */
1017 int stat; /* newroot status */
1018
1019 mp = ip->i_mount;
1020 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
1021 *flags |= XFS_ILOG_DBROOT;
1022 else {
1023 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
1024 cur->bc_private.b.dfops = dfops;
1025 cur->bc_private.b.firstblock = *firstblock;
1026 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
1027 goto error0;
1028 /* must be at least one entry */
1029 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
1030 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
1031 goto error0;
1032 if (stat == 0) {
1033 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1034 return -ENOSPC;
1035 }
1036 *firstblock = cur->bc_private.b.firstblock;
1037 cur->bc_private.b.allocated = 0;
1038 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1039 }
1040 return 0;
1041 error0:
1042 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1043 return error;
1044 }
1045
1046 /*
1047 * Called from xfs_bmap_add_attrfork to handle extents format files.
1048 */
1049 STATIC int /* error */
1050 xfs_bmap_add_attrfork_extents(
1051 xfs_trans_t *tp, /* transaction pointer */
1052 xfs_inode_t *ip, /* incore inode pointer */
1053 xfs_fsblock_t *firstblock, /* first block allocated */
1054 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1055 int *flags) /* inode logging flags */
1056 {
1057 xfs_btree_cur_t *cur; /* bmap btree cursor */
1058 int error; /* error return value */
1059
1060 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
1061 return 0;
1062 cur = NULL;
1063 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
1064 flags, XFS_DATA_FORK);
1065 if (cur) {
1066 cur->bc_private.b.allocated = 0;
1067 xfs_btree_del_cursor(cur,
1068 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
1069 }
1070 return error;
1071 }
1072
1073 /*
1074 * Called from xfs_bmap_add_attrfork to handle local format files. Each
1075 * different data fork content type needs a different callout to do the
1076 * conversion. Some are basic and only require special block initialisation
1077 * callouts for the data formatting; others (directories) are so specialised they
1078 * handle everything themselves.
1079 *
1080 * XXX (dgc): investigate whether directory conversion can use the generic
1081 * formatting callout. It should be possible - it's just a very complex
1082 * formatter.
1083 */
1084 STATIC int /* error */
1085 xfs_bmap_add_attrfork_local(
1086 xfs_trans_t *tp, /* transaction pointer */
1087 xfs_inode_t *ip, /* incore inode pointer */
1088 xfs_fsblock_t *firstblock, /* first block allocated */
1089 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1090 int *flags) /* inode logging flags */
1091 {
1092 xfs_da_args_t dargs; /* args for dir/attr code */
1093
1094 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
1095 return 0;
1096
1097 if (S_ISDIR(VFS_I(ip)->i_mode)) {
1098 memset(&dargs, 0, sizeof(dargs));
1099 dargs.geo = ip->i_mount->m_dir_geo;
1100 dargs.dp = ip;
1101 dargs.firstblock = firstblock;
1102 dargs.dfops = dfops;
1103 dargs.total = dargs.geo->fsbcount;
1104 dargs.whichfork = XFS_DATA_FORK;
1105 dargs.trans = tp;
1106 return xfs_dir2_sf_to_block(&dargs);
1107 }
1108
1109 if (S_ISLNK(VFS_I(ip)->i_mode))
1110 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
1111 flags, XFS_DATA_FORK,
1112 xfs_symlink_local_to_remote);
1113
1114 /* should only be called for types that support local format data */
1115 ASSERT(0);
1116 return -EFSCORRUPTED;
1117 }
1118
1119 /*
1120 * Convert inode from non-attributed to attributed.
1121 * Must not be in a transaction, ip must not be locked.
1122 */
1123 int /* error code */
1124 xfs_bmap_add_attrfork(
1125 xfs_inode_t *ip, /* incore inode pointer */
1126 int size, /* space new attribute needs */
1127 int rsvd) /* xact may use reserved blks */
1128 {
1129 xfs_fsblock_t firstblock; /* 1st block/ag allocated */
1130 struct xfs_defer_ops dfops; /* freed extent records */
1131 xfs_mount_t *mp; /* mount structure */
1132 xfs_trans_t *tp; /* transaction pointer */
1133 int blks; /* space reservation */
1134 int version = 1; /* superblock attr version */
1135 int logflags; /* logging flags */
1136 int error; /* error return value */
1137
1138 ASSERT(XFS_IFORK_Q(ip) == 0);
1139
1140 mp = ip->i_mount;
1141 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1142
1143 blks = XFS_ADDAFORK_SPACE_RES(mp);
1144
1145 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
1146 rsvd ? XFS_TRANS_RESERVE : 0, &tp);
1147 if (error)
1148 return error;
1149
1150 xfs_ilock(ip, XFS_ILOCK_EXCL);
1151 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
1152 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
1153 XFS_QMOPT_RES_REGBLKS);
1154 if (error)
1155 goto trans_cancel;
1156 if (XFS_IFORK_Q(ip))
1157 goto trans_cancel;
1158 if (ip->i_d.di_anextents != 0) {
1159 error = -EFSCORRUPTED;
1160 goto trans_cancel;
1161 }
1162 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
1163 /*
1164 * For inodes coming from pre-6.2 filesystems.
1165 */
1166 ASSERT(ip->i_d.di_aformat == 0);
1167 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1168 }
1169
1170 xfs_trans_ijoin(tp, ip, 0);
1171 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1172
1173 switch (ip->i_d.di_format) {
1174 case XFS_DINODE_FMT_DEV:
1175 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
1176 break;
1177 case XFS_DINODE_FMT_UUID:
1178 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
1179 break;
1180 case XFS_DINODE_FMT_LOCAL:
1181 case XFS_DINODE_FMT_EXTENTS:
1182 case XFS_DINODE_FMT_BTREE:
1183 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1184 if (!ip->i_d.di_forkoff)
1185 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
1186 else if (mp->m_flags & XFS_MOUNT_ATTR2)
1187 version = 2;
1188 break;
1189 default:
1190 ASSERT(0);
1191 error = -EINVAL;
1192 goto trans_cancel;
1193 }
1194
1195 ASSERT(ip->i_afp == NULL);
1196 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
1197 ip->i_afp->if_flags = XFS_IFEXTENTS;
1198 logflags = 0;
1199 xfs_defer_init(&dfops, &firstblock);
1200 switch (ip->i_d.di_format) {
1201 case XFS_DINODE_FMT_LOCAL:
1202 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
1203 &logflags);
1204 break;
1205 case XFS_DINODE_FMT_EXTENTS:
1206 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
1207 &dfops, &logflags);
1208 break;
1209 case XFS_DINODE_FMT_BTREE:
1210 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
1211 &logflags);
1212 break;
1213 default:
1214 error = 0;
1215 break;
1216 }
1217 if (logflags)
1218 xfs_trans_log_inode(tp, ip, logflags);
1219 if (error)
1220 goto bmap_cancel;
1221 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1222 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1223 bool log_sb = false;
1224
1225 spin_lock(&mp->m_sb_lock);
1226 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
1227 xfs_sb_version_addattr(&mp->m_sb);
1228 log_sb = true;
1229 }
1230 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
1231 xfs_sb_version_addattr2(&mp->m_sb);
1232 log_sb = true;
1233 }
1234 spin_unlock(&mp->m_sb_lock);
1235 if (log_sb)
1236 xfs_log_sb(tp);
1237 }
1238
1239 error = xfs_defer_finish(&tp, &dfops, NULL);
1240 if (error)
1241 goto bmap_cancel;
1242 error = xfs_trans_commit(tp);
1243 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1244 return error;
1245
1246 bmap_cancel:
1247 xfs_defer_cancel(&dfops);
1248 trans_cancel:
1249 xfs_trans_cancel(tp);
1250 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1251 return error;
1252 }
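
/*
 * A minimal usage sketch (not taken from a real caller): a caller that needs
 * room for "size" bytes of attribute data and may not dip into reserved
 * blocks would do something like the following, with the inode unlocked and
 * outside any transaction as required above:
 *
 *	if (!XFS_IFORK_Q(ip)) {
 *		error = xfs_bmap_add_attrfork(ip, size, 0);
 *		if (error)
 *			return error;
 *	}
 */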
1253
1254 /*
1255 * Internal and external extent tree search functions.
1256 */
1257
1258 /*
1259 * Read in the extents to if_extents.
1260 * All inode fields are set up by caller, we just traverse the btree
1261 * and copy the records in. If the file system cannot contain unwritten
1262 * extents, the records are checked to ensure no "state" flags are set.
1263 */
1264 int /* error */
1265 xfs_bmap_read_extents(
1266 xfs_trans_t *tp, /* transaction pointer */
1267 xfs_inode_t *ip, /* incore inode */
1268 int whichfork) /* data or attr fork */
1269 {
1270 struct xfs_btree_block *block; /* current btree block */
1271 xfs_fsblock_t bno; /* block # of "block" */
1272 xfs_buf_t *bp; /* buffer for "block" */
1273 int error; /* error return value */
1274 xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */
1275 xfs_extnum_t i, j; /* index into the extents list */
1276 xfs_ifork_t *ifp; /* fork structure */
1277 int level; /* btree level, for checking */
1278 xfs_mount_t *mp; /* file system mount structure */
1279 __be64 *pp; /* pointer to block address */
1280 /* REFERENCED */
1281 xfs_extnum_t room; /* number of entries there's room for */
1282
1283 mp = ip->i_mount;
1284 ifp = XFS_IFORK_PTR(ip, whichfork);
1285 exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
1286 XFS_EXTFMT_INODE(ip);
1287 block = ifp->if_broot;
1288 /*
1289 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1290 */
1291 level = be16_to_cpu(block->bb_level);
1292 ASSERT(level > 0);
1293 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1294 bno = be64_to_cpu(*pp);
1295
1296 /*
1297 * Go down the tree until leaf level is reached, following the first
1298 * pointer (leftmost) at each level.
1299 */
1300 while (level-- > 0) {
1301 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1302 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1303 if (error)
1304 return error;
1305 block = XFS_BUF_TO_BLOCK(bp);
1306 if (level == 0)
1307 break;
1308 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
1309 bno = be64_to_cpu(*pp);
1310 XFS_WANT_CORRUPTED_GOTO(mp,
1311 XFS_FSB_SANITY_CHECK(mp, bno), error0);
1312 xfs_trans_brelse(tp, bp);
1313 }
1314 /*
1315 * Here with bp and block set to the leftmost leaf node in the tree.
1316 */
1317 room = xfs_iext_count(ifp);
1318 i = 0;
1319 /*
1320 * Loop over all leaf nodes. Copy information to the extent records.
1321 */
1322 for (;;) {
1323 xfs_bmbt_rec_t *frp;
1324 xfs_fsblock_t nextbno;
1325 xfs_extnum_t num_recs;
1326 xfs_extnum_t start;
1327
1328 num_recs = xfs_btree_get_numrecs(block);
1329 if (unlikely(i + num_recs > room)) {
1330 ASSERT(i + num_recs <= room);
1331 xfs_warn(ip->i_mount,
1332 "corrupt dinode %Lu, (btree extents).",
1333 (unsigned long long) ip->i_ino);
1334 XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
1335 XFS_ERRLEVEL_LOW, ip->i_mount, block);
1336 goto error0;
1337 }
1338 /*
1339 * Read-ahead the next leaf block, if any.
1340 */
1341 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
1342 if (nextbno != NULLFSBLOCK)
1343 xfs_btree_reada_bufl(mp, nextbno, 1,
1344 &xfs_bmbt_buf_ops);
1345 /*
1346 * Copy records into the extent records.
1347 */
1348 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1349 start = i;
1350 for (j = 0; j < num_recs; j++, i++, frp++) {
1351 xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
1352 trp->l0 = be64_to_cpu(frp->l0);
1353 trp->l1 = be64_to_cpu(frp->l1);
1354 }
1355 if (exntf == XFS_EXTFMT_NOSTATE) {
1356 /*
1357 * Check all attribute bmap btree records and
1358 * any "older" data bmap btree records for a
1359 * set bit in the "extent flag" position.
1360 */
1361 if (unlikely(xfs_check_nostate_extents(ifp,
1362 start, num_recs))) {
1363 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
1364 XFS_ERRLEVEL_LOW,
1365 ip->i_mount);
1366 goto error0;
1367 }
1368 }
1369 xfs_trans_brelse(tp, bp);
1370 bno = nextbno;
1371 /*
1372 * If we've reached the end, stop.
1373 */
1374 if (bno == NULLFSBLOCK)
1375 break;
1376 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1377 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1378 if (error)
1379 return error;
1380 block = XFS_BUF_TO_BLOCK(bp);
1381 }
1382 if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
1383 return -EFSCORRUPTED;
1384 ASSERT(i == xfs_iext_count(ifp));
1385 XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
1386 return 0;
1387 error0:
1388 xfs_trans_brelse(tp, bp);
1389 return -EFSCORRUPTED;
1390 }
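
/*
 * Callers normally reach this through xfs_iread_extents(), which checks
 * XFS_IFEXTENTS and fills in the in-core extent list on first use; see the
 * xfs_iread_extents() calls in xfs_bmap_first_unused() and
 * xfs_bmap_last_before() below for the usual pattern.
 */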
1391
1392
1393 /*
1394 * Search the extent records for the entry containing block bno.
1395 * If bno lies in a hole, point to the next entry. If bno lies
1396 * past eof, *eofp will be set, and *prevp will contain the last
1397 * entry (null if none). Else, *lastxp will be set to the index
1398 * of the found entry; *gotp will contain the entry.
1399 */
1400 STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
1401 xfs_bmap_search_multi_extents(
1402 xfs_ifork_t *ifp, /* inode fork pointer */
1403 xfs_fileoff_t bno, /* block number searched for */
1404 int *eofp, /* out: end of file found */
1405 xfs_extnum_t *lastxp, /* out: last extent index */
1406 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
1407 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
1408 {
1409 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
1410 xfs_extnum_t lastx; /* last extent index */
1411
1412 /*
1413 * Initialize the extent entry structure to catch access to
1414 * uninitialized br_startblock field.
1415 */
1416 gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
1417 gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
1418 gotp->br_state = XFS_EXT_INVALID;
1419 gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
1420 prevp->br_startoff = NULLFILEOFF;
1421
1422 ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
1423 if (lastx > 0) {
1424 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
1425 }
1426 if (lastx < xfs_iext_count(ifp)) {
1427 xfs_bmbt_get_all(ep, gotp);
1428 *eofp = 0;
1429 } else {
1430 if (lastx > 0) {
1431 *gotp = *prevp;
1432 }
1433 *eofp = 1;
1434 ep = NULL;
1435 }
1436 *lastxp = lastx;
1437 return ep;
1438 }
1439
1440 /*
1441 * Search the extents list for the inode, for the extent containing bno.
1442 * If bno lies in a hole, point to the next entry. If bno lies past eof,
1443 * *eofp will be set, and *prevp will contain the last entry (null if none).
1444 * Else, *lastxp will be set to the index of the found
1445 * entry; *gotp will contain the entry.
1446 */
1447 xfs_bmbt_rec_host_t * /* pointer to found extent entry */
1448 xfs_bmap_search_extents(
1449 xfs_inode_t *ip, /* incore inode pointer */
1450 xfs_fileoff_t bno, /* block number searched for */
1451 int fork, /* data or attr fork */
1452 int *eofp, /* out: end of file found */
1453 xfs_extnum_t *lastxp, /* out: last extent index */
1454 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
1455 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
1456 {
1457 xfs_ifork_t *ifp; /* inode fork pointer */
1458 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
1459
1460 XFS_STATS_INC(ip->i_mount, xs_look_exlist);
1461 ifp = XFS_IFORK_PTR(ip, fork);
1462
1463 ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
1464
1465 if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
1466 !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
1467 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
1468 "Access to block zero in inode %llu "
1469 "start_block: %llx start_off: %llx "
1470 "blkcnt: %llx extent-state: %x lastx: %x",
1471 (unsigned long long)ip->i_ino,
1472 (unsigned long long)gotp->br_startblock,
1473 (unsigned long long)gotp->br_startoff,
1474 (unsigned long long)gotp->br_blockcount,
1475 gotp->br_state, *lastxp);
1476 *lastxp = NULLEXTNUM;
1477 *eofp = 1;
1478 return NULL;
1479 }
1480 return ep;
1481 }
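
/*
 * Typical use (compare xfs_bmap_last_before() below): pass the file offset
 * of interest and let the search fill in the surrounding extent records:
 *
 *	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx,
 *				     &got, &prev);
 *	if (eof || xfs_bmbt_get_startoff(ep) > bno)
 *		... bno is past EOF or falls in a hole before "got" ...
 */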
1482
1483 /*
1484 * Returns the file-relative block number of the first unused block(s)
1485 * in the file with at least "len" logically contiguous blocks free.
1486 * This is the lowest-address hole if the file has holes, else the first block
1487 * past the end of file.
1488 * Return 0 if the file is currently local (in-inode).
1489 */
1490 int /* error */
1491 xfs_bmap_first_unused(
1492 xfs_trans_t *tp, /* transaction pointer */
1493 xfs_inode_t *ip, /* incore inode */
1494 xfs_extlen_t len, /* size of hole to find */
1495 xfs_fileoff_t *first_unused, /* unused block */
1496 int whichfork) /* data or attr fork */
1497 {
1498 int error; /* error return value */
1499 int idx; /* extent record index */
1500 xfs_ifork_t *ifp; /* inode fork pointer */
1501 xfs_fileoff_t lastaddr; /* last block number seen */
1502 xfs_fileoff_t lowest; /* lowest useful block */
1503 xfs_fileoff_t max; /* starting useful block */
1504 xfs_fileoff_t off; /* offset for this block */
1505 xfs_extnum_t nextents; /* number of extent entries */
1506
1507 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
1508 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
1509 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
1510 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1511 *first_unused = 0;
1512 return 0;
1513 }
1514 ifp = XFS_IFORK_PTR(ip, whichfork);
1515 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
1516 (error = xfs_iread_extents(tp, ip, whichfork)))
1517 return error;
1518 lowest = *first_unused;
1519 nextents = xfs_iext_count(ifp);
1520 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
1521 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
1522 off = xfs_bmbt_get_startoff(ep);
1523 /*
1524 * See if the hole before this extent will work.
1525 */
1526 if (off >= lowest + len && off - max >= len) {
1527 *first_unused = max;
1528 return 0;
1529 }
1530 lastaddr = off + xfs_bmbt_get_blockcount(ep);
1531 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1532 }
1533 *first_unused = max;
1534 return 0;
1535 }
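
/*
 * Sketch of a typical call: the caller seeds *first_unused with the lowest
 * acceptable offset (the search never returns anything below it) and asks
 * for "count" logically contiguous free blocks ("lowest_acceptable" and
 * "count" are hypothetical names here):
 *
 *	xfs_fileoff_t	bno = lowest_acceptable;
 *
 *	error = xfs_bmap_first_unused(tp, ip, count, &bno, XFS_DATA_FORK);
 *	if (error)
 *		return error;
 *	... bno now points at a hole (or EOF) with at least "count" blocks ...
 */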
1536
1537 /*
1538 * Returns the file-relative block number of the last block - 1 before
1539 * last_block (input value) in the file.
1540 * This is not based on i_size, it is based on the extent records.
1541 * Returns 0 for local files, as they do not have extent records.
1542 */
1543 int /* error */
1544 xfs_bmap_last_before(
1545 xfs_trans_t *tp, /* transaction pointer */
1546 xfs_inode_t *ip, /* incore inode */
1547 xfs_fileoff_t *last_block, /* last block */
1548 int whichfork) /* data or attr fork */
1549 {
1550 xfs_fileoff_t bno; /* input file offset */
1551 int eof; /* hit end of file */
1552 xfs_bmbt_rec_host_t *ep; /* pointer to last extent */
1553 int error; /* error return value */
1554 xfs_bmbt_irec_t got; /* current extent value */
1555 xfs_ifork_t *ifp; /* inode fork pointer */
1556 xfs_extnum_t lastx; /* last extent used */
1557 xfs_bmbt_irec_t prev; /* previous extent value */
1558
1559 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1560 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
1561 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
1562 return -EIO;
1563 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1564 *last_block = 0;
1565 return 0;
1566 }
1567 ifp = XFS_IFORK_PTR(ip, whichfork);
1568 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
1569 (error = xfs_iread_extents(tp, ip, whichfork)))
1570 return error;
1571 bno = *last_block - 1;
1572 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
1573 &prev);
1574 if (eof || xfs_bmbt_get_startoff(ep) > bno) {
1575 if (prev.br_startoff == NULLFILEOFF)
1576 *last_block = 0;
1577 else
1578 *last_block = prev.br_startoff + prev.br_blockcount;
1579 }
1580 /*
1581 * Otherwise *last_block is already the right answer.
1582 */
1583 return 0;
1584 }
1585
1586 int
1587 xfs_bmap_last_extent(
1588 struct xfs_trans *tp,
1589 struct xfs_inode *ip,
1590 int whichfork,
1591 struct xfs_bmbt_irec *rec,
1592 int *is_empty)
1593 {
1594 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1595 int error;
1596 int nextents;
1597
1598 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1599 error = xfs_iread_extents(tp, ip, whichfork);
1600 if (error)
1601 return error;
1602 }
1603
1604 nextents = xfs_iext_count(ifp);
1605 if (nextents == 0) {
1606 *is_empty = 1;
1607 return 0;
1608 }
1609
1610 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
1611 *is_empty = 0;
1612 return 0;
1613 }
1614
1615 /*
1616 * Check the last inode extent to determine whether this allocation will result
1617 * in blocks being allocated at the end of the file. When we allocate new data
1618 * blocks at the end of the file which do not start at the previous data block,
1619 * we will try to align the new blocks at stripe unit boundaries.
1620 *
1621 * Returns 1 in bma->aeof if the file (fork) is empty, since any new write
1622 * will land at or past the EOF.
1623 */
1624 STATIC int
1625 xfs_bmap_isaeof(
1626 struct xfs_bmalloca *bma,
1627 int whichfork)
1628 {
1629 struct xfs_bmbt_irec rec;
1630 int is_empty;
1631 int error;
1632
1633 bma->aeof = 0;
1634 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1635 &is_empty);
1636 if (error)
1637 return error;
1638
1639 if (is_empty) {
1640 bma->aeof = 1;
1641 return 0;
1642 }
1643
1644 /*
1645 * Check if we are allocating at or past the last extent, or at least into
1646 * the last delayed allocated extent.
1647 */
1648 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1649 (bma->offset >= rec.br_startoff &&
1650 isnullstartblock(rec.br_startblock));
1651 return 0;
1652 }
1653
1654 /*
1655 * Returns the file-relative block number of the first block past eof in
1656 * the file. This is not based on i_size, it is based on the extent records.
1657 * Returns 0 for local files, as they do not have extent records.
1658 */
1659 int
1660 xfs_bmap_last_offset(
1661 struct xfs_inode *ip,
1662 xfs_fileoff_t *last_block,
1663 int whichfork)
1664 {
1665 struct xfs_bmbt_irec rec;
1666 int is_empty;
1667 int error;
1668
1669 *last_block = 0;
1670
1671 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
1672 return 0;
1673
1674 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1675 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1676 return -EIO;
1677
1678 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1679 if (error || is_empty)
1680 return error;
1681
1682 *last_block = rec.br_startoff + rec.br_blockcount;
1683 return 0;
1684 }
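/*
 * Example (hypothetical): a data fork whose only extent maps file blocks
 * [0, 8) yields *last_block == 8, regardless of what di_size says; the
 * answer comes purely from the extent records, as noted above.
 */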
1685
1686 /*
1687 * Returns whether the selected fork of the inode has exactly one
1688 * block or not. For the data fork we check this matches di_size,
1689 * implying the file's range is 0..bsize-1.
1690 */
1691 int /* 1=>1 block, 0=>otherwise */
1692 xfs_bmap_one_block(
1693 xfs_inode_t *ip, /* incore inode */
1694 int whichfork) /* data or attr fork */
1695 {
1696 xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */
1697 xfs_ifork_t *ifp; /* inode fork pointer */
1698 int rval; /* return value */
1699 xfs_bmbt_irec_t s; /* internal version of extent */
1700
1701 #ifndef DEBUG
1702 if (whichfork == XFS_DATA_FORK)
1703 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
1704 #endif /* !DEBUG */
1705 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
1706 return 0;
1707 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1708 return 0;
1709 ifp = XFS_IFORK_PTR(ip, whichfork);
1710 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1711 ep = xfs_iext_get_ext(ifp, 0);
1712 xfs_bmbt_get_all(ep, &s);
1713 rval = s.br_startoff == 0 && s.br_blockcount == 1;
1714 if (rval && whichfork == XFS_DATA_FORK)
1715 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
1716 return rval;
1717 }
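/*
 * Note on the two paths above: in non-DEBUG builds the data fork case is
 * answered by the cheap XFS_ISIZE(ip) == block-size comparison; DEBUG
 * builds fall through to the extent-record check and then assert that the
 * two views agree.
 */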
1718
1719 /*
1720 * Extent tree manipulation functions used during allocation.
1721 */
1722
1723 /*
1724 * Convert a delayed allocation to a real allocation.
1725 */
1726 STATIC int /* error */
1727 xfs_bmap_add_extent_delay_real(
1728 struct xfs_bmalloca *bma,
1729 int whichfork)
1730 {
1731 struct xfs_bmbt_irec *new = &bma->got;
1732 int diff; /* temp value */
1733 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
1734 int error; /* error return value */
1735 int i; /* temp state */
1736 xfs_ifork_t *ifp; /* inode fork pointer */
1737 xfs_fileoff_t new_endoff; /* end offset of new entry */
1738 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1739 /* left is 0, right is 1, prev is 2 */
1740 int rval=0; /* return value (logging flags) */
1741 int state = 0;/* state bits, accessed thru macros */
1742 xfs_filblks_t da_new; /* new count del alloc blocks used */
1743 xfs_filblks_t da_old; /* old count del alloc blocks used */
1744 xfs_filblks_t temp=0; /* value for da_new calculations */
1745 xfs_filblks_t temp2=0;/* value for da_new calculations */
1746 int tmp_rval; /* partial logging flags */
1747 struct xfs_mount *mp;
1748 xfs_extnum_t *nextents;
1749
1750 mp = bma->ip->i_mount;
1751 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1752 ASSERT(whichfork != XFS_ATTR_FORK);
1753 nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
1754 &bma->ip->i_d.di_nextents);
1755
1756 ASSERT(bma->idx >= 0);
1757 ASSERT(bma->idx <= xfs_iext_count(ifp));
1758 ASSERT(!isnullstartblock(new->br_startblock));
1759 ASSERT(!bma->cur ||
1760 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1761
1762 XFS_STATS_INC(mp, xs_add_exlist);
1763
1764 #define LEFT r[0]
1765 #define RIGHT r[1]
1766 #define PREV r[2]
1767
1768 if (whichfork == XFS_COW_FORK)
1769 state |= BMAP_COWFORK;
1770
1771 /*
1772 * Set up a bunch of variables to make the tests simpler.
1773 */
1774 ep = xfs_iext_get_ext(ifp, bma->idx);
1775 xfs_bmbt_get_all(ep, &PREV);
1776 new_endoff = new->br_startoff + new->br_blockcount;
1777 ASSERT(PREV.br_startoff <= new->br_startoff);
1778 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1779
1780 da_old = startblockval(PREV.br_startblock);
1781 da_new = 0;
1782
1783 /*
1784 * Set flags determining what part of the previous delayed allocation
1785 * extent is being replaced by a real allocation.
1786 */
1787 if (PREV.br_startoff == new->br_startoff)
1788 state |= BMAP_LEFT_FILLING;
1789 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1790 state |= BMAP_RIGHT_FILLING;
1791
1792 /*
1793 * Check and set flags if this segment has a left neighbor.
1794 * Don't set contiguous if the combined extent would be too large.
1795 */
1796 if (bma->idx > 0) {
1797 state |= BMAP_LEFT_VALID;
1798 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
1799
1800 if (isnullstartblock(LEFT.br_startblock))
1801 state |= BMAP_LEFT_DELAY;
1802 }
1803
1804 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1805 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1806 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1807 LEFT.br_state == new->br_state &&
1808 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1809 state |= BMAP_LEFT_CONTIG;
1810
1811 /*
1812 * Check and set flags if this segment has a right neighbor.
1813 * Don't set contiguous if the combined extent would be too large.
1814 * Also check for all-three-contiguous being too large.
1815 */
1816 if (bma->idx < xfs_iext_count(ifp) - 1) {
1817 state |= BMAP_RIGHT_VALID;
1818 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
1819
1820 if (isnullstartblock(RIGHT.br_startblock))
1821 state |= BMAP_RIGHT_DELAY;
1822 }
1823
1824 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1825 new_endoff == RIGHT.br_startoff &&
1826 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1827 new->br_state == RIGHT.br_state &&
1828 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1829 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1830 BMAP_RIGHT_FILLING)) !=
1831 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1832 BMAP_RIGHT_FILLING) ||
1833 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1834 <= MAXEXTLEN))
1835 state |= BMAP_RIGHT_CONTIG;
1836
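/*
 * Illustrative example (hypothetical extents): suppose PREV is a delalloc
 * extent covering file blocks [10, 20).  If the new real allocation covers
 * all of [10, 20) and both neighbouring real extents turn out to be
 * contiguous with it (adjacent in file offset and on disk, same state),
 * then LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG and RIGHT_CONTIG are all
 * set and the first case below merges three records into one.  If the new
 * allocation covers only [10, 15), just LEFT_FILLING (plus possibly
 * LEFT_CONTIG) is set and PREV is trimmed rather than removed.
 */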
1837 error = 0;
1838 /*
1839 * Switch out based on the FILLING and CONTIG state bits.
1840 */
1841 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1842 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1843 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1844 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1845 /*
1846 * Filling in all of a previously delayed allocation extent.
1847 * The left and right neighbors are both contiguous with new.
1848 */
1849 bma->idx--;
1850 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1851 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1852 LEFT.br_blockcount + PREV.br_blockcount +
1853 RIGHT.br_blockcount);
1854 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1855
1856 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
1857 (*nextents)--;
1858 if (bma->cur == NULL)
1859 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1860 else {
1861 rval = XFS_ILOG_CORE;
1862 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1863 RIGHT.br_startblock,
1864 RIGHT.br_blockcount, &i);
1865 if (error)
1866 goto done;
1867 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1868 error = xfs_btree_delete(bma->cur, &i);
1869 if (error)
1870 goto done;
1871 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1872 error = xfs_btree_decrement(bma->cur, 0, &i);
1873 if (error)
1874 goto done;
1875 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1876 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1877 LEFT.br_startblock,
1878 LEFT.br_blockcount +
1879 PREV.br_blockcount +
1880 RIGHT.br_blockcount, LEFT.br_state);
1881 if (error)
1882 goto done;
1883 }
1884 break;
1885
1886 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1887 /*
1888 * Filling in all of a previously delayed allocation extent.
1889 * The left neighbor is contiguous, the right is not.
1890 */
1891 bma->idx--;
1892
1893 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1894 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1895 LEFT.br_blockcount + PREV.br_blockcount);
1896 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1897
1898 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1899 if (bma->cur == NULL)
1900 rval = XFS_ILOG_DEXT;
1901 else {
1902 rval = 0;
1903 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1904 LEFT.br_startblock, LEFT.br_blockcount,
1905 &i);
1906 if (error)
1907 goto done;
1908 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1909 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1910 LEFT.br_startblock,
1911 LEFT.br_blockcount +
1912 PREV.br_blockcount, LEFT.br_state);
1913 if (error)
1914 goto done;
1915 }
1916 break;
1917
1918 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1919 /*
1920 * Filling in all of a previously delayed allocation extent.
1921 * The right neighbor is contiguous, the left is not.
1922 */
1923 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1924 xfs_bmbt_set_startblock(ep, new->br_startblock);
1925 xfs_bmbt_set_blockcount(ep,
1926 PREV.br_blockcount + RIGHT.br_blockcount);
1927 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1928
1929 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1930 if (bma->cur == NULL)
1931 rval = XFS_ILOG_DEXT;
1932 else {
1933 rval = 0;
1934 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1935 RIGHT.br_startblock,
1936 RIGHT.br_blockcount, &i);
1937 if (error)
1938 goto done;
1939 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1940 error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
1941 new->br_startblock,
1942 PREV.br_blockcount +
1943 RIGHT.br_blockcount, PREV.br_state);
1944 if (error)
1945 goto done;
1946 }
1947 break;
1948
1949 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1950 /*
1951 * Filling in all of a previously delayed allocation extent.
1952 * Neither the left nor right neighbors are contiguous with
1953 * the new one.
1954 */
1955 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1956 xfs_bmbt_set_startblock(ep, new->br_startblock);
1957 xfs_bmbt_set_state(ep, new->br_state);
1958 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1959
1960 (*nextents)++;
1961 if (bma->cur == NULL)
1962 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1963 else {
1964 rval = XFS_ILOG_CORE;
1965 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1966 new->br_startblock, new->br_blockcount,
1967 &i);
1968 if (error)
1969 goto done;
1970 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1971 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1972 error = xfs_btree_insert(bma->cur, &i);
1973 if (error)
1974 goto done;
1975 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1976 }
1977 break;
1978
1979 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1980 /*
1981 * Filling in the first part of a previous delayed allocation.
1982 * The left neighbor is contiguous.
1983 */
1984 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1985 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
1986 LEFT.br_blockcount + new->br_blockcount);
1987 xfs_bmbt_set_startoff(ep,
1988 PREV.br_startoff + new->br_blockcount);
1989 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1990
1991 temp = PREV.br_blockcount - new->br_blockcount;
1992 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1993 xfs_bmbt_set_blockcount(ep, temp);
1994 if (bma->cur == NULL)
1995 rval = XFS_ILOG_DEXT;
1996 else {
1997 rval = 0;
1998 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1999 LEFT.br_startblock, LEFT.br_blockcount,
2000 &i);
2001 if (error)
2002 goto done;
2003 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2004 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
2005 LEFT.br_startblock,
2006 LEFT.br_blockcount +
2007 new->br_blockcount,
2008 LEFT.br_state);
2009 if (error)
2010 goto done;
2011 }
2012 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2013 startblockval(PREV.br_startblock));
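/*
 * Note (explanatory, not functional): the remaining delalloc piece keeps
 * the smaller of its worst-case indirect-block requirement and whatever
 * was already reserved for PREV, so the reservation can only shrink here;
 * any surplus is released against fdblocks at the bottom of the function.
 */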
2014 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2015 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2016
2017 bma->idx--;
2018 break;
2019
2020 case BMAP_LEFT_FILLING:
2021 /*
2022 * Filling in the first part of a previous delayed allocation.
2023 * The left neighbor is not contiguous.
2024 */
2025 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2026 xfs_bmbt_set_startoff(ep, new_endoff);
2027 temp = PREV.br_blockcount - new->br_blockcount;
2028 xfs_bmbt_set_blockcount(ep, temp);
2029 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
2030 (*nextents)++;
2031 if (bma->cur == NULL)
2032 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2033 else {
2034 rval = XFS_ILOG_CORE;
2035 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2036 new->br_startblock, new->br_blockcount,
2037 &i);
2038 if (error)
2039 goto done;
2040 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2041 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2042 error = xfs_btree_insert(bma->cur, &i);
2043 if (error)
2044 goto done;
2045 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2046 }
2047
2048 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2049 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2050 bma->firstblock, bma->dfops,
2051 &bma->cur, 1, &tmp_rval, whichfork);
2052 rval |= tmp_rval;
2053 if (error)
2054 goto done;
2055 }
2056 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2057 startblockval(PREV.br_startblock) -
2058 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2059 ep = xfs_iext_get_ext(ifp, bma->idx + 1);
2060 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2061 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2062 break;
2063
2064 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2065 /*
2066 * Filling in the last part of a previous delayed allocation.
2067 * The right neighbor is contiguous with the new allocation.
2068 */
2069 temp = PREV.br_blockcount - new->br_blockcount;
2070 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2071 xfs_bmbt_set_blockcount(ep, temp);
2072 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
2073 new->br_startoff, new->br_startblock,
2074 new->br_blockcount + RIGHT.br_blockcount,
2075 RIGHT.br_state);
2076 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2077 if (bma->cur == NULL)
2078 rval = XFS_ILOG_DEXT;
2079 else {
2080 rval = 0;
2081 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
2082 RIGHT.br_startblock,
2083 RIGHT.br_blockcount, &i);
2084 if (error)
2085 goto done;
2086 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2087 error = xfs_bmbt_update(bma->cur, new->br_startoff,
2088 new->br_startblock,
2089 new->br_blockcount +
2090 RIGHT.br_blockcount,
2091 RIGHT.br_state);
2092 if (error)
2093 goto done;
2094 }
2095
2096 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2097 startblockval(PREV.br_startblock));
2098 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2099 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2100 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2101
2102 bma->idx++;
2103 break;
2104
2105 case BMAP_RIGHT_FILLING:
2106 /*
2107 * Filling in the last part of a previous delayed allocation.
2108 * The right neighbor is not contiguous.
2109 */
2110 temp = PREV.br_blockcount - new->br_blockcount;
2111 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2112 xfs_bmbt_set_blockcount(ep, temp);
2113 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
2114 (*nextents)++;
2115 if (bma->cur == NULL)
2116 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2117 else {
2118 rval = XFS_ILOG_CORE;
2119 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2120 new->br_startblock, new->br_blockcount,
2121 &i);
2122 if (error)
2123 goto done;
2124 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2125 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2126 error = xfs_btree_insert(bma->cur, &i);
2127 if (error)
2128 goto done;
2129 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2130 }
2131
2132 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2133 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2134 bma->firstblock, bma->dfops, &bma->cur, 1,
2135 &tmp_rval, whichfork);
2136 rval |= tmp_rval;
2137 if (error)
2138 goto done;
2139 }
2140 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2141 startblockval(PREV.br_startblock) -
2142 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2143 ep = xfs_iext_get_ext(ifp, bma->idx);
2144 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2145 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2146
2147 bma->idx++;
2148 break;
2149
2150 case 0:
2151 /*
2152 * Filling in the middle part of a previous delayed allocation.
2153 * Contiguity is impossible here.
2154 * This case is avoided almost all the time.
2155 *
2156 * We start with a delayed allocation:
2157 *
2158 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
2159 * PREV @ idx
2160 *
2161 * and we are allocating:
2162 * +rrrrrrrrrrrrrrrrr+
2163 * new
2164 *
2165 * and we set it up for insertion as:
2166 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
2167 * new
2168 * PREV @ idx LEFT RIGHT
2169 * inserted at idx + 1
2170 */
2171 temp = new->br_startoff - PREV.br_startoff;
2172 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
2173 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
2174 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
2175 LEFT = *new;
2176 RIGHT.br_state = PREV.br_state;
2177 RIGHT.br_startblock = nullstartblock(
2178 (int)xfs_bmap_worst_indlen(bma->ip, temp2));
2179 RIGHT.br_startoff = new_endoff;
2180 RIGHT.br_blockcount = temp2;
2181 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
2182 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
2183 (*nextents)++;
2184 if (bma->cur == NULL)
2185 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2186 else {
2187 rval = XFS_ILOG_CORE;
2188 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2189 new->br_startblock, new->br_blockcount,
2190 &i);
2191 if (error)
2192 goto done;
2193 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2194 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2195 error = xfs_btree_insert(bma->cur, &i);
2196 if (error)
2197 goto done;
2198 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2199 }
2200
2201 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2202 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2203 bma->firstblock, bma->dfops, &bma->cur,
2204 1, &tmp_rval, whichfork);
2205 rval |= tmp_rval;
2206 if (error)
2207 goto done;
2208 }
2209 temp = xfs_bmap_worst_indlen(bma->ip, temp);
2210 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
2211 diff = (int)(temp + temp2 -
2212 (startblockval(PREV.br_startblock) -
2213 (bma->cur ?
2214 bma->cur->bc_private.b.allocated : 0)));
2215 if (diff > 0) {
2216 error = xfs_mod_fdblocks(bma->ip->i_mount,
2217 -((int64_t)diff), false);
2218 ASSERT(!error);
2219 if (error)
2220 goto done;
2221 }
2222
2223 ep = xfs_iext_get_ext(ifp, bma->idx);
2224 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2225 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2226 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2227 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
2228 nullstartblock((int)temp2));
2229 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2230
2231 bma->idx++;
2232 da_new = temp + temp2;
2233 break;
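/*
 * Note on the middle-fill case above: splitting one delalloc record into
 * two can require a larger total indirect-block reservation than the
 * single PREV record carried, so any shortfall (diff > 0) is taken out of
 * the free-block counter before the two remaining delalloc pieces are
 * stamped with their new nullstartblock reservations.
 */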
2234
2235 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2236 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2237 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2238 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2239 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2240 case BMAP_LEFT_CONTIG:
2241 case BMAP_RIGHT_CONTIG:
2242 /*
2243 * These cases are all impossible.
2244 */
2245 ASSERT(0);
2246 }
2247
2248 /* add reverse mapping */
2249 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
2250 if (error)
2251 goto done;
2252
2253 /* convert to a btree if necessary */
2254 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2255 int tmp_logflags; /* partial log flag return val */
2256
2257 ASSERT(bma->cur == NULL);
2258 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2259 bma->firstblock, bma->dfops, &bma->cur,
2260 da_old > 0, &tmp_logflags, whichfork);
2261 bma->logflags |= tmp_logflags;
2262 if (error)
2263 goto done;
2264 }
2265
2266 /* adjust for changes in reserved delayed indirect blocks */
2267 if (da_old || da_new) {
2268 temp = da_new;
2269 if (bma->cur)
2270 temp += bma->cur->bc_private.b.allocated;
2271 if (temp < da_old)
2272 xfs_mod_fdblocks(bma->ip->i_mount,
2273 (int64_t)(da_old - temp), false);
2274 }
2275
2276 /* clear out the allocated field, done with it now in any case. */
2277 if (bma->cur)
2278 bma->cur->bc_private.b.allocated = 0;
2279
2280 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2281 done:
2282 if (whichfork != XFS_COW_FORK)
2283 bma->logflags |= rval;
2284 return error;
2285 #undef LEFT
2286 #undef RIGHT
2287 #undef PREV
2288 }
2289
2290 /*
2291 * Convert an unwritten allocation to a real allocation or vice versa.
2292 */
2293 STATIC int /* error */
2294 xfs_bmap_add_extent_unwritten_real(
2295 struct xfs_trans *tp,
2296 xfs_inode_t *ip, /* incore inode pointer */
2297 int whichfork,
2298 xfs_extnum_t *idx, /* extent number to update/insert */
2299 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2300 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2301 xfs_fsblock_t *first, /* pointer to firstblock variable */
2302 struct xfs_defer_ops *dfops, /* list of extents to be freed */
2303 int *logflagsp) /* inode logging flags */
2304 {
2305 xfs_btree_cur_t *cur; /* btree cursor */
2306 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
2307 int error; /* error return value */
2308 int i; /* temp state */
2309 xfs_ifork_t *ifp; /* inode fork pointer */
2310 xfs_fileoff_t new_endoff; /* end offset of new entry */
2311 xfs_exntst_t newext; /* new extent state */
2312 xfs_exntst_t oldext; /* old extent state */
2313 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2314 /* left is 0, right is 1, prev is 2 */
2315 int rval=0; /* return value (logging flags) */
2316 int state = 0;/* state bits, accessed thru macros */
2317 struct xfs_mount *mp = ip->i_mount;
2318
2319 *logflagsp = 0;
2320
2321 cur = *curp;
2322 ifp = XFS_IFORK_PTR(ip, whichfork);
2323 if (whichfork == XFS_COW_FORK)
2324 state |= BMAP_COWFORK;
2325
2326 ASSERT(*idx >= 0);
2327 ASSERT(*idx <= xfs_iext_count(ifp));
2328 ASSERT(!isnullstartblock(new->br_startblock));
2329
2330 XFS_STATS_INC(mp, xs_add_exlist);
2331
2332 #define LEFT r[0]
2333 #define RIGHT r[1]
2334 #define PREV r[2]
2335
2336 /*
2337 * Set up a bunch of variables to make the tests simpler.
2338 */
2339 error = 0;
2340 ep = xfs_iext_get_ext(ifp, *idx);
2341 xfs_bmbt_get_all(ep, &PREV);
2342 newext = new->br_state;
2343 oldext = (newext == XFS_EXT_UNWRITTEN) ?
2344 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
2345 ASSERT(PREV.br_state == oldext);
2346 new_endoff = new->br_startoff + new->br_blockcount;
2347 ASSERT(PREV.br_startoff <= new->br_startoff);
2348 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
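/*
 * In other words: the extent described by new must lie entirely within
 * PREV, and PREV must currently carry the opposite state (NORM vs.
 * UNWRITTEN) of the one being converted to; that is what the asserts
 * above encode.
 */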
2349
2350 /*
2351 * Set flags determining what part of the previous oldext allocation
2352 * extent is being replaced by a newext allocation.
2353 */
2354 if (PREV.br_startoff == new->br_startoff)
2355 state |= BMAP_LEFT_FILLING;
2356 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2357 state |= BMAP_RIGHT_FILLING;
2358
2359 /*
2360 * Check and set flags if this segment has a left neighbor.
2361 * Don't set contiguous if the combined extent would be too large.
2362 */
2363 if (*idx > 0) {
2364 state |= BMAP_LEFT_VALID;
2365 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
2366
2367 if (isnullstartblock(LEFT.br_startblock))
2368 state |= BMAP_LEFT_DELAY;
2369 }
2370
2371 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2372 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2373 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2374 LEFT.br_state == newext &&
2375 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2376 state |= BMAP_LEFT_CONTIG;
2377
2378 /*
2379 * Check and set flags if this segment has a right neighbor.
2380 * Don't set contiguous if the combined extent would be too large.
2381 * Also check for all-three-contiguous being too large.
2382 */
2383 if (*idx < xfs_iext_count(ifp) - 1) {
2384 state |= BMAP_RIGHT_VALID;
2385 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
2386 if (isnullstartblock(RIGHT.br_startblock))
2387 state |= BMAP_RIGHT_DELAY;
2388 }
2389
2390 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2391 new_endoff == RIGHT.br_startoff &&
2392 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2393 newext == RIGHT.br_state &&
2394 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2395 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2396 BMAP_RIGHT_FILLING)) !=
2397 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2398 BMAP_RIGHT_FILLING) ||
2399 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2400 <= MAXEXTLEN))
2401 state |= BMAP_RIGHT_CONTIG;
2402
2403 /*
2404 * Switch out based on the FILLING and CONTIG state bits.
2405 */
2406 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2407 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2408 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2409 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2410 /*
2411 * Setting all of a previous oldext extent to newext.
2412 * The left and right neighbors are both contiguous with new.
2413 */
2414 --*idx;
2415
2416 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2417 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2418 LEFT.br_blockcount + PREV.br_blockcount +
2419 RIGHT.br_blockcount);
2420 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2421
2422 xfs_iext_remove(ip, *idx + 1, 2, state);
2423 XFS_IFORK_NEXT_SET(ip, whichfork,
2424 XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
2425 if (cur == NULL)
2426 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2427 else {
2428 rval = XFS_ILOG_CORE;
2429 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2430 RIGHT.br_startblock,
2431 RIGHT.br_blockcount, &i)))
2432 goto done;
2433 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2434 if ((error = xfs_btree_delete(cur, &i)))
2435 goto done;
2436 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2437 if ((error = xfs_btree_decrement(cur, 0, &i)))
2438 goto done;
2439 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2440 if ((error = xfs_btree_delete(cur, &i)))
2441 goto done;
2442 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2443 if ((error = xfs_btree_decrement(cur, 0, &i)))
2444 goto done;
2445 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2446 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2447 LEFT.br_startblock,
2448 LEFT.br_blockcount + PREV.br_blockcount +
2449 RIGHT.br_blockcount, LEFT.br_state)))
2450 goto done;
2451 }
2452 break;
2453
2454 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2455 /*
2456 * Setting all of a previous oldext extent to newext.
2457 * The left neighbor is contiguous, the right is not.
2458 */
2459 --*idx;
2460
2461 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2462 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2463 LEFT.br_blockcount + PREV.br_blockcount);
2464 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2465
2466 xfs_iext_remove(ip, *idx + 1, 1, state);
2467 XFS_IFORK_NEXT_SET(ip, whichfork,
2468 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2469 if (cur == NULL)
2470 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2471 else {
2472 rval = XFS_ILOG_CORE;
2473 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2474 PREV.br_startblock, PREV.br_blockcount,
2475 &i)))
2476 goto done;
2477 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2478 if ((error = xfs_btree_delete(cur, &i)))
2479 goto done;
2480 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2481 if ((error = xfs_btree_decrement(cur, 0, &i)))
2482 goto done;
2483 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2484 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2485 LEFT.br_startblock,
2486 LEFT.br_blockcount + PREV.br_blockcount,
2487 LEFT.br_state)))
2488 goto done;
2489 }
2490 break;
2491
2492 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2493 /*
2494 * Setting all of a previous oldext extent to newext.
2495 * The right neighbor is contiguous, the left is not.
2496 */
2497 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2498 xfs_bmbt_set_blockcount(ep,
2499 PREV.br_blockcount + RIGHT.br_blockcount);
2500 xfs_bmbt_set_state(ep, newext);
2501 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2502 xfs_iext_remove(ip, *idx + 1, 1, state);
2503 XFS_IFORK_NEXT_SET(ip, whichfork,
2504 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2505 if (cur == NULL)
2506 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2507 else {
2508 rval = XFS_ILOG_CORE;
2509 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2510 RIGHT.br_startblock,
2511 RIGHT.br_blockcount, &i)))
2512 goto done;
2513 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2514 if ((error = xfs_btree_delete(cur, &i)))
2515 goto done;
2516 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2517 if ((error = xfs_btree_decrement(cur, 0, &i)))
2518 goto done;
2519 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2520 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2521 new->br_startblock,
2522 new->br_blockcount + RIGHT.br_blockcount,
2523 newext)))
2524 goto done;
2525 }
2526 break;
2527
2528 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2529 /*
2530 * Setting all of a previous oldext extent to newext.
2531 * Neither the left nor right neighbors are contiguous with
2532 * the new one.
2533 */
2534 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2535 xfs_bmbt_set_state(ep, newext);
2536 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2537
2538 if (cur == NULL)
2539 rval = XFS_ILOG_DEXT;
2540 else {
2541 rval = 0;
2542 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2543 new->br_startblock, new->br_blockcount,
2544 &i)))
2545 goto done;
2546 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2547 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2548 new->br_startblock, new->br_blockcount,
2549 newext)))
2550 goto done;
2551 }
2552 break;
2553
2554 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2555 /*
2556 * Setting the first part of a previous oldext extent to newext.
2557 * The left neighbor is contiguous.
2558 */
2559 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
2560 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
2561 LEFT.br_blockcount + new->br_blockcount);
2562 xfs_bmbt_set_startoff(ep,
2563 PREV.br_startoff + new->br_blockcount);
2564 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
2565
2566 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2567 xfs_bmbt_set_startblock(ep,
2568 new->br_startblock + new->br_blockcount);
2569 xfs_bmbt_set_blockcount(ep,
2570 PREV.br_blockcount - new->br_blockcount);
2571 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2572
2573 --*idx;
2574
2575 if (cur == NULL)
2576 rval = XFS_ILOG_DEXT;
2577 else {
2578 rval = 0;
2579 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2580 PREV.br_startblock, PREV.br_blockcount,
2581 &i)))
2582 goto done;
2583 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2584 if ((error = xfs_bmbt_update(cur,
2585 PREV.br_startoff + new->br_blockcount,
2586 PREV.br_startblock + new->br_blockcount,
2587 PREV.br_blockcount - new->br_blockcount,
2588 oldext)))
2589 goto done;
2590 if ((error = xfs_btree_decrement(cur, 0, &i)))
2591 goto done;
2592 error = xfs_bmbt_update(cur, LEFT.br_startoff,
2593 LEFT.br_startblock,
2594 LEFT.br_blockcount + new->br_blockcount,
2595 LEFT.br_state);
2596 if (error)
2597 goto done;
2598 }
2599 break;
2600
2601 case BMAP_LEFT_FILLING:
2602 /*
2603 * Setting the first part of a previous oldext extent to newext.
2604 * The left neighbor is not contiguous.
2605 */
2606 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2607 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
2608 xfs_bmbt_set_startoff(ep, new_endoff);
2609 xfs_bmbt_set_blockcount(ep,
2610 PREV.br_blockcount - new->br_blockcount);
2611 xfs_bmbt_set_startblock(ep,
2612 new->br_startblock + new->br_blockcount);
2613 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2614
2615 xfs_iext_insert(ip, *idx, 1, new, state);
2616 XFS_IFORK_NEXT_SET(ip, whichfork,
2617 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2618 if (cur == NULL)
2619 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2620 else {
2621 rval = XFS_ILOG_CORE;
2622 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2623 PREV.br_startblock, PREV.br_blockcount,
2624 &i)))
2625 goto done;
2626 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2627 if ((error = xfs_bmbt_update(cur,
2628 PREV.br_startoff + new->br_blockcount,
2629 PREV.br_startblock + new->br_blockcount,
2630 PREV.br_blockcount - new->br_blockcount,
2631 oldext)))
2632 goto done;
2633 cur->bc_rec.b = *new;
2634 if ((error = xfs_btree_insert(cur, &i)))
2635 goto done;
2636 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2637 }
2638 break;
2639
2640 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2641 /*
2642 * Setting the last part of a previous oldext extent to newext.
2643 * The right neighbor is contiguous with the new allocation.
2644 */
2645 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2646 xfs_bmbt_set_blockcount(ep,
2647 PREV.br_blockcount - new->br_blockcount);
2648 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2649
2650 ++*idx;
2651
2652 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2653 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2654 new->br_startoff, new->br_startblock,
2655 new->br_blockcount + RIGHT.br_blockcount, newext);
2656 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2657
2658 if (cur == NULL)
2659 rval = XFS_ILOG_DEXT;
2660 else {
2661 rval = 0;
2662 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2663 PREV.br_startblock,
2664 PREV.br_blockcount, &i)))
2665 goto done;
2666 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2667 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2668 PREV.br_startblock,
2669 PREV.br_blockcount - new->br_blockcount,
2670 oldext)))
2671 goto done;
2672 if ((error = xfs_btree_increment(cur, 0, &i)))
2673 goto done;
2674 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2675 new->br_startblock,
2676 new->br_blockcount + RIGHT.br_blockcount,
2677 newext)))
2678 goto done;
2679 }
2680 break;
2681
2682 case BMAP_RIGHT_FILLING:
2683 /*
2684 * Setting the last part of a previous oldext extent to newext.
2685 * The right neighbor is not contiguous.
2686 */
2687 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2688 xfs_bmbt_set_blockcount(ep,
2689 PREV.br_blockcount - new->br_blockcount);
2690 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2691
2692 ++*idx;
2693 xfs_iext_insert(ip, *idx, 1, new, state);
2694
2695 XFS_IFORK_NEXT_SET(ip, whichfork,
2696 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2697 if (cur == NULL)
2698 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2699 else {
2700 rval = XFS_ILOG_CORE;
2701 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2702 PREV.br_startblock, PREV.br_blockcount,
2703 &i)))
2704 goto done;
2705 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2706 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2707 PREV.br_startblock,
2708 PREV.br_blockcount - new->br_blockcount,
2709 oldext)))
2710 goto done;
2711 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2712 new->br_startblock, new->br_blockcount,
2713 &i)))
2714 goto done;
2715 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2716 cur->bc_rec.b.br_state = new->br_state;
2717 if ((error = xfs_btree_insert(cur, &i)))
2718 goto done;
2719 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2720 }
2721 break;
2722
2723 case 0:
2724 /*
2725 * Setting the middle part of a previous oldext extent to
2726 * newext. Contiguity is impossible here.
2727 * One extent becomes three extents.
2728 */
2729 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2730 xfs_bmbt_set_blockcount(ep,
2731 new->br_startoff - PREV.br_startoff);
2732 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2733
2734 r[0] = *new;
2735 r[1].br_startoff = new_endoff;
2736 r[1].br_blockcount =
2737 PREV.br_startoff + PREV.br_blockcount - new_endoff;
2738 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2739 r[1].br_state = oldext;
2740
2741 ++*idx;
2742 xfs_iext_insert(ip, *idx, 2, &r[0], state);
2743
2744 XFS_IFORK_NEXT_SET(ip, whichfork,
2745 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2746 if (cur == NULL)
2747 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2748 else {
2749 rval = XFS_ILOG_CORE;
2750 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2751 PREV.br_startblock, PREV.br_blockcount,
2752 &i)))
2753 goto done;
2754 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2755 /* new right extent - oldext */
2756 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
2757 r[1].br_startblock, r[1].br_blockcount,
2758 r[1].br_state)))
2759 goto done;
2760 /* new left extent - oldext */
2761 cur->bc_rec.b = PREV;
2762 cur->bc_rec.b.br_blockcount =
2763 new->br_startoff - PREV.br_startoff;
2764 if ((error = xfs_btree_insert(cur, &i)))
2765 goto done;
2766 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2767 /*
2768 * Reset the cursor to the position of the new extent
2769 * we are about to insert as we can't trust it after
2770 * the previous insert.
2771 */
2772 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2773 new->br_startblock, new->br_blockcount,
2774 &i)))
2775 goto done;
2776 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2777 /* new middle extent - newext */
2778 cur->bc_rec.b.br_state = new->br_state;
2779 if ((error = xfs_btree_insert(cur, &i)))
2780 goto done;
2781 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2782 }
2783 break;
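/*
 * Worked example for the case above (hypothetical ranges): converting
 * [40, 60) of an unwritten extent that covers [0, 100) leaves three
 * records: unwritten [0, 40), the new written [40, 60), and unwritten
 * [60, 100).  Hence the extent count goes up by two and, when a btree
 * cursor is present, one update plus two inserts are issued.
 */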
2784
2785 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2786 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2787 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2788 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2789 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2790 case BMAP_LEFT_CONTIG:
2791 case BMAP_RIGHT_CONTIG:
2792 /*
2793 * These cases are all impossible.
2794 */
2795 ASSERT(0);
2796 }
2797
2798 /* update reverse mappings */
2799 error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
2800 if (error)
2801 goto done;
2802
2803 /* convert to a btree if necessary */
2804 if (xfs_bmap_needs_btree(ip, whichfork)) {
2805 int tmp_logflags; /* partial log flag return val */
2806
2807 ASSERT(cur == NULL);
2808 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
2809 0, &tmp_logflags, whichfork);
2810 *logflagsp |= tmp_logflags;
2811 if (error)
2812 goto done;
2813 }
2814
2815 /* clear out the allocated field, done with it now in any case. */
2816 if (cur) {
2817 cur->bc_private.b.allocated = 0;
2818 *curp = cur;
2819 }
2820
2821 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2822 done:
2823 *logflagsp |= rval;
2824 return error;
2825 #undef LEFT
2826 #undef RIGHT
2827 #undef PREV
2828 }
2829
2830 /*
2831 * Convert a hole to a delayed allocation.
2832 */
2833 STATIC void
2834 xfs_bmap_add_extent_hole_delay(
2835 xfs_inode_t *ip, /* incore inode pointer */
2836 int whichfork,
2837 xfs_extnum_t *idx, /* extent number to update/insert */
2838 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2839 {
2840 xfs_ifork_t *ifp; /* inode fork pointer */
2841 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2842 xfs_filblks_t newlen=0; /* new indirect size */
2843 xfs_filblks_t oldlen=0; /* old indirect size */
2844 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2845 int state; /* state bits, accessed thru macros */
2846 xfs_filblks_t temp=0; /* temp for indirect calculations */
2847
2848 ifp = XFS_IFORK_PTR(ip, whichfork);
2849 state = 0;
2850 if (whichfork == XFS_COW_FORK)
2851 state |= BMAP_COWFORK;
2852 ASSERT(isnullstartblock(new->br_startblock));
2853
2854 /*
2855 * Check and set flags if this segment has a left neighbor
2856 */
2857 if (*idx > 0) {
2858 state |= BMAP_LEFT_VALID;
2859 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2860
2861 if (isnullstartblock(left.br_startblock))
2862 state |= BMAP_LEFT_DELAY;
2863 }
2864
2865 /*
2866 * Check and set flags if the current (right) segment exists.
2867 * If it doesn't exist, we're converting the hole at end-of-file.
2868 */
2869 if (*idx < xfs_iext_count(ifp)) {
2870 state |= BMAP_RIGHT_VALID;
2871 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2872
2873 if (isnullstartblock(right.br_startblock))
2874 state |= BMAP_RIGHT_DELAY;
2875 }
2876
2877 /*
2878 * Set contiguity flags on the left and right neighbors.
2879 * Don't let extents get too large, even if the pieces are contiguous.
2880 */
2881 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2882 left.br_startoff + left.br_blockcount == new->br_startoff &&
2883 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2884 state |= BMAP_LEFT_CONTIG;
2885
2886 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2887 new->br_startoff + new->br_blockcount == right.br_startoff &&
2888 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2889 (!(state & BMAP_LEFT_CONTIG) ||
2890 (left.br_blockcount + new->br_blockcount +
2891 right.br_blockcount <= MAXEXTLEN)))
2892 state |= BMAP_RIGHT_CONTIG;
2893
2894 /*
2895 * Switch out based on the contiguity flags.
2896 */
2897 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2898 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2899 /*
2900 * New allocation is contiguous with delayed allocations
2901 * on the left and on the right.
2902 * Merge all three into a single extent record.
2903 */
2904 --*idx;
2905 temp = left.br_blockcount + new->br_blockcount +
2906 right.br_blockcount;
2907
2908 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2909 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2910 oldlen = startblockval(left.br_startblock) +
2911 startblockval(new->br_startblock) +
2912 startblockval(right.br_startblock);
2913 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2914 oldlen);
2915 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2916 nullstartblock((int)newlen));
2917 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2918
2919 xfs_iext_remove(ip, *idx + 1, 1, state);
2920 break;
2921
2922 case BMAP_LEFT_CONTIG:
2923 /*
2924 * New allocation is contiguous with a delayed allocation
2925 * on the left.
2926 * Merge the new allocation with the left neighbor.
2927 */
2928 --*idx;
2929 temp = left.br_blockcount + new->br_blockcount;
2930
2931 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2932 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2933 oldlen = startblockval(left.br_startblock) +
2934 startblockval(new->br_startblock);
2935 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2936 oldlen);
2937 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2938 nullstartblock((int)newlen));
2939 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2940 break;
2941
2942 case BMAP_RIGHT_CONTIG:
2943 /*
2944 * New allocation is contiguous with a delayed allocation
2945 * on the right.
2946 * Merge the new allocation with the right neighbor.
2947 */
2948 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2949 temp = new->br_blockcount + right.br_blockcount;
2950 oldlen = startblockval(new->br_startblock) +
2951 startblockval(right.br_startblock);
2952 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2953 oldlen);
2954 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2955 new->br_startoff,
2956 nullstartblock((int)newlen), temp, right.br_state);
2957 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2958 break;
2959
2960 case 0:
2961 /*
2962 * New allocation is not contiguous with another
2963 * delayed allocation.
2964 * Insert a new entry.
2965 */
2966 oldlen = newlen = 0;
2967 xfs_iext_insert(ip, *idx, 1, new, state);
2968 break;
2969 }
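/*
 * Accounting note (explanatory): when the new delalloc extent is merged
 * with a neighbour, the worst-case indirect-block reservation for the
 * combined extent (newlen) is capped at the sum of the old reservations
 * (oldlen), so the difference below can only be returned to, never taken
 * from, the free-block counter.
 */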
2970 if (oldlen != newlen) {
2971 ASSERT(oldlen > newlen);
2972 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2973 false);
2974 /*
2975 * Nothing to do for disk quota accounting here.
2976 */
2977 }
2978 }
2979
2980 /*
2981 * Convert a hole to a real allocation.
2982 */
2983 STATIC int /* error */
2984 xfs_bmap_add_extent_hole_real(
2985 struct xfs_bmalloca *bma,
2986 int whichfork)
2987 {
2988 struct xfs_bmbt_irec *new = &bma->got;
2989 int error; /* error return value */
2990 int i; /* temp state */
2991 xfs_ifork_t *ifp; /* inode fork pointer */
2992 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2993 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2994 int rval=0; /* return value (logging flags) */
2995 int state; /* state bits, accessed thru macros */
2996 struct xfs_mount *mp;
2997
2998 mp = bma->ip->i_mount;
2999 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
3000
3001 ASSERT(bma->idx >= 0);
3002 ASSERT(bma->idx <= xfs_iext_count(ifp));
3003 ASSERT(!isnullstartblock(new->br_startblock));
3004 ASSERT(!bma->cur ||
3005 !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
3006 ASSERT(whichfork != XFS_COW_FORK);
3007
3008 XFS_STATS_INC(mp, xs_add_exlist);
3009
3010 state = 0;
3011 if (whichfork == XFS_ATTR_FORK)
3012 state |= BMAP_ATTRFORK;
3013
3014 /*
3015 * Check and set flags if this segment has a left neighbor.
3016 */
3017 if (bma->idx > 0) {
3018 state |= BMAP_LEFT_VALID;
3019 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
3020 if (isnullstartblock(left.br_startblock))
3021 state |= BMAP_LEFT_DELAY;
3022 }
3023
3024 /*
3025 * Check and set flags if this segment has a current value.
3026 * Not true if we're inserting into the "hole" at eof.
3027 */
3028 if (bma->idx < xfs_iext_count(ifp)) {
3029 state |= BMAP_RIGHT_VALID;
3030 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
3031 if (isnullstartblock(right.br_startblock))
3032 state |= BMAP_RIGHT_DELAY;
3033 }
3034
3035 /*
3036 * We're inserting a real allocation between "left" and "right".
3037 * Set the contiguity flags. Don't let extents get too large.
3038 */
3039 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
3040 left.br_startoff + left.br_blockcount == new->br_startoff &&
3041 left.br_startblock + left.br_blockcount == new->br_startblock &&
3042 left.br_state == new->br_state &&
3043 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
3044 state |= BMAP_LEFT_CONTIG;
3045
3046 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
3047 new->br_startoff + new->br_blockcount == right.br_startoff &&
3048 new->br_startblock + new->br_blockcount == right.br_startblock &&
3049 new->br_state == right.br_state &&
3050 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
3051 (!(state & BMAP_LEFT_CONTIG) ||
3052 left.br_blockcount + new->br_blockcount +
3053 right.br_blockcount <= MAXEXTLEN))
3054 state |= BMAP_RIGHT_CONTIG;
3055
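/*
 * Unlike the delalloc conversion above, there is no reservation to unwind
 * here: a merge with one neighbour leaves the in-core extent count
 * unchanged, a merge with both decrements it, and a plain insert
 * increments it, with the matching btree delete/update/insert issued when
 * a cursor is present.
 */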
3056 error = 0;
3057 /*
3058 * Select which case we're in here, and implement it.
3059 */
3060 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
3061 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
3062 /*
3063 * New allocation is contiguous with real allocations on the
3064 * left and on the right.
3065 * Merge all three into a single extent record.
3066 */
3067 --bma->idx;
3068 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3069 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
3070 left.br_blockcount + new->br_blockcount +
3071 right.br_blockcount);
3072 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3073
3074 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
3075
3076 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3077 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
3078 if (bma->cur == NULL) {
3079 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3080 } else {
3081 rval = XFS_ILOG_CORE;
3082 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
3083 right.br_startblock, right.br_blockcount,
3084 &i);
3085 if (error)
3086 goto done;
3087 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3088 error = xfs_btree_delete(bma->cur, &i);
3089 if (error)
3090 goto done;
3091 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3092 error = xfs_btree_decrement(bma->cur, 0, &i);
3093 if (error)
3094 goto done;
3095 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3096 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3097 left.br_startblock,
3098 left.br_blockcount +
3099 new->br_blockcount +
3100 right.br_blockcount,
3101 left.br_state);
3102 if (error)
3103 goto done;
3104 }
3105 break;
3106
3107 case BMAP_LEFT_CONTIG:
3108 /*
3109 * New allocation is contiguous with a real allocation
3110 * on the left.
3111 * Merge the new allocation with the left neighbor.
3112 */
3113 --bma->idx;
3114 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3115 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
3116 left.br_blockcount + new->br_blockcount);
3117 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3118
3119 if (bma->cur == NULL) {
3120 rval = xfs_ilog_fext(whichfork);
3121 } else {
3122 rval = 0;
3123 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
3124 left.br_startblock, left.br_blockcount,
3125 &i);
3126 if (error)
3127 goto done;
3128 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3129 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3130 left.br_startblock,
3131 left.br_blockcount +
3132 new->br_blockcount,
3133 left.br_state);
3134 if (error)
3135 goto done;
3136 }
3137 break;
3138
3139 case BMAP_RIGHT_CONTIG:
3140 /*
3141 * New allocation is contiguous with a real allocation
3142 * on the right.
3143 * Merge the new allocation with the right neighbor.
3144 */
3145 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3146 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
3147 new->br_startoff, new->br_startblock,
3148 new->br_blockcount + right.br_blockcount,
3149 right.br_state);
3150 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3151
3152 if (bma->cur == NULL) {
3153 rval = xfs_ilog_fext(whichfork);
3154 } else {
3155 rval = 0;
3156 error = xfs_bmbt_lookup_eq(bma->cur,
3157 right.br_startoff,
3158 right.br_startblock,
3159 right.br_blockcount, &i);
3160 if (error)
3161 goto done;
3162 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3163 error = xfs_bmbt_update(bma->cur, new->br_startoff,
3164 new->br_startblock,
3165 new->br_blockcount +
3166 right.br_blockcount,
3167 right.br_state);
3168 if (error)
3169 goto done;
3170 }
3171 break;
3172
3173 case 0:
3174 /*
3175 * New allocation is not contiguous with another
3176 * real allocation.
3177 * Insert a new entry.
3178 */
3179 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
3180 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3181 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
3182 if (bma->cur == NULL) {
3183 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3184 } else {
3185 rval = XFS_ILOG_CORE;
3186 error = xfs_bmbt_lookup_eq(bma->cur,
3187 new->br_startoff,
3188 new->br_startblock,
3189 new->br_blockcount, &i);
3190 if (error)
3191 goto done;
3192 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
3193 bma->cur->bc_rec.b.br_state = new->br_state;
3194 error = xfs_btree_insert(bma->cur, &i);
3195 if (error)
3196 goto done;
3197 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3198 }
3199 break;
3200 }
3201
3202 /* add reverse mapping */
3203 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
3204 if (error)
3205 goto done;
3206
3207 /* convert to a btree if necessary */
3208 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
3209 int tmp_logflags; /* partial log flag return val */
3210
3211 ASSERT(bma->cur == NULL);
3212 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
3213 bma->firstblock, bma->dfops, &bma->cur,
3214 0, &tmp_logflags, whichfork);
3215 bma->logflags |= tmp_logflags;
3216 if (error)
3217 goto done;
3218 }
3219
3220 /* clear out the allocated field, done with it now in any case. */
3221 if (bma->cur)
3222 bma->cur->bc_private.b.allocated = 0;
3223
3224 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
3225 done:
3226 bma->logflags |= rval;
3227 return error;
3228 }
3229
3230 /*
3231 * Functions used in the extent read, allocate and remove paths
3232 */
3233
3234 /*
3235 * Adjust the size of the new extent based on di_extsize and rt extsize.
3236 */
3237 int
3238 xfs_bmap_extsize_align(
3239 xfs_mount_t *mp,
3240 xfs_bmbt_irec_t *gotp, /* next extent pointer */
3241 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
3242 xfs_extlen_t extsz, /* align to this extent size */
3243 int rt, /* is this a realtime inode? */
3244 int eof, /* is extent at end-of-file? */
3245 int delay, /* creating delalloc extent? */
3246 int convert, /* overwriting unwritten extent? */
3247 xfs_fileoff_t *offp, /* in/out: aligned offset */
3248 xfs_extlen_t *lenp) /* in/out: aligned length */
3249 {
3250 xfs_fileoff_t orig_off; /* original offset */
3251 xfs_extlen_t orig_alen; /* original length */
3252 xfs_fileoff_t orig_end; /* original off+len */
3253 xfs_fileoff_t nexto; /* next file offset */
3254 xfs_fileoff_t prevo; /* previous file offset */
3255 xfs_fileoff_t align_off; /* temp for offset */
3256 xfs_extlen_t align_alen; /* temp for length */
3257 xfs_extlen_t temp; /* temp for calculations */
3258
3259 if (convert)
3260 return 0;
3261
3262 orig_off = align_off = *offp;
3263 orig_alen = align_alen = *lenp;
3264 orig_end = orig_off + orig_alen;
3265
3266 /*
3267 * If this request overlaps an existing extent, then don't
3268 * attempt to perform any additional alignment.
3269 */
3270 if (!delay && !eof &&
3271 (orig_off >= gotp->br_startoff) &&
3272 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
3273 return 0;
3274 }
3275
3276 /*
3277 * If the file offset is unaligned vs. the extent size
3278 * we need to align it. This will be possible unless
3279 * the file was previously written with a kernel that didn't
3280 * perform this alignment, or if a truncate shot us in the
3281 * foot.
3282 */
3283 temp = do_mod(orig_off, extsz);
3284 if (temp) {
3285 align_alen += temp;
3286 align_off -= temp;
3287 }
3288
3289 /* Same adjustment for the end of the requested area. */
3290 temp = (align_alen % extsz);
3291 if (temp)
3292 align_alen += extsz - temp;
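/*
 * Worked example (hypothetical numbers): with extsz = 16, a request for
 * offset 5, length 10 first rounds the start down (align_off = 0,
 * align_alen = 15) and then rounds the length up to the next extsz
 * multiple, ending at align_off = 0, align_alen = 16.
 */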
3293
3294 /*
3295 * For large extent hint sizes, the aligned extent might be larger than
3296 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3297 * the length back under MAXEXTLEN. The outer allocation loops handle
3298 * short allocation just fine, so it is safe to do this. We only want to
3299 * do it when we are forced to, though, because it means more allocation
3300 * operations are required.
3301 */
3302 while (align_alen > MAXEXTLEN)
3303 align_alen -= extsz;
3304 ASSERT(align_alen <= MAXEXTLEN);
3305
3306 /*
3307 * If the previous block overlaps with this proposed allocation
3308 * then move the start forward without adjusting the length.
3309 */
3310 if (prevp->br_startoff != NULLFILEOFF) {
3311 if (prevp->br_startblock == HOLESTARTBLOCK)
3312 prevo = prevp->br_startoff;
3313 else
3314 prevo = prevp->br_startoff + prevp->br_blockcount;
3315 } else
3316 prevo = 0;
3317 if (align_off != orig_off && align_off < prevo)
3318 align_off = prevo;
3319 /*
3320 * If the next block overlaps with this proposed allocation
3321 * then move the start back without adjusting the length,
3322 * but not before offset 0.
3323 * This may of course make the start overlap previous block,
3324 * and if we hit the offset 0 limit then the next block
3325 * can still overlap too.
3326 */
3327 if (!eof && gotp->br_startoff != NULLFILEOFF) {
3328 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3329 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3330 nexto = gotp->br_startoff + gotp->br_blockcount;
3331 else
3332 nexto = gotp->br_startoff;
3333 } else
3334 nexto = NULLFILEOFF;
3335 if (!eof &&
3336 align_off + align_alen != orig_end &&
3337 align_off + align_alen > nexto)
3338 align_off = nexto > align_alen ? nexto - align_alen : 0;
3339 /*
3340 * If we're now overlapping the next or previous extent that
3341 * means we can't fit an extsz piece in this hole. Just move
3342 * the start forward to the first valid spot and set
3343 * the length so we hit the end.
3344 */
3345 if (align_off != orig_off && align_off < prevo)
3346 align_off = prevo;
3347 if (align_off + align_alen != orig_end &&
3348 align_off + align_alen > nexto &&
3349 nexto != NULLFILEOFF) {
3350 ASSERT(nexto > prevo);
3351 align_alen = nexto - align_off;
3352 }
3353
3354 /*
3355 * If realtime, and the result isn't a multiple of the realtime
3356 * extent size we need to remove blocks until it is.
3357 */
3358 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3359 /*
3360 * We're not covering the original request, or
3361 * we won't be able to once we fix the length.
3362 */
3363 if (orig_off < align_off ||
3364 orig_end > align_off + align_alen ||
3365 align_alen - temp < orig_alen)
3366 return -EINVAL;
3367 /*
3368 * Try to fix it by moving the start up.
3369 */
3370 if (align_off + temp <= orig_off) {
3371 align_alen -= temp;
3372 align_off += temp;
3373 }
3374 /*
3375 * Try to fix it by moving the end in.
3376 */
3377 else if (align_off + align_alen - temp >= orig_end)
3378 align_alen -= temp;
3379 /*
3380 * Set the start to the minimum then trim the length.
3381 */
3382 else {
3383 align_alen -= orig_off - align_off;
3384 align_off = orig_off;
3385 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3386 }
3387 /*
3388 * Result doesn't cover the request, fail it.
3389 */
3390 if (orig_off < align_off || orig_end > align_off + align_alen)
3391 return -EINVAL;
3392 } else {
3393 ASSERT(orig_off >= align_off);
3394 /* see MAXEXTLEN handling above */
3395 ASSERT(orig_end <= align_off + align_alen ||
3396 align_alen + extsz > MAXEXTLEN);
3397 }
3398
3399 #ifdef DEBUG
3400 if (!eof && gotp->br_startoff != NULLFILEOFF)
3401 ASSERT(align_off + align_alen <= gotp->br_startoff);
3402 if (prevp->br_startoff != NULLFILEOFF)
3403 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3404 #endif
3405
3406 *lenp = align_alen;
3407 *offp = align_off;
3408 return 0;
3409 }
3410
3411 #define XFS_ALLOC_GAP_UNITS 4
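/*
 * XFS_ALLOC_GAP_UNITS bounds how far we will chase a gap between a
 * neighbouring extent and the requested file offset when picking a starting
 * disk block below. A rough illustration (made-up numbers): when allocating
 * 8 blocks with a 100-block gap to the end of the previous extent,
 * 100 > 4 * 8, so we fall back to the end of the previous extent rather
 * than offsetting the hint by the whole gap.
 */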
3412
3413 void
3414 xfs_bmap_adjacent(
3415 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3416 {
3417 xfs_fsblock_t adjust; /* adjustment to block numbers */
3418 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3419 xfs_mount_t *mp; /* mount point structure */
3420 int nullfb; /* true if ap->firstblock isn't set */
3421 int rt; /* true if inode is realtime */
3422
3423 #define ISVALID(x,y) \
3424 (rt ? \
3425 (x) < mp->m_sb.sb_rblocks : \
3426 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3427 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3428 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
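	/*
	 * ISVALID(x, y) checks that candidate block x is usable relative to a
	 * known-good block y: on the realtime device it only has to lie inside
	 * the rt area, otherwise it must land in the same (existing) AG as y
	 * and within that AG's block range. E.g. if the previous extent ends
	 * on the last block of an AG, "end + 1" falls into the next AG and
	 * ISVALID rejects it, so we don't use it as a starting hint.
	 */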
3429
3430 mp = ap->ip->i_mount;
3431 nullfb = *ap->firstblock == NULLFSBLOCK;
3432 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3433 xfs_alloc_is_userdata(ap->datatype);
3434 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3435 /*
3436 * If allocating at eof, and there's a previous real block,
3437 * try to use its last block as our starting point.
3438 */
3439 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3440 !isnullstartblock(ap->prev.br_startblock) &&
3441 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3442 ap->prev.br_startblock)) {
3443 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3444 /*
3445 * Adjust for the gap between prevp and us.
3446 */
3447 adjust = ap->offset -
3448 (ap->prev.br_startoff + ap->prev.br_blockcount);
3449 if (adjust &&
3450 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3451 ap->blkno += adjust;
3452 }
3453 /*
3454 * If not at eof, then compare the two neighbor blocks.
3455 * Figure out whether either one gives us a good starting point,
3456 * and pick the better one.
3457 */
3458 else if (!ap->eof) {
3459 xfs_fsblock_t gotbno; /* right side block number */
3460 xfs_fsblock_t gotdiff=0; /* right side difference */
3461 xfs_fsblock_t prevbno; /* left side block number */
3462 xfs_fsblock_t prevdiff=0; /* left side difference */
3463
3464 /*
3465 * If there's a previous (left) block, select a requested
3466 * start block based on it.
3467 */
3468 if (ap->prev.br_startoff != NULLFILEOFF &&
3469 !isnullstartblock(ap->prev.br_startblock) &&
3470 (prevbno = ap->prev.br_startblock +
3471 ap->prev.br_blockcount) &&
3472 ISVALID(prevbno, ap->prev.br_startblock)) {
3473 /*
3474 * Calculate gap to end of previous block.
3475 */
3476 adjust = prevdiff = ap->offset -
3477 (ap->prev.br_startoff +
3478 ap->prev.br_blockcount);
3479 /*
3480 * Figure the startblock based on the previous block's
3481 * end and the gap size.
3482 * Heuristic!
3483 * If the gap is large relative to the piece we're
3484 * allocating, or using it gives us an invalid block
3485 * number, then just use the end of the previous block.
3486 */
3487 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3488 ISVALID(prevbno + prevdiff,
3489 ap->prev.br_startblock))
3490 prevbno += adjust;
3491 else
3492 prevdiff += adjust;
3493 /*
3494 * If the firstblock forbids it, can't use it,
3495 * must use default.
3496 */
3497 if (!rt && !nullfb &&
3498 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3499 prevbno = NULLFSBLOCK;
3500 }
3501 /*
3502 * No previous block or can't follow it, just default.
3503 */
3504 else
3505 prevbno = NULLFSBLOCK;
3506 /*
3507 * If there's a following (right) block, select a requested
3508 * start block based on it.
3509 */
3510 if (!isnullstartblock(ap->got.br_startblock)) {
3511 /*
3512 * Calculate gap to start of next block.
3513 */
3514 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3515 /*
3516 * Figure the startblock based on the next block's
3517 * start and the gap size.
3518 */
3519 gotbno = ap->got.br_startblock;
3520 /*
3521 * Heuristic!
3522 * If the gap is large relative to the piece we're
3523 * allocating, or using it gives us an invalid block
3524 * number, then just use the start of the next block
3525 * offset by our length.
3526 */
3527 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3528 ISVALID(gotbno - gotdiff, gotbno))
3529 gotbno -= adjust;
3530 else if (ISVALID(gotbno - ap->length, gotbno)) {
3531 gotbno -= ap->length;
3532 gotdiff += adjust - ap->length;
3533 } else
3534 gotdiff += adjust;
3535 /*
3536 * If the firstblock forbids it, can't use it,
3537 * must use default.
3538 */
3539 if (!rt && !nullfb &&
3540 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3541 gotbno = NULLFSBLOCK;
3542 }
3543 /*
3544 * No next block, just default.
3545 */
3546 else
3547 gotbno = NULLFSBLOCK;
3548 /*
3549 * If both valid, pick the better one, else the only good
3550 * one, else ap->blkno is already set (to 0 or the inode block).
3551 */
3552 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3553 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3554 else if (prevbno != NULLFSBLOCK)
3555 ap->blkno = prevbno;
3556 else if (gotbno != NULLFSBLOCK)
3557 ap->blkno = gotbno;
3558 }
3559 #undef ISVALID
3560 }
3561
3562 static int
3563 xfs_bmap_longest_free_extent(
3564 struct xfs_trans *tp,
3565 xfs_agnumber_t ag,
3566 xfs_extlen_t *blen,
3567 int *notinit)
3568 {
3569 struct xfs_mount *mp = tp->t_mountp;
3570 struct xfs_perag *pag;
3571 xfs_extlen_t longest;
3572 int error = 0;
3573
3574 pag = xfs_perag_get(mp, ag);
3575 if (!pag->pagf_init) {
3576 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3577 if (error)
3578 goto out;
3579
3580 if (!pag->pagf_init) {
3581 *notinit = 1;
3582 goto out;
3583 }
3584 }
3585
3586 longest = xfs_alloc_longest_free_extent(mp, pag,
3587 xfs_alloc_min_freelist(mp, pag),
3588 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3589 if (*blen < longest)
3590 *blen = longest;
3591
3592 out:
3593 xfs_perag_put(pag);
3594 return error;
3595 }
3596
3597 static void
3598 xfs_bmap_select_minlen(
3599 struct xfs_bmalloca *ap,
3600 struct xfs_alloc_arg *args,
3601 xfs_extlen_t *blen,
3602 int notinit)
3603 {
3604 if (notinit || *blen < ap->minlen) {
3605 /*
3606 		 * Since we used a trylock above, an AGF may not have been
3607 		 * read, so there may still be space for this request.
3608 */
3609 args->minlen = ap->minlen;
3610 } else if (*blen < args->maxlen) {
3611 /*
3612 * If the best seen length is less than the request length,
3613 * use the best as the minimum.
3614 */
3615 args->minlen = *blen;
3616 } else {
3617 /*
3618 * Otherwise we've seen an extent as big as maxlen, use that
3619 * as the minimum.
3620 */
3621 args->minlen = args->maxlen;
3622 }
3623 }
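/*
 * Worked illustration of the selection above (made-up numbers): with
 * ap->minlen = 1 and args->maxlen = 64, a best-seen free extent of
 * blen = 40 gives args->minlen = 40; blen = 80 gives args->minlen = 64;
 * and if some AGF could not be read (notinit) we fall back to
 * args->minlen = 1 because a large enough extent may still exist.
 */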
3624
3625 STATIC int
3626 xfs_bmap_btalloc_nullfb(
3627 struct xfs_bmalloca *ap,
3628 struct xfs_alloc_arg *args,
3629 xfs_extlen_t *blen)
3630 {
3631 struct xfs_mount *mp = ap->ip->i_mount;
3632 xfs_agnumber_t ag, startag;
3633 int notinit = 0;
3634 int error;
3635
3636 args->type = XFS_ALLOCTYPE_START_BNO;
3637 args->total = ap->total;
3638
3639 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3640 if (startag == NULLAGNUMBER)
3641 startag = ag = 0;
3642
3643 while (*blen < args->maxlen) {
3644 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3645 				&notinit);
3646 if (error)
3647 return error;
3648
3649 if (++ag == mp->m_sb.sb_agcount)
3650 ag = 0;
3651 if (ag == startag)
3652 break;
3653 }
3654
3655 xfs_bmap_select_minlen(ap, args, blen, notinit);
3656 return 0;
3657 }
3658
3659 STATIC int
3660 xfs_bmap_btalloc_filestreams(
3661 struct xfs_bmalloca *ap,
3662 struct xfs_alloc_arg *args,
3663 xfs_extlen_t *blen)
3664 {
3665 struct xfs_mount *mp = ap->ip->i_mount;
3666 xfs_agnumber_t ag;
3667 int notinit = 0;
3668 int error;
3669
3670 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3671 args->total = ap->total;
3672
3673 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3674 if (ag == NULLAGNUMBER)
3675 ag = 0;
3676
3677 	error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3678 if (error)
3679 return error;
3680
3681 if (*blen < args->maxlen) {
3682 error = xfs_filestream_new_ag(ap, &ag);
3683 if (error)
3684 return error;
3685
3686 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3687 				&notinit);
3688 if (error)
3689 return error;
3690
3691 }
3692
3693 xfs_bmap_select_minlen(ap, args, blen, notinit);
3694
3695 /*
3696 	 * Set the failure fallback case to look in the selected AG as the stream
3697 * may have moved.
3698 */
3699 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3700 return 0;
3701 }
3702
3703 STATIC int
3704 xfs_bmap_btalloc(
3705 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3706 {
3707 xfs_mount_t *mp; /* mount point structure */
3708 xfs_alloctype_t atype = 0; /* type for allocation routines */
3709 xfs_extlen_t align = 0; /* minimum allocation alignment */
3710 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3711 xfs_agnumber_t ag;
3712 xfs_alloc_arg_t args;
3713 xfs_extlen_t blen;
3714 xfs_extlen_t nextminlen = 0;
3715 int nullfb; /* true if ap->firstblock isn't set */
3716 int isaligned;
3717 int tryagain;
3718 int error;
3719 int stripe_align;
3720
3721 ASSERT(ap->length);
3722
3723 mp = ap->ip->i_mount;
3724
3725 /* stripe alignment for allocation is determined by mount parameters */
3726 stripe_align = 0;
3727 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3728 stripe_align = mp->m_swidth;
3729 else if (mp->m_dalign)
3730 stripe_align = mp->m_dalign;
3731
3732 if (ap->flags & XFS_BMAPI_COWFORK)
3733 align = xfs_get_cowextsz_hint(ap->ip);
3734 else if (xfs_alloc_is_userdata(ap->datatype))
3735 align = xfs_get_extsz_hint(ap->ip);
3736 if (align) {
3737 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3738 align, 0, ap->eof, 0, ap->conv,
3739 &ap->offset, &ap->length);
3740 ASSERT(!error);
3741 ASSERT(ap->length);
3742 }
3743
3744
3745 nullfb = *ap->firstblock == NULLFSBLOCK;
3746 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3747 if (nullfb) {
3748 if (xfs_alloc_is_userdata(ap->datatype) &&
3749 xfs_inode_is_filestream(ap->ip)) {
3750 ag = xfs_filestream_lookup_ag(ap->ip);
3751 ag = (ag != NULLAGNUMBER) ? ag : 0;
3752 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3753 } else {
3754 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3755 }
3756 } else
3757 ap->blkno = *ap->firstblock;
3758
3759 xfs_bmap_adjacent(ap);
3760
3761 /*
3762 * If allowed, use ap->blkno; otherwise must use firstblock since
3763 * it's in the right allocation group.
3764 */
3765 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3766 ;
3767 else
3768 ap->blkno = *ap->firstblock;
3769 /*
3770 * Normal allocation, done through xfs_alloc_vextent.
3771 */
3772 tryagain = isaligned = 0;
3773 memset(&args, 0, sizeof(args));
3774 args.tp = ap->tp;
3775 args.mp = mp;
3776 args.fsbno = ap->blkno;
3777 xfs_rmap_skip_owner_update(&args.oinfo);
3778
3779 /* Trim the allocation back to the maximum an AG can fit. */
3780 args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
3781 args.firstblock = *ap->firstblock;
3782 blen = 0;
3783 if (nullfb) {
3784 /*
3785 * Search for an allocation group with a single extent large
3786 * enough for the request. If one isn't found, then adjust
3787 * the minimum allocation size to the largest space found.
3788 */
3789 if (xfs_alloc_is_userdata(ap->datatype) &&
3790 xfs_inode_is_filestream(ap->ip))
3791 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3792 else
3793 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3794 if (error)
3795 return error;
3796 } else if (ap->dfops->dop_low) {
3797 if (xfs_inode_is_filestream(ap->ip))
3798 args.type = XFS_ALLOCTYPE_FIRST_AG;
3799 else
3800 args.type = XFS_ALLOCTYPE_START_BNO;
3801 args.total = args.minlen = ap->minlen;
3802 } else {
3803 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3804 args.total = ap->total;
3805 args.minlen = ap->minlen;
3806 }
3807 /* apply extent size hints if obtained earlier */
3808 if (align) {
3809 args.prod = align;
3810 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3811 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3812 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3813 args.prod = 1;
3814 args.mod = 0;
3815 } else {
3816 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3817 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3818 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3819 }
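	/*
	 * Example of the prod/mod pair (illustrative numbers): with an extent
	 * size hint of align = 16 and ap->offset = 70, do_mod(70, 16) = 6, so
	 * args.mod = 16 - 6 = 10. The allocator then prefers lengths l with
	 * l % 16 == 10, so that offset + length (70 + l) ends on a 16-block
	 * hint boundary where possible.
	 */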
3820 /*
3821 * If we are not low on available data blocks, and the
3822 * underlying logical volume manager is a stripe, and
3823 * the file offset is zero then try to allocate data
3824 * blocks on stripe unit boundary.
3825 * NOTE: ap->aeof is only set if the allocation length
3826 * is >= the stripe unit and the allocation offset is
3827 * at the end of file.
3828 */
3829 if (!ap->dfops->dop_low && ap->aeof) {
3830 if (!ap->offset) {
3831 args.alignment = stripe_align;
3832 atype = args.type;
3833 isaligned = 1;
3834 /*
3835 * Adjust for alignment
3836 */
3837 if (blen > args.alignment && blen <= args.maxlen)
3838 args.minlen = blen - args.alignment;
3839 args.minalignslop = 0;
3840 } else {
3841 /*
3842 * First try an exact bno allocation.
3843 * If it fails then do a near or start bno
3844 * allocation with alignment turned on.
3845 */
3846 atype = args.type;
3847 tryagain = 1;
3848 args.type = XFS_ALLOCTYPE_THIS_BNO;
3849 args.alignment = 1;
3850 /*
3851 * Compute the minlen+alignment for the
3852 * next case. Set slop so that the value
3853 * of minlen+alignment+slop doesn't go up
3854 * between the calls.
3855 */
3856 if (blen > stripe_align && blen <= args.maxlen)
3857 nextminlen = blen - stripe_align;
3858 else
3859 nextminlen = args.minlen;
3860 if (nextminlen + stripe_align > args.minlen + 1)
3861 args.minalignslop =
3862 nextminlen + stripe_align -
3863 args.minlen - 1;
3864 else
3865 args.minalignslop = 0;
3866 }
3867 } else {
3868 args.alignment = 1;
3869 args.minalignslop = 0;
3870 }
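	/*
	 * Illustration of the slop calculation above (made-up numbers): with
	 * stripe_align = 32 and args.minlen = nextminlen = 8, minalignslop is
	 * 8 + 32 - 8 - 1 = 31, so the exact-bno attempt reserves
	 * minlen + alignment + slop = 8 + 1 + 31 = 40 blocks of headroom,
	 * the same as the aligned retry's worst case of 8 + 32 + 0 = 40.
	 */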
3871 args.minleft = ap->minleft;
3872 args.wasdel = ap->wasdel;
3873 args.resv = XFS_AG_RESV_NONE;
3874 args.datatype = ap->datatype;
3875 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3876 args.ip = ap->ip;
3877
3878 error = xfs_alloc_vextent(&args);
3879 if (error)
3880 return error;
3881
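	/*
	 * If the first attempt fails, the retries below progressively relax
	 * the constraints: an exact-bno attempt falls back to an aligned
	 * near/start-bno attempt, alignment is then dropped entirely, then
	 * minlen is reduced to ap->minlen, and as a last resort we take
	 * whatever the first AG with space can give us and note that we are
	 * now operating in low-space mode.
	 */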
3882 if (tryagain && args.fsbno == NULLFSBLOCK) {
3883 /*
3884 * Exact allocation failed. Now try with alignment
3885 * turned on.
3886 */
3887 args.type = atype;
3888 args.fsbno = ap->blkno;
3889 args.alignment = stripe_align;
3890 args.minlen = nextminlen;
3891 args.minalignslop = 0;
3892 isaligned = 1;
3893 if ((error = xfs_alloc_vextent(&args)))
3894 return error;
3895 }
3896 if (isaligned && args.fsbno == NULLFSBLOCK) {
3897 /*
3898 * allocation failed, so turn off alignment and
3899 * try again.
3900 */
3901 args.type = atype;
3902 args.fsbno = ap->blkno;
3903 args.alignment = 0;
3904 if ((error = xfs_alloc_vextent(&args)))
3905 return error;
3906 }
3907 if (args.fsbno == NULLFSBLOCK && nullfb &&
3908 args.minlen > ap->minlen) {
3909 args.minlen = ap->minlen;
3910 args.type = XFS_ALLOCTYPE_START_BNO;
3911 args.fsbno = ap->blkno;
3912 if ((error = xfs_alloc_vextent(&args)))
3913 return error;
3914 }
3915 if (args.fsbno == NULLFSBLOCK && nullfb) {
3916 args.fsbno = 0;
3917 args.type = XFS_ALLOCTYPE_FIRST_AG;
3918 args.total = ap->minlen;
3919 if ((error = xfs_alloc_vextent(&args)))
3920 return error;
3921 ap->dfops->dop_low = true;
3922 }
3923 if (args.fsbno != NULLFSBLOCK) {
3924 /*
3925 		 * Check that the allocation happened in the same or a higher AG
3926 		 * than the first block that was allocated.
3927 */
3928 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3929 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
3930 XFS_FSB_TO_AGNO(mp, args.fsbno));
3931
3932 ap->blkno = args.fsbno;
3933 if (*ap->firstblock == NULLFSBLOCK)
3934 *ap->firstblock = args.fsbno;
3935 ASSERT(nullfb || fb_agno <= args.agno);
3936 ap->length = args.len;
3937 if (!(ap->flags & XFS_BMAPI_COWFORK))
3938 ap->ip->i_d.di_nblocks += args.len;
3939 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3940 if (ap->wasdel)
3941 ap->ip->i_delayed_blks -= args.len;
3942 /*
3943 * Adjust the disk quota also. This was reserved
3944 * earlier.
3945 */
3946 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3947 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
3948 XFS_TRANS_DQ_BCOUNT,
3949 (long) args.len);
3950 } else {
3951 ap->blkno = NULLFSBLOCK;
3952 ap->length = 0;
3953 }
3954 return 0;
3955 }
3956
3957 /*
3958 * For a remap operation, just "allocate" an extent at the address that the
3959 * caller passed in, and ensure that the AGFL is the right size. The caller
3960 * will then map the "allocated" extent into the file somewhere.
3961 */
3962 STATIC int
3963 xfs_bmap_remap_alloc(
3964 struct xfs_bmalloca *ap)
3965 {
3966 struct xfs_trans *tp = ap->tp;
3967 struct xfs_mount *mp = tp->t_mountp;
3968 xfs_fsblock_t bno;
3969 struct xfs_alloc_arg args;
3970 int error;
3971
3972 /*
3973 	 * validate that the block number is legal - this enables us to detect
3974 * and handle a silent filesystem corruption rather than crashing.
3975 */
3976 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3977 args.tp = ap->tp;
3978 args.mp = ap->tp->t_mountp;
3979 bno = *ap->firstblock;
3980 args.agno = XFS_FSB_TO_AGNO(mp, bno);
3981 args.agbno = XFS_FSB_TO_AGBNO(mp, bno);
3982 if (args.agno >= mp->m_sb.sb_agcount ||
3983 args.agbno >= mp->m_sb.sb_agblocks)
3984 return -EFSCORRUPTED;
3985
3986 /* "Allocate" the extent from the range we passed in. */
3987 trace_xfs_bmap_remap_alloc(ap->ip, *ap->firstblock, ap->length);
3988 ap->blkno = bno;
3989 ap->ip->i_d.di_nblocks += ap->length;
3990 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3991
3992 /* Fix the freelist, like a real allocator does. */
3993 args.datatype = ap->datatype;
3994 args.pag = xfs_perag_get(args.mp, args.agno);
3995 ASSERT(args.pag);
3996
3997 /*
3998 * The freelist fixing code will decline the allocation if
3999 * the size and shape of the free space doesn't allow for
4000 * allocating the extent and updating all the metadata that
4001 * happens during an allocation. We're remapping, not
4002 * allocating, so skip that check by pretending to be freeing.
4003 */
4004 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
4005 xfs_perag_put(args.pag);
4006 if (error)
4007 trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
4008 return error;
4009 }
4010
4011 /*
4012 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
4013 * It figures out where to ask the underlying allocator to put the new extent.
4014 */
4015 STATIC int
4016 xfs_bmap_alloc(
4017 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
4018 {
4019 if (ap->flags & XFS_BMAPI_REMAP)
4020 return xfs_bmap_remap_alloc(ap);
4021 if (XFS_IS_REALTIME_INODE(ap->ip) &&
4022 xfs_alloc_is_userdata(ap->datatype))
4023 return xfs_bmap_rtalloc(ap);
4024 return xfs_bmap_btalloc(ap);
4025 }
4026
4027 /* Trim extent to fit a logical block range. */
4028 void
4029 xfs_trim_extent(
4030 struct xfs_bmbt_irec *irec,
4031 xfs_fileoff_t bno,
4032 xfs_filblks_t len)
4033 {
4034 xfs_fileoff_t distance;
4035 xfs_fileoff_t end = bno + len;
4036
4037 if (irec->br_startoff + irec->br_blockcount <= bno ||
4038 irec->br_startoff >= end) {
4039 irec->br_blockcount = 0;
4040 return;
4041 }
4042
4043 if (irec->br_startoff < bno) {
4044 distance = bno - irec->br_startoff;
4045 if (isnullstartblock(irec->br_startblock))
4046 irec->br_startblock = DELAYSTARTBLOCK;
4047 if (irec->br_startblock != DELAYSTARTBLOCK &&
4048 irec->br_startblock != HOLESTARTBLOCK)
4049 irec->br_startblock += distance;
4050 irec->br_startoff += distance;
4051 irec->br_blockcount -= distance;
4052 }
4053
4054 if (end < irec->br_startoff + irec->br_blockcount) {
4055 distance = irec->br_startoff + irec->br_blockcount - end;
4056 irec->br_blockcount -= distance;
4057 }
4058 }
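/*
 * Example of the trimming above (illustrative numbers): trimming
 * irec = { br_startoff 100, br_startblock 1000, br_blockcount 50 } to
 * bno = 120, len = 10 first advances the start by 20 to { 120, 1020, 30 }
 * and then clips the tail to { 120, 1020, 10 }, i.e. exactly the requested
 * [120, 130) range.
 */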
4059
4060 /* trim extent to within eof */
4061 void
4062 xfs_trim_extent_eof(
4063 struct xfs_bmbt_irec *irec,
4064 struct xfs_inode *ip)
4065
4066 {
4067 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
4068 i_size_read(VFS_I(ip))));
4069 }
4070
4071 /*
4072 * Trim the returned map to the required bounds
4073 */
4074 STATIC void
4075 xfs_bmapi_trim_map(
4076 struct xfs_bmbt_irec *mval,
4077 struct xfs_bmbt_irec *got,
4078 xfs_fileoff_t *bno,
4079 xfs_filblks_t len,
4080 xfs_fileoff_t obno,
4081 xfs_fileoff_t end,
4082 int n,
4083 int flags)
4084 {
4085 if ((flags & XFS_BMAPI_ENTIRE) ||
4086 got->br_startoff + got->br_blockcount <= obno) {
4087 *mval = *got;
4088 if (isnullstartblock(got->br_startblock))
4089 mval->br_startblock = DELAYSTARTBLOCK;
4090 return;
4091 }
4092
4093 if (obno > *bno)
4094 *bno = obno;
4095 ASSERT((*bno >= obno) || (n == 0));
4096 ASSERT(*bno < end);
4097 mval->br_startoff = *bno;
4098 if (isnullstartblock(got->br_startblock))
4099 mval->br_startblock = DELAYSTARTBLOCK;
4100 else
4101 mval->br_startblock = got->br_startblock +
4102 (*bno - got->br_startoff);
4103 /*
4104 	 * Return the minimum of what we got and what we asked for as
4105 * the length. We can use the len variable here because it is
4106 * modified below and we could have been there before coming
4107 * here if the first part of the allocation didn't overlap what
4108 * was asked for.
4109 */
4110 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
4111 got->br_blockcount - (*bno - got->br_startoff));
4112 mval->br_state = got->br_state;
4113 ASSERT(mval->br_blockcount <= len);
4114 return;
4115 }
4116
4117 /*
4118 * Update and validate the extent map to return
4119 */
4120 STATIC void
4121 xfs_bmapi_update_map(
4122 struct xfs_bmbt_irec **map,
4123 xfs_fileoff_t *bno,
4124 xfs_filblks_t *len,
4125 xfs_fileoff_t obno,
4126 xfs_fileoff_t end,
4127 int *n,
4128 int flags)
4129 {
4130 xfs_bmbt_irec_t *mval = *map;
4131
4132 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
4133 ((mval->br_startoff + mval->br_blockcount) <= end));
4134 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
4135 (mval->br_startoff < obno));
4136
4137 *bno = mval->br_startoff + mval->br_blockcount;
4138 *len = end - *bno;
4139 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
4140 /* update previous map with new information */
4141 ASSERT(mval->br_startblock == mval[-1].br_startblock);
4142 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
4143 ASSERT(mval->br_state == mval[-1].br_state);
4144 mval[-1].br_blockcount = mval->br_blockcount;
4145 mval[-1].br_state = mval->br_state;
4146 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
4147 mval[-1].br_startblock != DELAYSTARTBLOCK &&
4148 mval[-1].br_startblock != HOLESTARTBLOCK &&
4149 mval->br_startblock == mval[-1].br_startblock +
4150 mval[-1].br_blockcount &&
4151 ((flags & XFS_BMAPI_IGSTATE) ||
4152 mval[-1].br_state == mval->br_state)) {
4153 ASSERT(mval->br_startoff ==
4154 mval[-1].br_startoff + mval[-1].br_blockcount);
4155 mval[-1].br_blockcount += mval->br_blockcount;
4156 } else if (*n > 0 &&
4157 mval->br_startblock == DELAYSTARTBLOCK &&
4158 mval[-1].br_startblock == DELAYSTARTBLOCK &&
4159 mval->br_startoff ==
4160 mval[-1].br_startoff + mval[-1].br_blockcount) {
4161 mval[-1].br_blockcount += mval->br_blockcount;
4162 mval[-1].br_state = mval->br_state;
4163 } else if (!((*n == 0) &&
4164 ((mval->br_startoff + mval->br_blockcount) <=
4165 obno))) {
4166 mval++;
4167 (*n)++;
4168 }
4169 *map = mval;
4170 }
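/*
 * Example of the merge above (illustrative numbers): if the previous entry
 * is { br_startoff 0, br_startblock 100, br_blockcount 5 } and the new
 * mapping is { 5, 105, 3 } with the same state, the two are physically
 * contiguous, so the previous entry grows to { 0, 100, 8 } and no new map
 * entry is consumed.
 */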
4171
4172 /*
4173 * Map file blocks to filesystem blocks without allocation.
4174 */
4175 int
4176 xfs_bmapi_read(
4177 struct xfs_inode *ip,
4178 xfs_fileoff_t bno,
4179 xfs_filblks_t len,
4180 struct xfs_bmbt_irec *mval,
4181 int *nmap,
4182 int flags)
4183 {
4184 struct xfs_mount *mp = ip->i_mount;
4185 struct xfs_ifork *ifp;
4186 struct xfs_bmbt_irec got;
4187 struct xfs_bmbt_irec prev;
4188 xfs_fileoff_t obno;
4189 xfs_fileoff_t end;
4190 xfs_extnum_t lastx;
4191 int error;
4192 int eof;
4193 int n = 0;
4194 int whichfork = xfs_bmapi_whichfork(flags);
4195
4196 ASSERT(*nmap >= 1);
4197 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
4198 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
4199 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
4200
4201 if (unlikely(XFS_TEST_ERROR(
4202 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4203 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4204 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4205 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
4206 return -EFSCORRUPTED;
4207 }
4208
4209 if (XFS_FORCED_SHUTDOWN(mp))
4210 return -EIO;
4211
4212 XFS_STATS_INC(mp, xs_blk_mapr);
4213
4214 ifp = XFS_IFORK_PTR(ip, whichfork);
4215
4216 /* No CoW fork? Return a hole. */
4217 if (whichfork == XFS_COW_FORK && !ifp) {
4218 mval->br_startoff = bno;
4219 mval->br_startblock = HOLESTARTBLOCK;
4220 mval->br_blockcount = len;
4221 mval->br_state = XFS_EXT_NORM;
4222 *nmap = 1;
4223 return 0;
4224 }
4225
4226 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4227 error = xfs_iread_extents(NULL, ip, whichfork);
4228 if (error)
4229 return error;
4230 }
4231
4232 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
4233 end = bno + len;
4234 obno = bno;
4235
4236 while (bno < end && n < *nmap) {
4237 /* Reading past eof, act as though there's a hole up to end. */
4238 if (eof)
4239 got.br_startoff = end;
4240 if (got.br_startoff > bno) {
4241 /* Reading in a hole. */
4242 mval->br_startoff = bno;
4243 mval->br_startblock = HOLESTARTBLOCK;
4244 mval->br_blockcount =
4245 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4246 mval->br_state = XFS_EXT_NORM;
4247 bno += mval->br_blockcount;
4248 len -= mval->br_blockcount;
4249 mval++;
4250 n++;
4251 continue;
4252 }
4253
4254 /* set up the extent map to return. */
4255 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4256 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4257
4258 /* If we're done, stop now. */
4259 if (bno >= end || n >= *nmap)
4260 break;
4261
4262 /* Else go on to the next record. */
4263 if (++lastx < xfs_iext_count(ifp))
4264 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4265 else
4266 eof = 1;
4267 }
4268 *nmap = n;
4269 return 0;
4270 }
4271
4272 /*
4273 * Add a delayed allocation extent to an inode. Blocks are reserved from the
4274 * global pool and the extent inserted into the inode in-core extent tree.
4275 *
4276 * On entry, got refers to the first extent beyond the offset of the extent to
4277 * allocate or eof is specified if no such extent exists. On return, got refers
4278 * to the extent record that was inserted to the inode fork.
4279 *
4280 * Note that the allocated extent may have been merged with contiguous extents
4281 * during insertion into the inode fork. Thus, got does not reflect the current
4282 * state of the inode fork on return. If necessary, the caller can use lastx to
4283 * look up the updated record in the inode fork.
4284 */
4285 int
4286 xfs_bmapi_reserve_delalloc(
4287 struct xfs_inode *ip,
4288 int whichfork,
4289 xfs_fileoff_t off,
4290 xfs_filblks_t len,
4291 xfs_filblks_t prealloc,
4292 struct xfs_bmbt_irec *got,
4293 xfs_extnum_t *lastx,
4294 int eof)
4295 {
4296 struct xfs_mount *mp = ip->i_mount;
4297 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4298 xfs_extlen_t alen;
4299 xfs_extlen_t indlen;
4300 char rt = XFS_IS_REALTIME_INODE(ip);
4301 xfs_extlen_t extsz;
4302 int error;
4303 xfs_fileoff_t aoff = off;
4304
4305 /*
4306 * Cap the alloc length. Keep track of prealloc so we know whether to
4307 * tag the inode before we return.
4308 */
4309 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
4310 if (!eof)
4311 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4312 if (prealloc && alen >= len)
4313 prealloc = alen - len;
4314
4315 /* Figure out the extent size, adjust alen */
4316 if (whichfork == XFS_COW_FORK)
4317 extsz = xfs_get_cowextsz_hint(ip);
4318 else
4319 extsz = xfs_get_extsz_hint(ip);
4320 if (extsz) {
4321 struct xfs_bmbt_irec prev;
4322
4323 if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
4324 prev.br_startoff = NULLFILEOFF;
4325
4326 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
4327 1, 0, &aoff, &alen);
4328 ASSERT(!error);
4329 }
4330
4331 if (rt)
4332 extsz = alen / mp->m_sb.sb_rextsize;
4333
4334 /*
4335 * Make a transaction-less quota reservation for delayed allocation
4336 	 * blocks. This number gets adjusted later. We return early if the
4337 	 * reservation cannot be made.
4338 */
4339 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4340 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4341 if (error)
4342 return error;
4343
4344 /*
4345 * Split changing sb for alen and indlen since they could be coming
4346 * from different places.
4347 */
4348 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4349 ASSERT(indlen > 0);
4350
4351 if (rt) {
4352 error = xfs_mod_frextents(mp, -((int64_t)extsz));
4353 } else {
4354 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4355 }
4356
4357 if (error)
4358 goto out_unreserve_quota;
4359
4360 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4361 if (error)
4362 goto out_unreserve_blocks;
4363
4364
4365 ip->i_delayed_blks += alen;
4366
4367 got->br_startoff = aoff;
4368 got->br_startblock = nullstartblock(indlen);
4369 got->br_blockcount = alen;
4370 got->br_state = XFS_EXT_NORM;
4371
4372 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
4373
4374 /*
4375 * Tag the inode if blocks were preallocated. Note that COW fork
4376 * preallocation can occur at the start or end of the extent, even when
4377 * prealloc == 0, so we must also check the aligned offset and length.
4378 */
4379 if (whichfork == XFS_DATA_FORK && prealloc)
4380 xfs_inode_set_eofblocks_tag(ip);
4381 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4382 xfs_inode_set_cowblocks_tag(ip);
4383
4384 return 0;
4385
4386 out_unreserve_blocks:
4387 if (rt)
4388 xfs_mod_frextents(mp, extsz);
4389 else
4390 xfs_mod_fdblocks(mp, alen, false);
4391 out_unreserve_quota:
4392 if (XFS_IS_QUOTA_ON(mp))
4393 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4394 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4395 return error;
4396 }
4397
4398 static int
4399 xfs_bmapi_allocate(
4400 struct xfs_bmalloca *bma)
4401 {
4402 struct xfs_mount *mp = bma->ip->i_mount;
4403 int whichfork = xfs_bmapi_whichfork(bma->flags);
4404 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4405 int tmp_logflags = 0;
4406 int error;
4407
4408 ASSERT(bma->length > 0);
4409
4410 /*
4411 * For the wasdelay case, we could also just allocate the stuff asked
4412 * for in this bmap call but that wouldn't be as good.
4413 */
4414 if (bma->wasdel) {
4415 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4416 bma->offset = bma->got.br_startoff;
4417 if (bma->idx != NULLEXTNUM && bma->idx) {
4418 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4419 &bma->prev);
4420 }
4421 } else {
4422 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4423 if (!bma->eof)
4424 bma->length = XFS_FILBLKS_MIN(bma->length,
4425 bma->got.br_startoff - bma->offset);
4426 }
4427
4428 /*
4429 * Set the data type being allocated. For the data fork, the first data
4430 * in the file is treated differently to all other allocations. For the
4431 * attribute fork, we only need to ensure the allocated range is not on
4432 * the busy list.
4433 */
4434 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4435 bma->datatype = XFS_ALLOC_NOBUSY;
4436 if (whichfork == XFS_DATA_FORK) {
4437 if (bma->offset == 0)
4438 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4439 else
4440 bma->datatype |= XFS_ALLOC_USERDATA;
4441 }
4442 if (bma->flags & XFS_BMAPI_ZERO)
4443 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4444 }
4445
4446 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4447
4448 /*
4449 * Only want to do the alignment at the eof if it is userdata and
4450 * allocation length is larger than a stripe unit.
4451 */
4452 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4453 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4454 error = xfs_bmap_isaeof(bma, whichfork);
4455 if (error)
4456 return error;
4457 }
4458
4459 error = xfs_bmap_alloc(bma);
4460 if (error)
4461 return error;
4462
4463 if (bma->cur)
4464 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4465 if (bma->blkno == NULLFSBLOCK)
4466 return 0;
4467 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4468 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4469 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4470 bma->cur->bc_private.b.dfops = bma->dfops;
4471 }
4472 /*
4473 * Bump the number of extents we've allocated
4474 * in this call.
4475 */
4476 bma->nallocs++;
4477
4478 if (bma->cur)
4479 bma->cur->bc_private.b.flags =
4480 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4481
4482 bma->got.br_startoff = bma->offset;
4483 bma->got.br_startblock = bma->blkno;
4484 bma->got.br_blockcount = bma->length;
4485 bma->got.br_state = XFS_EXT_NORM;
4486
4487 /*
4488 * In the data fork, a wasdelay extent has been initialized, so
4489 * shouldn't be flagged as unwritten.
4490 *
4491 * For the cow fork, however, we convert delalloc reservations
4492 * (extents allocated for speculative preallocation) to
4493 * allocated unwritten extents, and only convert the unwritten
4494 * extents to real extents when we're about to write the data.
4495 */
4496 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4497 (bma->flags & XFS_BMAPI_PREALLOC) &&
4498 xfs_sb_version_hasextflgbit(&mp->m_sb))
4499 bma->got.br_state = XFS_EXT_UNWRITTEN;
4500
4501 if (bma->wasdel)
4502 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4503 else
4504 error = xfs_bmap_add_extent_hole_real(bma, whichfork);
4505
4506 bma->logflags |= tmp_logflags;
4507 if (error)
4508 return error;
4509
4510 /*
4511 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4512 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4513 * the neighbouring ones.
4514 */
4515 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4516
4517 ASSERT(bma->got.br_startoff <= bma->offset);
4518 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4519 bma->offset + bma->length);
4520 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4521 bma->got.br_state == XFS_EXT_UNWRITTEN);
4522 return 0;
4523 }
4524
4525 STATIC int
4526 xfs_bmapi_convert_unwritten(
4527 struct xfs_bmalloca *bma,
4528 struct xfs_bmbt_irec *mval,
4529 xfs_filblks_t len,
4530 int flags)
4531 {
4532 int whichfork = xfs_bmapi_whichfork(flags);
4533 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4534 int tmp_logflags = 0;
4535 int error;
4536
4537 /* check if we need to do unwritten->real conversion */
4538 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4539 (flags & XFS_BMAPI_PREALLOC))
4540 return 0;
4541
4542 /* check if we need to do real->unwritten conversion */
4543 if (mval->br_state == XFS_EXT_NORM &&
4544 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4545 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4546 return 0;
4547
4548 /*
4549 * Modify (by adding) the state flag, if writing.
4550 */
4551 ASSERT(mval->br_blockcount <= len);
4552 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4553 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4554 bma->ip, whichfork);
4555 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4556 bma->cur->bc_private.b.dfops = bma->dfops;
4557 }
4558 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4559 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4560
4561 /*
4562 * Before insertion into the bmbt, zero the range being converted
4563 * if required.
4564 */
4565 if (flags & XFS_BMAPI_ZERO) {
4566 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4567 mval->br_blockcount);
4568 if (error)
4569 return error;
4570 }
4571
4572 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4573 &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
4574 &tmp_logflags);
4575 /*
4576 * Log the inode core unconditionally in the unwritten extent conversion
4577 * path because the conversion might not have done so (e.g., if the
4578 * extent count hasn't changed). We need to make sure the inode is dirty
4579 * in the transaction for the sake of fsync(), even if nothing has
4580 * changed, because fsync() will not force the log for this transaction
4581 * unless it sees the inode pinned.
4582 *
4583 * Note: If we're only converting cow fork extents, there aren't
4584 * any on-disk updates to make, so we don't need to log anything.
4585 */
4586 if (whichfork != XFS_COW_FORK)
4587 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4588 if (error)
4589 return error;
4590
4591 /*
4592 * Update our extent pointer, given that
4593 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4594 * of the neighbouring ones.
4595 */
4596 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4597
4598 /*
4599 * We may have combined previously unwritten space with written space,
4600 * so generate another request.
4601 */
4602 if (mval->br_blockcount < len)
4603 return -EAGAIN;
4604 return 0;
4605 }
4606
4607 /*
4608 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4609  * extent state if necessary. Detailed behaviour is controlled by the flags
4610 * parameter. Only allocates blocks from a single allocation group, to avoid
4611 * locking problems.
4612 *
4613 * The returned value in "firstblock" from the first call in a transaction
4614 * must be remembered and presented to subsequent calls in "firstblock".
4615 * An upper bound for the number of blocks to be allocated is supplied to
4616 * the first call in "total"; if no allocation group has that many free
4617 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4618 */
4619 int
4620 xfs_bmapi_write(
4621 struct xfs_trans *tp, /* transaction pointer */
4622 struct xfs_inode *ip, /* incore inode */
4623 xfs_fileoff_t bno, /* starting file offs. mapped */
4624 xfs_filblks_t len, /* length to map in file */
4625 int flags, /* XFS_BMAPI_... */
4626 xfs_fsblock_t *firstblock, /* first allocated block
4627 controls a.g. for allocs */
4628 xfs_extlen_t total, /* total blocks needed */
4629 struct xfs_bmbt_irec *mval, /* output: map values */
4630 int *nmap, /* i/o: mval size/count */
4631 struct xfs_defer_ops *dfops) /* i/o: list extents to free */
4632 {
4633 struct xfs_mount *mp = ip->i_mount;
4634 struct xfs_ifork *ifp;
4635 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4636 xfs_fileoff_t end; /* end of mapped file region */
4637 int eof; /* after the end of extents */
4638 int error; /* error return */
4639 int n; /* current extent index */
4640 xfs_fileoff_t obno; /* old block number (offset) */
4641 int whichfork; /* data or attr fork */
4642
4643 #ifdef DEBUG
4644 xfs_fileoff_t orig_bno; /* original block number value */
4645 int orig_flags; /* original flags arg value */
4646 xfs_filblks_t orig_len; /* original value of len arg */
4647 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4648 int orig_nmap; /* original value of *nmap */
4649
4650 orig_bno = bno;
4651 orig_len = len;
4652 orig_flags = flags;
4653 orig_mval = mval;
4654 orig_nmap = *nmap;
4655 #endif
4656 whichfork = xfs_bmapi_whichfork(flags);
4657
4658 ASSERT(*nmap >= 1);
4659 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4660 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4661 ASSERT(tp != NULL ||
4662 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
4663 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
4664 ASSERT(len > 0);
4665 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4666 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4667 ASSERT(!(flags & XFS_BMAPI_REMAP) || whichfork == XFS_DATA_FORK);
4668 ASSERT(!(flags & XFS_BMAPI_PREALLOC) || !(flags & XFS_BMAPI_REMAP));
4669 ASSERT(!(flags & XFS_BMAPI_CONVERT) || !(flags & XFS_BMAPI_REMAP));
4670
4671 	/* zeroing is currently only for data extents, not metadata */
4672 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4673 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4674 /*
4675 * we can allocate unwritten extents or pre-zero allocated blocks,
4676 * but it makes no sense to do both at once. This would result in
4677 	 * zeroing the unwritten extent twice, while it would still be an
4678 	 * unwritten extent.
4679 */
4680 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4681 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4682
4683 if (unlikely(XFS_TEST_ERROR(
4684 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4685 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4686 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4687 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4688 return -EFSCORRUPTED;
4689 }
4690
4691 if (XFS_FORCED_SHUTDOWN(mp))
4692 return -EIO;
4693
4694 ifp = XFS_IFORK_PTR(ip, whichfork);
4695
4696 XFS_STATS_INC(mp, xs_blk_mapw);
4697
4698 if (*firstblock == NULLFSBLOCK) {
4699 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4700 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4701 else
4702 bma.minleft = 1;
4703 } else {
4704 bma.minleft = 0;
4705 }
4706
4707 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4708 error = xfs_iread_extents(tp, ip, whichfork);
4709 if (error)
4710 goto error0;
4711 }
4712
4713 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
4714 &bma.prev);
4715 n = 0;
4716 end = bno + len;
4717 obno = bno;
4718
4719 bma.tp = tp;
4720 bma.ip = ip;
4721 bma.total = total;
4722 bma.datatype = 0;
4723 bma.dfops = dfops;
4724 bma.firstblock = firstblock;
4725
4726 while (bno < end && n < *nmap) {
4727 bool need_alloc = false, wasdelay = false;
4728
4729 		/* in hole or beyond EOF? */
4730 if (eof || bma.got.br_startoff > bno) {
4731 if (flags & XFS_BMAPI_DELALLOC) {
4732 /*
4733 * For the COW fork we can reasonably get a
4734 * request for converting an extent that races
4735 * with other threads already having converted
4736 				 * part of it, since converting COW extents to
4737 				 * regular blocks is not protected by the
4738 				 * IOLOCK.
4739 */
4740 ASSERT(flags & XFS_BMAPI_COWFORK);
4741 if (!(flags & XFS_BMAPI_COWFORK)) {
4742 error = -EIO;
4743 goto error0;
4744 }
4745
4746 if (eof || bno >= end)
4747 break;
4748 } else {
4749 need_alloc = true;
4750 }
4751 } else {
4752 /*
4753 * Make sure we only reflink into a hole.
4754 */
4755 ASSERT(!(flags & XFS_BMAPI_REMAP));
4756 if (isnullstartblock(bma.got.br_startblock))
4757 wasdelay = true;
4758 }
4759
4760 /*
4761 * First, deal with the hole before the allocated space
4762 * that we found, if any.
4763 */
4764 if (need_alloc || wasdelay) {
4765 bma.eof = eof;
4766 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4767 bma.wasdel = wasdelay;
4768 bma.offset = bno;
4769 bma.flags = flags;
4770
4771 /*
4772 * There's a 32/64 bit type mismatch between the
4773 * allocation length request (which can be 64 bits in
4774 * length) and the bma length request, which is
4775 * xfs_extlen_t and therefore 32 bits. Hence we have to
4776 * check for 32-bit overflows and handle them here.
4777 */
4778 if (len > (xfs_filblks_t)MAXEXTLEN)
4779 bma.length = MAXEXTLEN;
4780 else
4781 bma.length = len;
4782
4783 ASSERT(len > 0);
4784 ASSERT(bma.length > 0);
4785 error = xfs_bmapi_allocate(&bma);
4786 if (error)
4787 goto error0;
4788 if (bma.blkno == NULLFSBLOCK)
4789 break;
4790
4791 /*
4792 * If this is a CoW allocation, record the data in
4793 * the refcount btree for orphan recovery.
4794 */
4795 if (whichfork == XFS_COW_FORK) {
4796 error = xfs_refcount_alloc_cow_extent(mp, dfops,
4797 bma.blkno, bma.length);
4798 if (error)
4799 goto error0;
4800 }
4801 }
4802
4803 /* Deal with the allocated space we found. */
4804 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4805 end, n, flags);
4806
4807 /* Execute unwritten extent conversion if necessary */
4808 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4809 if (error == -EAGAIN)
4810 continue;
4811 if (error)
4812 goto error0;
4813
4814 /* update the extent map to return */
4815 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4816
4817 /*
4818 * If we're done, stop now. Stop when we've allocated
4819 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4820 * the transaction may get too big.
4821 */
4822 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4823 break;
4824
4825 /* Else go on to the next record. */
4826 bma.prev = bma.got;
4827 if (++bma.idx < xfs_iext_count(ifp)) {
4828 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
4829 &bma.got);
4830 } else
4831 eof = 1;
4832 }
4833 *nmap = n;
4834
4835 /*
4836 * Transform from btree to extents, give it cur.
4837 */
4838 if (xfs_bmap_wants_extents(ip, whichfork)) {
4839 int tmp_logflags = 0;
4840
4841 ASSERT(bma.cur);
4842 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4843 &tmp_logflags, whichfork);
4844 bma.logflags |= tmp_logflags;
4845 if (error)
4846 goto error0;
4847 }
4848
4849 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4850 XFS_IFORK_NEXTENTS(ip, whichfork) >
4851 XFS_IFORK_MAXEXT(ip, whichfork));
4852 error = 0;
4853 error0:
4854 /*
4855 * Log everything. Do this after conversion, there's no point in
4856 * logging the extent records if we've converted to btree format.
4857 */
4858 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4859 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4860 bma.logflags &= ~xfs_ilog_fext(whichfork);
4861 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4862 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4863 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4864 /*
4865 * Log whatever the flags say, even if error. Otherwise we might miss
4866 * detecting a case where the data is changed, there's an error,
4867 * and it's not logged so we don't shutdown when we should.
4868 */
4869 if (bma.logflags)
4870 xfs_trans_log_inode(tp, ip, bma.logflags);
4871
4872 if (bma.cur) {
4873 if (!error) {
4874 ASSERT(*firstblock == NULLFSBLOCK ||
4875 XFS_FSB_TO_AGNO(mp, *firstblock) <=
4876 XFS_FSB_TO_AGNO(mp,
4877 bma.cur->bc_private.b.firstblock));
4878 *firstblock = bma.cur->bc_private.b.firstblock;
4879 }
4880 xfs_btree_del_cursor(bma.cur,
4881 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4882 }
4883 if (!error)
4884 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4885 orig_nmap, *nmap);
4886 return error;
4887 }
4888
4889 /*
4890 * When a delalloc extent is split (e.g., due to a hole punch), the original
4891 * indlen reservation must be shared across the two new extents that are left
4892 * behind.
4893 *
4894 * Given the original reservation and the worst case indlen for the two new
4895 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4896 * reservation fairly across the two new extents. If necessary, steal available
4897 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4898 * ores == 1). The number of stolen blocks is returned. The availability and
4899 * subsequent accounting of stolen blocks is the responsibility of the caller.
4900 */
4901 static xfs_filblks_t
4902 xfs_bmap_split_indlen(
4903 xfs_filblks_t ores, /* original res. */
4904 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4905 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4906 xfs_filblks_t avail) /* stealable blocks */
4907 {
4908 xfs_filblks_t len1 = *indlen1;
4909 xfs_filblks_t len2 = *indlen2;
4910 xfs_filblks_t nres = len1 + len2; /* new total res. */
4911 xfs_filblks_t stolen = 0;
4912 xfs_filblks_t resfactor;
4913
4914 /*
4915 * Steal as many blocks as we can to try and satisfy the worst case
4916 * indlen for both new extents.
4917 */
4918 if (ores < nres && avail)
4919 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4920 ores += stolen;
4921
4922 /* nothing else to do if we've satisfied the new reservation */
4923 if (ores >= nres)
4924 return stolen;
4925
4926 /*
4927 * We can't meet the total required reservation for the two extents.
4928 * Calculate the percent of the overall shortage between both extents
4929 * and apply this percentage to each of the requested indlen values.
4930 * This distributes the shortage fairly and reduces the chances that one
4931 * of the two extents is left with nothing when extents are repeatedly
4932 * split.
4933 */
4934 resfactor = (ores * 100);
4935 do_div(resfactor, nres);
4936 len1 *= resfactor;
4937 do_div(len1, 100);
4938 len2 *= resfactor;
4939 do_div(len2, 100);
4940 ASSERT(len1 + len2 <= ores);
4941 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4942
4943 /*
4944 * Hand out the remainder to each extent. If one of the two reservations
4945 * is zero, we want to make sure that one gets a block first. The loop
4946 * below starts with len1, so hand len2 a block right off the bat if it
4947 * is zero.
4948 */
4949 ores -= (len1 + len2);
4950 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4951 if (ores && !len2 && *indlen2) {
4952 len2++;
4953 ores--;
4954 }
4955 while (ores) {
4956 if (len1 < *indlen1) {
4957 len1++;
4958 ores--;
4959 }
4960 if (!ores)
4961 break;
4962 if (len2 < *indlen2) {
4963 len2++;
4964 ores--;
4965 }
4966 }
4967
4968 *indlen1 = len1;
4969 *indlen2 = len2;
4970
4971 return stolen;
4972 }
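/*
 * Worked example (illustrative numbers): ores = 5, *indlen1 = *indlen2 = 4
 * and avail = 0. Nothing can be stolen and 5 < 8, so resfactor is
 * 500 / 8 = 62 and both requests scale down to 4 * 62 / 100 = 2. The one
 * remaining block is then handed out by the loop, leaving indlen1 = 3 and
 * indlen2 = 2, which together consume exactly the original reservation.
 */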
4973
4974 int
4975 xfs_bmap_del_extent_delay(
4976 struct xfs_inode *ip,
4977 int whichfork,
4978 xfs_extnum_t *idx,
4979 struct xfs_bmbt_irec *got,
4980 struct xfs_bmbt_irec *del)
4981 {
4982 struct xfs_mount *mp = ip->i_mount;
4983 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4984 struct xfs_bmbt_irec new;
4985 int64_t da_old, da_new, da_diff = 0;
4986 xfs_fileoff_t del_endoff, got_endoff;
4987 xfs_filblks_t got_indlen, new_indlen, stolen;
4988 int error = 0, state = 0;
4989 bool isrt;
4990
4991 XFS_STATS_INC(mp, xs_del_exlist);
4992
4993 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4994 del_endoff = del->br_startoff + del->br_blockcount;
4995 got_endoff = got->br_startoff + got->br_blockcount;
4996 da_old = startblockval(got->br_startblock);
4997 da_new = 0;
4998
4999 ASSERT(*idx >= 0);
5000 ASSERT(*idx <= xfs_iext_count(ifp));
5001 ASSERT(del->br_blockcount > 0);
5002 ASSERT(got->br_startoff <= del->br_startoff);
5003 ASSERT(got_endoff >= del_endoff);
5004
5005 if (isrt) {
5006 int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
5007
5008 do_div(rtexts, mp->m_sb.sb_rextsize);
5009 xfs_mod_frextents(mp, rtexts);
5010 }
5011
5012 /*
5013 * Update the inode delalloc counter now and wait to update the
5014 * sb counters as we might have to borrow some blocks for the
5015 * indirect block accounting.
5016 */
5017 error = xfs_trans_reserve_quota_nblks(NULL, ip,
5018 -((long)del->br_blockcount), 0,
5019 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
5020 if (error)
5021 return error;
5022 ip->i_delayed_blks -= del->br_blockcount;
5023
5024 if (whichfork == XFS_COW_FORK)
5025 state |= BMAP_COWFORK;
5026
5027 if (got->br_startoff == del->br_startoff)
5028 state |= BMAP_LEFT_CONTIG;
5029 if (got_endoff == del_endoff)
5030 state |= BMAP_RIGHT_CONTIG;
5031
5032 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
5033 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
5034 /*
5035 * Matches the whole extent. Delete the entry.
5036 */
5037 xfs_iext_remove(ip, *idx, 1, state);
5038 --*idx;
5039 break;
5040 case BMAP_LEFT_CONTIG:
5041 /*
5042 * Deleting the first part of the extent.
5043 */
5044 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5045 got->br_startoff = del_endoff;
5046 got->br_blockcount -= del->br_blockcount;
5047 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
5048 got->br_blockcount), da_old);
5049 got->br_startblock = nullstartblock((int)da_new);
5050 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5051 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5052 break;
5053 case BMAP_RIGHT_CONTIG:
5054 /*
5055 * Deleting the last part of the extent.
5056 */
5057 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5058 got->br_blockcount = got->br_blockcount - del->br_blockcount;
5059 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
5060 got->br_blockcount), da_old);
5061 got->br_startblock = nullstartblock((int)da_new);
5062 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5063 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5064 break;
5065 case 0:
5066 /*
5067 * Deleting the middle of the extent.
5068 *
5069 * Distribute the original indlen reservation across the two new
5070 * extents. Steal blocks from the deleted extent if necessary.
5071 * Stealing blocks simply fudges the fdblocks accounting below.
5072 * Warn if either of the new indlen reservations is zero as this
5073 * can lead to delalloc problems.
5074 */
5075 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5076
5077 got->br_blockcount = del->br_startoff - got->br_startoff;
5078 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
5079
5080 new.br_blockcount = got_endoff - del_endoff;
5081 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5082
5083 WARN_ON_ONCE(!got_indlen || !new_indlen);
5084 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
5085 del->br_blockcount);
5086
5087 got->br_startblock = nullstartblock((int)got_indlen);
5088 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5089 trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
5090
5091 new.br_startoff = del_endoff;
5092 new.br_state = got->br_state;
5093 new.br_startblock = nullstartblock((int)new_indlen);
5094
5095 ++*idx;
5096 xfs_iext_insert(ip, *idx, 1, &new, state);
5097
5098 da_new = got_indlen + new_indlen - stolen;
5099 del->br_blockcount -= stolen;
5100 break;
5101 }
5102
5103 ASSERT(da_old >= da_new);
5104 da_diff = da_old - da_new;
5105 if (!isrt)
5106 da_diff += del->br_blockcount;
5107 if (da_diff)
5108 xfs_mod_fdblocks(mp, da_diff, false);
5109 return error;
5110 }
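/*
 * Illustrative sketch of the middle-delete case above, with made-up numbers:
 * punching [140, 160) out of a delalloc extent covering [100, 200) that
 * carries an indlen reservation da_old leaves two pieces, [100, 140) and
 * [160, 200).  A worst-case indlen is recomputed for each piece and
 * xfs_bmap_split_indlen() divides da_old between them; if da_old is too
 * small, up to del->br_blockcount blocks are "stolen" from the deleted
 * range, which simply reduces how much the final xfs_mod_fdblocks() call
 * in the function above hands back to the free space counters.
 */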
5111
5112 void
5113 xfs_bmap_del_extent_cow(
5114 struct xfs_inode *ip,
5115 xfs_extnum_t *idx,
5116 struct xfs_bmbt_irec *got,
5117 struct xfs_bmbt_irec *del)
5118 {
5119 struct xfs_mount *mp = ip->i_mount;
5120 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
5121 struct xfs_bmbt_irec new;
5122 xfs_fileoff_t del_endoff, got_endoff;
5123 int state = BMAP_COWFORK;
5124
5125 XFS_STATS_INC(mp, xs_del_exlist);
5126
5127 del_endoff = del->br_startoff + del->br_blockcount;
5128 got_endoff = got->br_startoff + got->br_blockcount;
5129
5130 ASSERT(*idx >= 0);
5131 ASSERT(*idx <= xfs_iext_count(ifp));
5132 ASSERT(del->br_blockcount > 0);
5133 ASSERT(got->br_startoff <= del->br_startoff);
5134 ASSERT(got_endoff >= del_endoff);
5135 ASSERT(!isnullstartblock(got->br_startblock));
5136
5137 if (got->br_startoff == del->br_startoff)
5138 state |= BMAP_LEFT_CONTIG;
5139 if (got_endoff == del_endoff)
5140 state |= BMAP_RIGHT_CONTIG;
5141
5142 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
5143 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
5144 /*
5145 * Matches the whole extent. Delete the entry.
5146 */
5147 xfs_iext_remove(ip, *idx, 1, state);
5148 --*idx;
5149 break;
5150 case BMAP_LEFT_CONTIG:
5151 /*
5152 * Deleting the first part of the extent.
5153 */
5154 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5155 got->br_startoff = del_endoff;
5156 got->br_blockcount -= del->br_blockcount;
5157 got->br_startblock = del->br_startblock + del->br_blockcount;
5158 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5159 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5160 break;
5161 case BMAP_RIGHT_CONTIG:
5162 /*
5163 * Deleting the last part of the extent.
5164 */
5165 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5166 got->br_blockcount -= del->br_blockcount;
5167 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5168 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5169 break;
5170 case 0:
5171 /*
5172 * Deleting the middle of the extent.
5173 */
5174 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5175 got->br_blockcount = del->br_startoff - got->br_startoff;
5176 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5177 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5178
5179 new.br_startoff = del_endoff;
5180 new.br_blockcount = got_endoff - del_endoff;
5181 new.br_state = got->br_state;
5182 new.br_startblock = del->br_startblock + del->br_blockcount;
5183
5184 ++*idx;
5185 xfs_iext_insert(ip, *idx, 1, &new, state);
5186 break;
5187 }
5188 }
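/*
 * Note on the CoW fork case handled above: the copy-on-write fork is
 * incore-only staging space, so unlike xfs_bmap_del_extent() below there
 * is no bmap btree, quota or di_nblocks update to make here; each of the
 * four contiguity cases simply rewrites the incore extent records.
 */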
5189
5190 /*
5191 * Called by xfs_bmapi to update file extent records and the btree
5192 * after removing space (or undoing a delayed allocation).
5193 */
5194 STATIC int /* error */
5195 xfs_bmap_del_extent(
5196 xfs_inode_t *ip, /* incore inode pointer */
5197 xfs_trans_t *tp, /* current transaction pointer */
5198 xfs_extnum_t *idx, /* extent number to update/delete */
5199 struct xfs_defer_ops *dfops, /* list of extents to be freed */
5200 xfs_btree_cur_t *cur, /* if null, not a btree */
5201 xfs_bmbt_irec_t *del, /* data to remove from extents */
5202 int *logflagsp, /* inode logging flags */
5203 int whichfork, /* data or attr fork */
5204 int bflags) /* bmapi flags */
5205 {
5206 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
5207 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
5208 xfs_fsblock_t del_endblock=0; /* first block past del */
5209 xfs_fileoff_t del_endoff; /* first offset past del */
5210 int delay; /* current block is delayed allocated */
5211 int do_fx; /* free extent at end of routine */
5212 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
5213 int error; /* error return value */
5214 int flags; /* inode logging flags */
5215 xfs_bmbt_irec_t got; /* current extent entry */
5216 xfs_fileoff_t got_endoff; /* first offset past got */
5217 int i; /* temp state */
5218 xfs_ifork_t *ifp; /* inode fork pointer */
5219 xfs_mount_t *mp; /* mount structure */
5220 xfs_filblks_t nblks; /* quota/sb block count */
5221 xfs_bmbt_irec_t new; /* new record to be inserted */
5222 /* REFERENCED */
5223 uint qfield; /* quota field to update */
5224 xfs_filblks_t temp; /* for indirect length calculations */
5225 xfs_filblks_t temp2; /* for indirect length calculations */
5226 int state = 0;
5227
5228 mp = ip->i_mount;
5229 XFS_STATS_INC(mp, xs_del_exlist);
5230
5231 if (whichfork == XFS_ATTR_FORK)
5232 state |= BMAP_ATTRFORK;
5233 else if (whichfork == XFS_COW_FORK)
5234 state |= BMAP_COWFORK;
5235
5236 ifp = XFS_IFORK_PTR(ip, whichfork);
5237 ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
5238 ASSERT(del->br_blockcount > 0);
5239 ep = xfs_iext_get_ext(ifp, *idx);
5240 xfs_bmbt_get_all(ep, &got);
5241 ASSERT(got.br_startoff <= del->br_startoff);
5242 del_endoff = del->br_startoff + del->br_blockcount;
5243 got_endoff = got.br_startoff + got.br_blockcount;
5244 ASSERT(got_endoff >= del_endoff);
5245 delay = isnullstartblock(got.br_startblock);
5246 ASSERT(isnullstartblock(del->br_startblock) == delay);
5247 flags = 0;
5248 qfield = 0;
5249 error = 0;
5250 /*
5251 * If deleting a real allocation, must free up the disk space.
5252 */
5253 if (!delay) {
5254 flags = XFS_ILOG_CORE;
5255 /*
5256 * Realtime allocation. Free it and record di_nblocks update.
5257 */
5258 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5259 xfs_fsblock_t bno;
5260 xfs_filblks_t len;
5261
5262 ASSERT(do_mod(del->br_blockcount,
5263 mp->m_sb.sb_rextsize) == 0);
5264 ASSERT(do_mod(del->br_startblock,
5265 mp->m_sb.sb_rextsize) == 0);
5266 bno = del->br_startblock;
5267 len = del->br_blockcount;
5268 do_div(bno, mp->m_sb.sb_rextsize);
5269 do_div(len, mp->m_sb.sb_rextsize);
5270 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5271 if (error)
5272 goto done;
5273 do_fx = 0;
5274 nblks = len * mp->m_sb.sb_rextsize;
5275 qfield = XFS_TRANS_DQ_RTBCOUNT;
5276 }
5277 /*
5278 * Ordinary allocation.
5279 */
5280 else {
5281 do_fx = 1;
5282 nblks = del->br_blockcount;
5283 qfield = XFS_TRANS_DQ_BCOUNT;
5284 }
5285 /*
5286 * Set up del_endblock and cur for later.
5287 */
5288 del_endblock = del->br_startblock + del->br_blockcount;
5289 if (cur) {
5290 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
5291 got.br_startblock, got.br_blockcount,
5292 &i)))
5293 goto done;
5294 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5295 }
5296 da_old = da_new = 0;
5297 } else {
5298 da_old = startblockval(got.br_startblock);
5299 da_new = 0;
5300 nblks = 0;
5301 do_fx = 0;
5302 }
5303
5304 /*
5305 * Set flag value to use in switch statement.
5306 * Left-contig is 2, right-contig is 1.
5307 */
5308 switch (((got.br_startoff == del->br_startoff) << 1) |
5309 (got_endoff == del_endoff)) {
5310 case 3:
5311 /*
5312 * Matches the whole extent. Delete the entry.
5313 */
5314 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5315 xfs_iext_remove(ip, *idx, 1,
5316 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
5317 --*idx;
5318 if (delay)
5319 break;
5320
5321 XFS_IFORK_NEXT_SET(ip, whichfork,
5322 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5323 flags |= XFS_ILOG_CORE;
5324 if (!cur) {
5325 flags |= xfs_ilog_fext(whichfork);
5326 break;
5327 }
5328 if ((error = xfs_btree_delete(cur, &i)))
5329 goto done;
5330 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5331 break;
5332
5333 case 2:
5334 /*
5335 * Deleting the first part of the extent.
5336 */
5337 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5338 xfs_bmbt_set_startoff(ep, del_endoff);
5339 temp = got.br_blockcount - del->br_blockcount;
5340 xfs_bmbt_set_blockcount(ep, temp);
5341 if (delay) {
5342 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5343 da_old);
5344 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5345 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5346 da_new = temp;
5347 break;
5348 }
5349 xfs_bmbt_set_startblock(ep, del_endblock);
5350 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5351 if (!cur) {
5352 flags |= xfs_ilog_fext(whichfork);
5353 break;
5354 }
5355 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
5356 got.br_blockcount - del->br_blockcount,
5357 got.br_state)))
5358 goto done;
5359 break;
5360
5361 case 1:
5362 /*
5363 * Deleting the last part of the extent.
5364 */
5365 temp = got.br_blockcount - del->br_blockcount;
5366 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5367 xfs_bmbt_set_blockcount(ep, temp);
5368 if (delay) {
5369 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5370 da_old);
5371 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5372 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5373 da_new = temp;
5374 break;
5375 }
5376 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5377 if (!cur) {
5378 flags |= xfs_ilog_fext(whichfork);
5379 break;
5380 }
5381 if ((error = xfs_bmbt_update(cur, got.br_startoff,
5382 got.br_startblock,
5383 got.br_blockcount - del->br_blockcount,
5384 got.br_state)))
5385 goto done;
5386 break;
5387
5388 case 0:
5389 /*
5390 * Deleting the middle of the extent.
5391 */
5392 temp = del->br_startoff - got.br_startoff;
5393 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5394 xfs_bmbt_set_blockcount(ep, temp);
5395 new.br_startoff = del_endoff;
5396 temp2 = got_endoff - del_endoff;
5397 new.br_blockcount = temp2;
5398 new.br_state = got.br_state;
5399 if (!delay) {
5400 new.br_startblock = del_endblock;
5401 flags |= XFS_ILOG_CORE;
5402 if (cur) {
5403 if ((error = xfs_bmbt_update(cur,
5404 got.br_startoff,
5405 got.br_startblock, temp,
5406 got.br_state)))
5407 goto done;
5408 if ((error = xfs_btree_increment(cur, 0, &i)))
5409 goto done;
5410 cur->bc_rec.b = new;
5411 error = xfs_btree_insert(cur, &i);
5412 if (error && error != -ENOSPC)
5413 goto done;
5414 /*
5415 * If we get ENOSPC back from the btree insert,
5416 * it tried a split and we have a zero
5417 * block reservation.
5418 * Fix up our state and return the error.
5419 */
5420 if (error == -ENOSPC) {
5421 /*
5422 * Reset the cursor, don't trust
5423 * it after any insert operation.
5424 */
5425 if ((error = xfs_bmbt_lookup_eq(cur,
5426 got.br_startoff,
5427 got.br_startblock,
5428 temp, &i)))
5429 goto done;
5430 XFS_WANT_CORRUPTED_GOTO(mp,
5431 i == 1, done);
5432 /*
5433 * Update the btree record back
5434 * to the original value.
5435 */
5436 if ((error = xfs_bmbt_update(cur,
5437 got.br_startoff,
5438 got.br_startblock,
5439 got.br_blockcount,
5440 got.br_state)))
5441 goto done;
5442 /*
5443 * Reset the extent record back
5444 * to the original value.
5445 */
5446 xfs_bmbt_set_blockcount(ep,
5447 got.br_blockcount);
5448 flags = 0;
5449 error = -ENOSPC;
5450 goto done;
5451 }
5452 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5453 } else
5454 flags |= xfs_ilog_fext(whichfork);
5455 XFS_IFORK_NEXT_SET(ip, whichfork,
5456 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5457 } else {
5458 xfs_filblks_t stolen;
5459 ASSERT(whichfork == XFS_DATA_FORK);
5460
5461 /*
5462 * Distribute the original indlen reservation across the
5463 * two new extents. Steal blocks from the deleted extent
5464 * if necessary. Stealing blocks simply fudges the
5465 * fdblocks accounting in xfs_bunmapi().
5466 */
5467 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
5468 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5469 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
5470 del->br_blockcount);
5471 da_new = temp + temp2 - stolen;
5472 del->br_blockcount -= stolen;
5473
5474 /*
5475 * Set the reservation for each extent. Warn if either
5476 * is zero as this can lead to delalloc problems.
5477 */
5478 WARN_ON_ONCE(!temp || !temp2);
5479 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5480 new.br_startblock = nullstartblock((int)temp2);
5481 }
5482 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5483 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
5484 ++*idx;
5485 break;
5486 }
5487
5488 /* remove reverse mapping */
5489 if (!delay) {
5490 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
5491 if (error)
5492 goto done;
5493 }
5494
5495 /*
5496 * If we need to, add to list of extents to delete.
5497 */
5498 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5499 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5500 error = xfs_refcount_decrease_extent(mp, dfops, del);
5501 if (error)
5502 goto done;
5503 } else
5504 xfs_bmap_add_free(mp, dfops, del->br_startblock,
5505 del->br_blockcount, NULL);
5506 }
5507
5508 /*
5509 * Adjust inode # blocks in the file.
5510 */
5511 if (nblks)
5512 ip->i_d.di_nblocks -= nblks;
5513 /*
5514 * Adjust quota data.
5515 */
5516 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5517 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5518
5519 /*
5520 * Account for change in delayed indirect blocks.
5521 * Nothing to do for disk quota accounting here.
5522 */
5523 ASSERT(da_old >= da_new);
5524 if (da_old > da_new)
5525 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
5526 done:
5527 *logflagsp = flags;
5528 return error;
5529 }
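/*
 * A quick picture of the four cases handled above, keyed by the value
 * ((left-contig << 1) | right-contig) computed from got and del:
 *
 *	got:	|==================|
 *	3:	|------- del ------|	whole record goes away
 *	2:	|-- del --|.........	front trimmed: startoff/startblock move up
 *	1:	.........|-- del --|	tail trimmed: blockcount shrinks
 *	0:	....|-- del --|.....	middle punched: a second record is inserted
 */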
5530
5531 /*
5532 * Unmap (remove) blocks from a file.
5533 * If nexts is nonzero then the number of extents to remove is limited to
5534 * that value. If not all extents in the block range can be removed then
5535 * *rlen holds the remaining length (the xfs_bunmapi() wrapper sets *done).
5536 */
5537 int /* error */
5538 __xfs_bunmapi(
5539 xfs_trans_t *tp, /* transaction pointer */
5540 struct xfs_inode *ip, /* incore inode */
5541 xfs_fileoff_t bno, /* starting offset to unmap */
5542 xfs_filblks_t *rlen, /* i/o: amount remaining */
5543 int flags, /* misc flags */
5544 xfs_extnum_t nexts, /* number of extents max */
5545 xfs_fsblock_t *firstblock, /* first allocated block
5546 controls a.g. for allocs */
5547 struct xfs_defer_ops *dfops) /* i/o: deferred updates */
5548 {
5549 xfs_btree_cur_t *cur; /* bmap btree cursor */
5550 xfs_bmbt_irec_t del; /* extent being deleted */
5551 int eof; /* is deleting at eof */
5552 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
5553 int error; /* error return value */
5554 xfs_extnum_t extno; /* extent number in list */
5555 xfs_bmbt_irec_t got; /* current extent record */
5556 xfs_ifork_t *ifp; /* inode fork pointer */
5557 int isrt; /* freeing in rt area */
5558 xfs_extnum_t lastx; /* last extent index used */
5559 int logflags; /* transaction logging flags */
5560 xfs_extlen_t mod; /* rt extent offset */
5561 xfs_mount_t *mp; /* mount structure */
5562 xfs_bmbt_irec_t prev; /* previous extent record */
5563 xfs_fileoff_t start; /* first file offset deleted */
5564 int tmp_logflags; /* partial logging flags */
5565 int wasdel; /* was a delayed alloc extent */
5566 int whichfork; /* data or attribute fork */
5567 xfs_fsblock_t sum;
5568 xfs_filblks_t len = *rlen; /* length to unmap in file */
5569 xfs_fileoff_t max_len;
5570 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5571
5572 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5573
5574 whichfork = xfs_bmapi_whichfork(flags);
5575 ASSERT(whichfork != XFS_COW_FORK);
5576 ifp = XFS_IFORK_PTR(ip, whichfork);
5577 if (unlikely(
5578 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5579 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5580 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5581 ip->i_mount);
5582 return -EFSCORRUPTED;
5583 }
5584 mp = ip->i_mount;
5585 if (XFS_FORCED_SHUTDOWN(mp))
5586 return -EIO;
5587
5588 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5589 ASSERT(len > 0);
5590 ASSERT(nexts >= 0);
5591
5592 /*
5593 * Guesstimate how many blocks we can unmap without running the risk of
5594 * blowing out the transaction with a mix of EFIs and reflink
5595 * adjustments.
5596 */
5597 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5598 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5599 else
5600 max_len = len;
5601
5602 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5603 (error = xfs_iread_extents(tp, ip, whichfork)))
5604 return error;
5605 if (xfs_iext_count(ifp) == 0) {
5606 *rlen = 0;
5607 return 0;
5608 }
5609 XFS_STATS_INC(mp, xs_blk_unmap);
5610 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5611 start = bno;
5612 bno = start + len - 1;
5613 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5614 &prev);
5615
5616 /*
5617 * Check to see if the given block number is past the end of the
5618 * file, back up to the last block if so...
5619 */
5620 if (eof) {
5621 ep = xfs_iext_get_ext(ifp, --lastx);
5622 xfs_bmbt_get_all(ep, &got);
5623 bno = got.br_startoff + got.br_blockcount - 1;
5624 }
5625 logflags = 0;
5626 if (ifp->if_flags & XFS_IFBROOT) {
5627 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5628 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5629 cur->bc_private.b.firstblock = *firstblock;
5630 cur->bc_private.b.dfops = dfops;
5631 cur->bc_private.b.flags = 0;
5632 } else
5633 cur = NULL;
5634
5635 if (isrt) {
5636 /*
5637 * Synchronize by locking the bitmap inode.
5638 */
5639 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5640 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5641 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5642 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5643 }
5644
5645 extno = 0;
5646 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5647 (nexts == 0 || extno < nexts) && max_len > 0) {
5648 /*
5649 * Is the found extent after a hole in which bno lives?
5650 * Just back up to the previous extent, if so.
5651 */
5652 if (got.br_startoff > bno) {
5653 if (--lastx < 0)
5654 break;
5655 ep = xfs_iext_get_ext(ifp, lastx);
5656 xfs_bmbt_get_all(ep, &got);
5657 }
5658 /*
5659 * Is the last block of this extent before the range
5660 * we're supposed to delete? If so, we're done.
5661 */
5662 bno = XFS_FILEOFF_MIN(bno,
5663 got.br_startoff + got.br_blockcount - 1);
5664 if (bno < start)
5665 break;
5666 /*
5667 * Then deal with the (possibly delayed) allocated space
5668 * we found.
5669 */
5670 ASSERT(ep != NULL);
5671 del = got;
5672 wasdel = isnullstartblock(del.br_startblock);
5673
5674 /*
5675 * Make sure we don't touch multiple AGF headers out of order
5676 * in a single transaction, as that could cause AB-BA deadlocks.
5677 */
5678 if (!wasdel) {
5679 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5680 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5681 break;
5682 prev_agno = agno;
5683 }
5684 if (got.br_startoff < start) {
5685 del.br_startoff = start;
5686 del.br_blockcount -= start - got.br_startoff;
5687 if (!wasdel)
5688 del.br_startblock += start - got.br_startoff;
5689 }
5690 if (del.br_startoff + del.br_blockcount > bno + 1)
5691 del.br_blockcount = bno + 1 - del.br_startoff;
5692
5693 /* How much can we safely unmap? */
5694 if (max_len < del.br_blockcount) {
5695 del.br_startoff += del.br_blockcount - max_len;
5696 if (!wasdel)
5697 del.br_startblock += del.br_blockcount - max_len;
5698 del.br_blockcount = max_len;
5699 }
5700
5701 sum = del.br_startblock + del.br_blockcount;
5702 if (isrt &&
5703 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5704 /*
5705 * Realtime extent not lined up at the end.
5706 * The extent could have been split into written
5707 * and unwritten pieces, or we could just be
5708 * unmapping part of it. But we can't really
5709 * get rid of part of a realtime extent.
5710 */
5711 if (del.br_state == XFS_EXT_UNWRITTEN ||
5712 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5713 /*
5714 * This piece is unwritten, or we're not
5715 * using unwritten extents. Skip over it.
5716 */
5717 ASSERT(bno >= mod);
5718 bno -= mod > del.br_blockcount ?
5719 del.br_blockcount : mod;
5720 if (bno < got.br_startoff) {
5721 if (--lastx >= 0)
5722 xfs_bmbt_get_all(xfs_iext_get_ext(
5723 ifp, lastx), &got);
5724 }
5725 continue;
5726 }
5727 /*
5728 * It's written, turn it unwritten.
5729 * This is better than zeroing it.
5730 */
5731 ASSERT(del.br_state == XFS_EXT_NORM);
5732 ASSERT(tp->t_blk_res > 0);
5733 /*
5734 * If this spans a realtime extent boundary,
5735 * chop it back to the start of the one we end at.
5736 */
5737 if (del.br_blockcount > mod) {
5738 del.br_startoff += del.br_blockcount - mod;
5739 del.br_startblock += del.br_blockcount - mod;
5740 del.br_blockcount = mod;
5741 }
5742 del.br_state = XFS_EXT_UNWRITTEN;
5743 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5744 whichfork, &lastx, &cur, &del,
5745 firstblock, dfops, &logflags);
5746 if (error)
5747 goto error0;
5748 goto nodelete;
5749 }
5750 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5751 /*
5752 * Realtime extent is lined up at the end but not
5753 * at the front. We'll get rid of full extents if
5754 * we can.
5755 */
5756 mod = mp->m_sb.sb_rextsize - mod;
5757 if (del.br_blockcount > mod) {
5758 del.br_blockcount -= mod;
5759 del.br_startoff += mod;
5760 del.br_startblock += mod;
5761 } else if ((del.br_startoff == start &&
5762 (del.br_state == XFS_EXT_UNWRITTEN ||
5763 tp->t_blk_res == 0)) ||
5764 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5765 /*
5766 * Can't make it unwritten. There isn't
5767 * a full extent here so just skip it.
5768 */
5769 ASSERT(bno >= del.br_blockcount);
5770 bno -= del.br_blockcount;
5771 if (got.br_startoff > bno) {
5772 if (--lastx >= 0) {
5773 ep = xfs_iext_get_ext(ifp,
5774 lastx);
5775 xfs_bmbt_get_all(ep, &got);
5776 }
5777 }
5778 continue;
5779 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5780 /*
5781 * This one is already unwritten.
5782 * It must have a written left neighbor.
5783 * Unwrite the killed part of that one and
5784 * try again.
5785 */
5786 ASSERT(lastx > 0);
5787 xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5788 lastx - 1), &prev);
5789 ASSERT(prev.br_state == XFS_EXT_NORM);
5790 ASSERT(!isnullstartblock(prev.br_startblock));
5791 ASSERT(del.br_startblock ==
5792 prev.br_startblock + prev.br_blockcount);
5793 if (prev.br_startoff < start) {
5794 mod = start - prev.br_startoff;
5795 prev.br_blockcount -= mod;
5796 prev.br_startblock += mod;
5797 prev.br_startoff = start;
5798 }
5799 prev.br_state = XFS_EXT_UNWRITTEN;
5800 lastx--;
5801 error = xfs_bmap_add_extent_unwritten_real(tp,
5802 ip, whichfork, &lastx, &cur,
5803 &prev, firstblock, dfops,
5804 &logflags);
5805 if (error)
5806 goto error0;
5807 goto nodelete;
5808 } else {
5809 ASSERT(del.br_state == XFS_EXT_NORM);
5810 del.br_state = XFS_EXT_UNWRITTEN;
5811 error = xfs_bmap_add_extent_unwritten_real(tp,
5812 ip, whichfork, &lastx, &cur,
5813 &del, firstblock, dfops,
5814 &logflags);
5815 if (error)
5816 goto error0;
5817 goto nodelete;
5818 }
5819 }
5820
5821 /*
5822 * If it's the case where the directory code is running
5823 * with no block reservation, and the deleted block is in
5824 * the middle of its extent, and the resulting insert
5825 * of an extent would cause transformation to btree format,
5826 * then reject it. The calling code will then swap
5827 * blocks around instead.
5828 * We have to do this now, rather than waiting for the
5829 * conversion to btree format, since the transaction
5830 * will be dirty.
5831 */
5832 if (!wasdel && tp->t_blk_res == 0 &&
5833 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5834 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5835 XFS_IFORK_MAXEXT(ip, whichfork) &&
5836 del.br_startoff > got.br_startoff &&
5837 del.br_startoff + del.br_blockcount <
5838 got.br_startoff + got.br_blockcount) {
5839 error = -ENOSPC;
5840 goto error0;
5841 }
5842
5843 /*
5844 * Unreserve quota and update realtime free space, if
5845 * appropriate. If delayed allocation, update the inode delalloc
5846 * counter now and wait to update the sb counters as
5847 * xfs_bmap_del_extent() might need to borrow some blocks.
5848 */
5849 if (wasdel) {
5850 ASSERT(startblockval(del.br_startblock) > 0);
5851 if (isrt) {
5852 xfs_filblks_t rtexts;
5853
5854 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5855 do_div(rtexts, mp->m_sb.sb_rextsize);
5856 xfs_mod_frextents(mp, (int64_t)rtexts);
5857 (void)xfs_trans_reserve_quota_nblks(NULL,
5858 ip, -((long)del.br_blockcount), 0,
5859 XFS_QMOPT_RES_RTBLKS);
5860 } else {
5861 (void)xfs_trans_reserve_quota_nblks(NULL,
5862 ip, -((long)del.br_blockcount), 0,
5863 XFS_QMOPT_RES_REGBLKS);
5864 }
5865 ip->i_delayed_blks -= del.br_blockcount;
5866 if (cur)
5867 cur->bc_private.b.flags |=
5868 XFS_BTCUR_BPRV_WASDEL;
5869 } else if (cur)
5870 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5871
5872 error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
5873 &tmp_logflags, whichfork, flags);
5874 logflags |= tmp_logflags;
5875 if (error)
5876 goto error0;
5877
5878 if (!isrt && wasdel)
5879 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
5880
5881 max_len -= del.br_blockcount;
5882 bno = del.br_startoff - 1;
5883 nodelete:
5884 /*
5885 * If not done go on to the next (previous) record.
5886 */
5887 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5888 if (lastx >= 0) {
5889 ep = xfs_iext_get_ext(ifp, lastx);
5890 if (xfs_bmbt_get_startoff(ep) > bno) {
5891 if (--lastx >= 0)
5892 ep = xfs_iext_get_ext(ifp,
5893 lastx);
5894 }
5895 xfs_bmbt_get_all(ep, &got);
5896 }
5897 extno++;
5898 }
5899 }
5900 if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
5901 *rlen = 0;
5902 else
5903 *rlen = bno - start + 1;
5904
5905 /*
5906 * Convert to a btree if necessary.
5907 */
5908 if (xfs_bmap_needs_btree(ip, whichfork)) {
5909 ASSERT(cur == NULL);
5910 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
5911 &cur, 0, &tmp_logflags, whichfork);
5912 logflags |= tmp_logflags;
5913 if (error)
5914 goto error0;
5915 }
5916 /*
5917 * transform from btree to extents, give it cur
5918 */
5919 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5920 ASSERT(cur != NULL);
5921 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5922 whichfork);
5923 logflags |= tmp_logflags;
5924 if (error)
5925 goto error0;
5926 }
5927 /*
5928 * transform from extents to local?
5929 */
5930 error = 0;
5931 error0:
5932 /*
5933 * Log everything. Do this after conversion, there's no point in
5934 * logging the extent records if we've converted to btree format.
5935 */
5936 if ((logflags & xfs_ilog_fext(whichfork)) &&
5937 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5938 logflags &= ~xfs_ilog_fext(whichfork);
5939 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5940 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5941 logflags &= ~xfs_ilog_fbroot(whichfork);
5942 /*
5943 * Log inode even in the error case, if the transaction
5944 * is dirty we'll need to shut down the filesystem.
5945 */
5946 if (logflags)
5947 xfs_trans_log_inode(tp, ip, logflags);
5948 if (cur) {
5949 if (!error) {
5950 *firstblock = cur->bc_private.b.firstblock;
5951 cur->bc_private.b.allocated = 0;
5952 }
5953 xfs_btree_del_cursor(cur,
5954 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5955 }
5956 return error;
5957 }
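/*
 * Worked example (illustrative numbers) of the realtime alignment logic
 * above: with sb_rextsize = 4 and a candidate del spanning blocks 10..15,
 * the end (block 16) is already aligned, but the start is 2 blocks into an
 * rt extent, so mod = 4 - 2 = 2 and the front of del is trimmed to start at
 * block 12.  Only whole rt extents are ever freed; unaligned pieces are
 * either skipped or converted back to unwritten on a later pass through
 * the loop.
 */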
5958
5959 /* Unmap a range of a file. */
5960 int
5961 xfs_bunmapi(
5962 xfs_trans_t *tp,
5963 struct xfs_inode *ip,
5964 xfs_fileoff_t bno,
5965 xfs_filblks_t len,
5966 int flags,
5967 xfs_extnum_t nexts,
5968 xfs_fsblock_t *firstblock,
5969 struct xfs_defer_ops *dfops,
5970 int *done)
5971 {
5972 int error;
5973
5974 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
5975 dfops);
5976 *done = (len == 0);
5977 return error;
5978 }
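/*
 * Minimal caller sketch (illustrative only; the local names are assumed,
 * the helpers are the same ones used by xfs_bmap_split_extent() below):
 * unmap a range and then finish the deferred frees/rmap updates.
 *
 *	struct xfs_defer_ops	dfops;
 *	xfs_fsblock_t		firstblock;
 *	int			done = 0;
 *
 *	xfs_defer_init(&dfops, &firstblock);
 *	error = xfs_bunmapi(tp, ip, start_fsb, len_fsb, 0, nexts,
 *			    &firstblock, &dfops, &done);
 *	if (!error)
 *		error = xfs_defer_finish(&tp, &dfops, NULL);
 */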
5979
5980 /*
5981 * Determine whether an extent shift can be accomplished by a merge with the
5982 * extent that precedes the target hole of the shift.
5983 */
5984 STATIC bool
5985 xfs_bmse_can_merge(
5986 struct xfs_bmbt_irec *left, /* preceding extent */
5987 struct xfs_bmbt_irec *got, /* current extent to shift */
5988 xfs_fileoff_t shift) /* shift fsb */
5989 {
5990 xfs_fileoff_t startoff;
5991
5992 startoff = got->br_startoff - shift;
5993
5994 /*
5995 * The extent, once shifted, must be adjacent in-file and on-disk with
5996 * the preceding extent.
5997 */
5998 if ((left->br_startoff + left->br_blockcount != startoff) ||
5999 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
6000 (left->br_state != got->br_state) ||
6001 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
6002 return false;
6003
6004 return true;
6005 }
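/*
 * Illustrative numbers for the checks above: with left = [offset 0,
 * 10 blocks, startblock 100], got = [offset 15, 5 blocks, startblock 110]
 * and shift = 5, the shifted startoff is 10, which lines up with the end
 * of left, the disk blocks are contiguous (100 + 10 == 110), the states
 * match and the combined length stays under MAXEXTLEN, so the shift can
 * be performed as a merge.
 */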
6006
6007 /*
6008 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
6009 * hole in the file. If an extent shift would result in the extent being fully
6010 * adjacent to the extent that currently precedes the hole, we can merge with
6011 * the preceding extent rather than do the shift.
6012 *
6013 * This function assumes the caller has verified a shift-by-merge is possible
6014 * with the provided extents via xfs_bmse_can_merge().
6015 */
6016 STATIC int
6017 xfs_bmse_merge(
6018 struct xfs_inode *ip,
6019 int whichfork,
6020 xfs_fileoff_t shift, /* shift fsb */
6021 int current_ext, /* idx of gotp */
6022 struct xfs_bmbt_rec_host *gotp, /* extent to shift */
6023 struct xfs_bmbt_rec_host *leftp, /* preceding extent */
6024 struct xfs_btree_cur *cur,
6025 int *logflags) /* output */
6026 {
6027 struct xfs_bmbt_irec got;
6028 struct xfs_bmbt_irec left;
6029 xfs_filblks_t blockcount;
6030 int error, i;
6031 struct xfs_mount *mp = ip->i_mount;
6032
6033 xfs_bmbt_get_all(gotp, &got);
6034 xfs_bmbt_get_all(leftp, &left);
6035 blockcount = left.br_blockcount + got.br_blockcount;
6036
6037 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
6038 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6039 ASSERT(xfs_bmse_can_merge(&left, &got, shift));
6040
6041 /*
6042 * Merge the in-core extents. Note that the host record pointers and
6043 * current_ext index are invalid once the extent has been removed via
6044 * xfs_iext_remove().
6045 */
6046 xfs_bmbt_set_blockcount(leftp, blockcount);
6047 xfs_iext_remove(ip, current_ext, 1, 0);
6048
6049 /*
6050 * Update the on-disk extent count, the btree if necessary and log the
6051 * inode.
6052 */
6053 XFS_IFORK_NEXT_SET(ip, whichfork,
6054 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
6055 *logflags |= XFS_ILOG_CORE;
6056 if (!cur) {
6057 *logflags |= XFS_ILOG_DEXT;
6058 return 0;
6059 }
6060
6061 /* lookup and remove the extent to merge */
6062 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
6063 got.br_blockcount, &i);
6064 if (error)
6065 return error;
6066 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6067
6068 error = xfs_btree_delete(cur, &i);
6069 if (error)
6070 return error;
6071 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6072
6073 /* lookup and update size of the previous extent */
6074 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
6075 left.br_blockcount, &i);
6076 if (error)
6077 return error;
6078 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6079
6080 left.br_blockcount = blockcount;
6081
6082 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
6083 left.br_blockcount, left.br_state);
6084 }
6085
6086 /*
6087 * Shift a single extent.
6088 */
6089 STATIC int
6090 xfs_bmse_shift_one(
6091 struct xfs_inode *ip,
6092 int whichfork,
6093 xfs_fileoff_t offset_shift_fsb,
6094 int *current_ext,
6095 struct xfs_bmbt_rec_host *gotp,
6096 struct xfs_btree_cur *cur,
6097 int *logflags,
6098 enum shift_direction direction,
6099 struct xfs_defer_ops *dfops)
6100 {
6101 struct xfs_ifork *ifp;
6102 struct xfs_mount *mp;
6103 xfs_fileoff_t startoff;
6104 struct xfs_bmbt_rec_host *adj_irecp;
6105 struct xfs_bmbt_irec got;
6106 struct xfs_bmbt_irec adj_irec;
6107 int error;
6108 int i;
6109 int total_extents;
6110
6111 mp = ip->i_mount;
6112 ifp = XFS_IFORK_PTR(ip, whichfork);
6113 total_extents = xfs_iext_count(ifp);
6114
6115 xfs_bmbt_get_all(gotp, &got);
6116
6117 /* delalloc extents should be prevented by caller */
6118 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
6119
6120 if (direction == SHIFT_LEFT) {
6121 startoff = got.br_startoff - offset_shift_fsb;
6122
6123 /*
6124 * Check for merge if we've got an extent to the left,
6125 * otherwise make sure there's enough room at the start
6126 * of the file for the shift.
6127 */
6128 if (!*current_ext) {
6129 if (got.br_startoff < offset_shift_fsb)
6130 return -EINVAL;
6131 goto update_current_ext;
6132 }
6133 /*
6134 * grab the left extent and check for a large
6135 * enough hole.
6136 */
6137 adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
6138 xfs_bmbt_get_all(adj_irecp, &adj_irec);
6139
6140 if (startoff <
6141 adj_irec.br_startoff + adj_irec.br_blockcount)
6142 return -EINVAL;
6143
6144 /* check whether to merge the extent or shift it down */
6145 if (xfs_bmse_can_merge(&adj_irec, &got,
6146 offset_shift_fsb)) {
6147 error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
6148 *current_ext, gotp, adj_irecp,
6149 cur, logflags);
6150 if (error)
6151 return error;
6152 adj_irec = got;
6153 goto update_rmap;
6154 }
6155 } else {
6156 startoff = got.br_startoff + offset_shift_fsb;
6157 /* nothing to move if this is the last extent */
6158 if (*current_ext >= (total_extents - 1))
6159 goto update_current_ext;
6160 /*
6161 * If this is not the last extent in the file, make sure there
6162 * is enough room between current extent and next extent for
6163 * accommodating the shift.
6164 */
6165 adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
6166 xfs_bmbt_get_all(adj_irecp, &adj_irec);
6167 if (startoff + got.br_blockcount > adj_irec.br_startoff)
6168 return -EINVAL;
6169 /*
6170 * Unlike a left shift (which involves a hole punch),
6171 * a right shift does not modify extent neighbors
6172 * in any way. We should never find mergeable extents
6173 * in this scenario. Check anyway and warn if we
6174 * encounter two extents that could be one.
6175 */
6176 if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
6177 WARN_ON_ONCE(1);
6178 }
6179 /*
6180 * Increment the extent index for the next iteration, update the start
6181 * offset of the in-core extent and update the btree if applicable.
6182 */
6183 update_current_ext:
6184 if (direction == SHIFT_LEFT)
6185 (*current_ext)++;
6186 else
6187 (*current_ext)--;
6188 xfs_bmbt_set_startoff(gotp, startoff);
6189 *logflags |= XFS_ILOG_CORE;
6190 adj_irec = got;
6191 if (!cur) {
6192 *logflags |= XFS_ILOG_DEXT;
6193 goto update_rmap;
6194 }
6195
6196 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
6197 got.br_blockcount, &i);
6198 if (error)
6199 return error;
6200 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6201
6202 got.br_startoff = startoff;
6203 error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
6204 got.br_blockcount, got.br_state);
6205 if (error)
6206 return error;
6207
6208 update_rmap:
6209 /* update reverse mapping */
6210 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec);
6211 if (error)
6212 return error;
6213 adj_irec.br_startoff = startoff;
6214 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec);
6215 }
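/*
 * Sketch of the room checks above, with illustrative offsets: shifting
 * got = [offset 20, 5 blocks] left by 8 requires the previous extent to
 * end at or before offset 12 (or to satisfy xfs_bmse_can_merge()), while
 * shifting it right by 8 requires the next extent to start at or after
 * offset 33.
 */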
6216
6217 /*
6218 * Shift extent records to the left/right to cover/create a hole.
6219 *
6220 * The maximum number of extents to be shifted in a single operation is
6221 * @num_exts. @stop_fsb specifies the file offset at which to stop the shift,
6222 * and the file offset where we've left off is returned in @next_fsb.
6223 * @offset_shift_fsb is the length by which each extent is shifted. If there
6224 * is no hole to shift the extents into, this is considered an invalid
6225 * operation and we abort immediately.
6226 */
6227 int
6228 xfs_bmap_shift_extents(
6229 struct xfs_trans *tp,
6230 struct xfs_inode *ip,
6231 xfs_fileoff_t *next_fsb,
6232 xfs_fileoff_t offset_shift_fsb,
6233 int *done,
6234 xfs_fileoff_t stop_fsb,
6235 xfs_fsblock_t *firstblock,
6236 struct xfs_defer_ops *dfops,
6237 enum shift_direction direction,
6238 int num_exts)
6239 {
6240 struct xfs_btree_cur *cur = NULL;
6241 struct xfs_bmbt_rec_host *gotp;
6242 struct xfs_bmbt_irec got;
6243 struct xfs_mount *mp = ip->i_mount;
6244 struct xfs_ifork *ifp;
6245 xfs_extnum_t nexts = 0;
6246 xfs_extnum_t current_ext;
6247 xfs_extnum_t total_extents;
6248 xfs_extnum_t stop_extent;
6249 int error = 0;
6250 int whichfork = XFS_DATA_FORK;
6251 int logflags = 0;
6252
6253 if (unlikely(XFS_TEST_ERROR(
6254 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6255 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6256 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
6257 XFS_ERROR_REPORT("xfs_bmap_shift_extents",
6258 XFS_ERRLEVEL_LOW, mp);
6259 return -EFSCORRUPTED;
6260 }
6261
6262 if (XFS_FORCED_SHUTDOWN(mp))
6263 return -EIO;
6264
6265 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
6266 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6267 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
6268 ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);
6269
6270 ifp = XFS_IFORK_PTR(ip, whichfork);
6271 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6272 /* Read in all the extents */
6273 error = xfs_iread_extents(tp, ip, whichfork);
6274 if (error)
6275 return error;
6276 }
6277
6278 if (ifp->if_flags & XFS_IFBROOT) {
6279 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6280 cur->bc_private.b.firstblock = *firstblock;
6281 cur->bc_private.b.dfops = dfops;
6282 cur->bc_private.b.flags = 0;
6283 }
6284
6285 /*
6286 * There may be delalloc extents in the data fork before the range we
6287 * are collapsing out, so we cannot use the count of real extents here.
6288 * Instead we have to calculate it from the incore fork.
6289 */
6290 total_extents = xfs_iext_count(ifp);
6291 if (total_extents == 0) {
6292 *done = 1;
6293 goto del_cursor;
6294 }
6295
6296 /*
6297 * For the first right shift we need to initialize next_fsb.
6298 */
6299 if (*next_fsb == NULLFSBLOCK) {
6300 gotp = xfs_iext_get_ext(ifp, total_extents - 1);
6301 xfs_bmbt_get_all(gotp, &got);
6302 *next_fsb = got.br_startoff;
6303 if (stop_fsb > *next_fsb) {
6304 *done = 1;
6305 goto del_cursor;
6306 }
6307 }
6308
6309 /* Lookup the extent index at which we have to stop */
6310 if (direction == SHIFT_RIGHT) {
6311 gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
6312 /* Make stop_extent exclusive of shift range */
6313 stop_extent--;
6314 } else
6315 stop_extent = total_extents;
6316
6317 /*
6318 * Look up the extent index for the fsb where we start shifting. We can
6319 * henceforth iterate with current_ext as extent list changes are locked
6320 * out via ilock.
6321 *
6322 * gotp can be null in 2 cases: 1) if there are no extents or 2)
6323 * *next_fsb lies in a hole beyond which there are no extents. Either
6324 * way, we are done.
6325 */
6326 gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
6327 if (!gotp) {
6328 *done = 1;
6329 goto del_cursor;
6330 }
6331
6332 /* some sanity checking before we finally start shifting extents */
6333 if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
6334 (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
6335 error = -EIO;
6336 goto del_cursor;
6337 }
6338
6339 while (nexts++ < num_exts) {
6340 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
6341 &current_ext, gotp, cur, &logflags,
6342 direction, dfops);
6343 if (error)
6344 goto del_cursor;
6345 /*
6346 * If there was an extent merge during the shift, the extent
6347 * count can change. Update the total and grab the next record.
6348 */
6349 if (direction == SHIFT_LEFT) {
6350 total_extents = xfs_iext_count(ifp);
6351 stop_extent = total_extents;
6352 }
6353
6354 if (current_ext == stop_extent) {
6355 *done = 1;
6356 *next_fsb = NULLFSBLOCK;
6357 break;
6358 }
6359 gotp = xfs_iext_get_ext(ifp, current_ext);
6360 }
6361
6362 if (!*done) {
6363 xfs_bmbt_get_all(gotp, &got);
6364 *next_fsb = got.br_startoff;
6365 }
6366
6367 del_cursor:
6368 if (cur)
6369 xfs_btree_del_cursor(cur,
6370 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6371
6372 if (logflags)
6373 xfs_trans_log_inode(tp, ip, logflags);
6374
6375 return error;
6376 }
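/*
 * Typical usage sketch (illustrative; the caller-side variable names and
 * the per-call transaction setup are assumptions): the caller repeats the
 * call with a fresh transaction and defer list until *done is set, e.g.
 * shifting left to collapse out a hole or right to open one up.
 *
 *	while (!done) {
 *		... allocate/ijoin a transaction ...
 *		xfs_defer_init(&dfops, &firstblock);
 *		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
 *				&done, stop_fsb, &firstblock, &dfops,
 *				SHIFT_LEFT, num_exts);
 *		...
 *		error = xfs_defer_finish(&tp, &dfops, NULL);
 *		error = xfs_trans_commit(tp);
 *	}
 */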
6377
6378 /*
6379 * Splits an extent into two extents at the split_fsb block, such that
6380 * split_fsb becomes the first block of the second extent. @current_ext is
6381 * the target extent to be split; @split_fsb is the block at which it is split.
6382 * If split_fsb lies in a hole or at the first block of an extent, just return 0.
6383 */
6384 STATIC int
6385 xfs_bmap_split_extent_at(
6386 struct xfs_trans *tp,
6387 struct xfs_inode *ip,
6388 xfs_fileoff_t split_fsb,
6389 xfs_fsblock_t *firstfsb,
6390 struct xfs_defer_ops *dfops)
6391 {
6392 int whichfork = XFS_DATA_FORK;
6393 struct xfs_btree_cur *cur = NULL;
6394 struct xfs_bmbt_rec_host *gotp;
6395 struct xfs_bmbt_irec got;
6396 struct xfs_bmbt_irec new; /* split extent */
6397 struct xfs_mount *mp = ip->i_mount;
6398 struct xfs_ifork *ifp;
6399 xfs_fsblock_t gotblkcnt; /* new block count for got */
6400 xfs_extnum_t current_ext;
6401 int error = 0;
6402 int logflags = 0;
6403 int i = 0;
6404
6405 if (unlikely(XFS_TEST_ERROR(
6406 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6407 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6408 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
6409 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
6410 XFS_ERRLEVEL_LOW, mp);
6411 return -EFSCORRUPTED;
6412 }
6413
6414 if (XFS_FORCED_SHUTDOWN(mp))
6415 return -EIO;
6416
6417 ifp = XFS_IFORK_PTR(ip, whichfork);
6418 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6419 /* Read in all the extents */
6420 error = xfs_iread_extents(tp, ip, whichfork);
6421 if (error)
6422 return error;
6423 }
6424
6425 /*
6426 * gotp can be null in 2 cases: 1) if there are no extents
6427 * or 2) split_fsb lies in a hole beyond which there are
6428 * no extents. Either way, we are done.
6429 */
6430 gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
6431 if (!gotp)
6432 return 0;
6433
6434 xfs_bmbt_get_all(gotp, &got);
6435
6436 /*
6437 * Check split_fsb lies in a hole or the start boundary offset
6438 * of the extent.
6439 */
6440 if (got.br_startoff >= split_fsb)
6441 return 0;
6442
6443 gotblkcnt = split_fsb - got.br_startoff;
6444 new.br_startoff = split_fsb;
6445 new.br_startblock = got.br_startblock + gotblkcnt;
6446 new.br_blockcount = got.br_blockcount - gotblkcnt;
6447 new.br_state = got.br_state;
6448
6449 if (ifp->if_flags & XFS_IFBROOT) {
6450 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6451 cur->bc_private.b.firstblock = *firstfsb;
6452 cur->bc_private.b.dfops = dfops;
6453 cur->bc_private.b.flags = 0;
6454 error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
6455 got.br_startblock,
6456 got.br_blockcount,
6457 &i);
6458 if (error)
6459 goto del_cursor;
6460 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6461 }
6462
6463 xfs_bmbt_set_blockcount(gotp, gotblkcnt);
6464 got.br_blockcount = gotblkcnt;
6465
6466 logflags = XFS_ILOG_CORE;
6467 if (cur) {
6468 error = xfs_bmbt_update(cur, got.br_startoff,
6469 got.br_startblock,
6470 got.br_blockcount,
6471 got.br_state);
6472 if (error)
6473 goto del_cursor;
6474 } else
6475 logflags |= XFS_ILOG_DEXT;
6476
6477 /* Add new extent */
6478 current_ext++;
6479 xfs_iext_insert(ip, current_ext, 1, &new, 0);
6480 XFS_IFORK_NEXT_SET(ip, whichfork,
6481 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
6482
6483 if (cur) {
6484 error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
6485 new.br_startblock, new.br_blockcount,
6486 &i);
6487 if (error)
6488 goto del_cursor;
6489 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
6490 cur->bc_rec.b.br_state = new.br_state;
6491
6492 error = xfs_btree_insert(cur, &i);
6493 if (error)
6494 goto del_cursor;
6495 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6496 }
6497
6498 /*
6499 * Convert to a btree if necessary.
6500 */
6501 if (xfs_bmap_needs_btree(ip, whichfork)) {
6502 int tmp_logflags; /* partial log flag return val */
6503
6504 ASSERT(cur == NULL);
6505 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
6506 &cur, 0, &tmp_logflags, whichfork);
6507 logflags |= tmp_logflags;
6508 }
6509
6510 del_cursor:
6511 if (cur) {
6512 cur->bc_private.b.allocated = 0;
6513 xfs_btree_del_cursor(cur,
6514 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6515 }
6516
6517 if (logflags)
6518 xfs_trans_log_inode(tp, ip, logflags);
6519 return error;
6520 }
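/*
 * Worked example (illustrative numbers) for the split above: with got =
 * [offset 10, startblock 500, 8 blocks] and split_fsb = 13, gotblkcnt is 3,
 * got is trimmed to [10, 500, 3] and the new record becomes [13, 503, 5];
 * the on-disk extent count goes up by one and the fork is converted to
 * btree format if the extent count now exceeds what fits in extents format.
 */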
6521
6522 int
6523 xfs_bmap_split_extent(
6524 struct xfs_inode *ip,
6525 xfs_fileoff_t split_fsb)
6526 {
6527 struct xfs_mount *mp = ip->i_mount;
6528 struct xfs_trans *tp;
6529 struct xfs_defer_ops dfops;
6530 xfs_fsblock_t firstfsb;
6531 int error;
6532
6533 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6534 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6535 if (error)
6536 return error;
6537
6538 xfs_ilock(ip, XFS_ILOCK_EXCL);
6539 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6540
6541 xfs_defer_init(&dfops, &firstfsb);
6542
6543 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
6544 &firstfsb, &dfops);
6545 if (error)
6546 goto out;
6547
6548 error = xfs_defer_finish(&tp, &dfops, NULL);
6549 if (error)
6550 goto out;
6551
6552 return xfs_trans_commit(tp);
6553
6554 out:
6555 xfs_defer_cancel(&dfops);
6556 xfs_trans_cancel(tp);
6557 return error;
6558 }
6559
6560 /* Deferred mapping is only for real extents in the data fork. */
6561 static bool
6562 xfs_bmap_is_update_needed(
6563 struct xfs_bmbt_irec *bmap)
6564 {
6565 return bmap->br_startblock != HOLESTARTBLOCK &&
6566 bmap->br_startblock != DELAYSTARTBLOCK;
6567 }
6568
6569 /* Record a bmap intent. */
6570 static int
6571 __xfs_bmap_add(
6572 struct xfs_mount *mp,
6573 struct xfs_defer_ops *dfops,
6574 enum xfs_bmap_intent_type type,
6575 struct xfs_inode *ip,
6576 int whichfork,
6577 struct xfs_bmbt_irec *bmap)
6578 {
6579 int error;
6580 struct xfs_bmap_intent *bi;
6581
6582 trace_xfs_bmap_defer(mp,
6583 XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
6584 type,
6585 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
6586 ip->i_ino, whichfork,
6587 bmap->br_startoff,
6588 bmap->br_blockcount,
6589 bmap->br_state);
6590
6591 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
6592 INIT_LIST_HEAD(&bi->bi_list);
6593 bi->bi_type = type;
6594 bi->bi_owner = ip;
6595 bi->bi_whichfork = whichfork;
6596 bi->bi_bmap = *bmap;
6597
6598 error = xfs_defer_join(dfops, bi->bi_owner);
6599 if (error) {
6600 kmem_free(bi);
6601 return error;
6602 }
6603
6604 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6605 return 0;
6606 }
6607
6608 /* Map an extent into a file. */
6609 int
6610 xfs_bmap_map_extent(
6611 struct xfs_mount *mp,
6612 struct xfs_defer_ops *dfops,
6613 struct xfs_inode *ip,
6614 struct xfs_bmbt_irec *PREV)
6615 {
6616 if (!xfs_bmap_is_update_needed(PREV))
6617 return 0;
6618
6619 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
6620 XFS_DATA_FORK, PREV);
6621 }
6622
6623 /* Unmap an extent out of a file. */
6624 int
6625 xfs_bmap_unmap_extent(
6626 struct xfs_mount *mp,
6627 struct xfs_defer_ops *dfops,
6628 struct xfs_inode *ip,
6629 struct xfs_bmbt_irec *PREV)
6630 {
6631 if (!xfs_bmap_is_update_needed(PREV))
6632 return 0;
6633
6634 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
6635 XFS_DATA_FORK, PREV);
6636 }
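/*
 * Illustrative sequence (caller-side names are assumptions) for queueing
 * deferred bmap work with the two helpers above: the intents are recorded
 * now and replayed later through xfs_bmap_finish_one() by the deferred-ops
 * machinery.
 *
 *	xfs_defer_init(&dfops, &firstfsb);
 *	error = xfs_bmap_unmap_extent(mp, &dfops, ip, &irec);
 *	if (!error)
 *		error = xfs_bmap_map_extent(mp, &dfops, ip, &new_irec);
 *	if (!error)
 *		error = xfs_defer_finish(&tp, &dfops, ip);
 */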
6637
6638 /*
6639 * Process one of the deferred bmap operations. We pass back the
6640 * btree cursor to maintain our lock on the bmapbt between calls.
6641 */
6642 int
6643 xfs_bmap_finish_one(
6644 struct xfs_trans *tp,
6645 struct xfs_defer_ops *dfops,
6646 struct xfs_inode *ip,
6647 enum xfs_bmap_intent_type type,
6648 int whichfork,
6649 xfs_fileoff_t startoff,
6650 xfs_fsblock_t startblock,
6651 xfs_filblks_t *blockcount,
6652 xfs_exntst_t state)
6653 {
6654 struct xfs_bmbt_irec bmap;
6655 int nimaps = 1;
6656 xfs_fsblock_t firstfsb;
6657 int flags = XFS_BMAPI_REMAP;
6658 int error = 0;
6659
6660 bmap.br_startblock = startblock;
6661 bmap.br_startoff = startoff;
6662 bmap.br_blockcount = *blockcount;
6663 bmap.br_state = state;
6664
6665 /*
6666 * firstfsb is tied to the transaction lifetime and is used to
6667 * ensure correct AG locking order and schedule work item
6668 * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
6669 * to only making one bmap call per transaction, so it should
6670 * be safe to have it as a local variable here.
6671 */
6672 firstfsb = NULLFSBLOCK;
6673
6674 trace_xfs_bmap_deferred(tp->t_mountp,
6675 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6676 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6677 ip->i_ino, whichfork, startoff, *blockcount, state);
6678
6679 if (whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK)
6680 return -EFSCORRUPTED;
6681 if (whichfork == XFS_ATTR_FORK)
6682 flags |= XFS_BMAPI_ATTRFORK;
6683
6684 if (XFS_TEST_ERROR(false, tp->t_mountp,
6685 XFS_ERRTAG_BMAP_FINISH_ONE,
6686 XFS_RANDOM_BMAP_FINISH_ONE))
6687 return -EIO;
6688
6689 switch (type) {
6690 case XFS_BMAP_MAP:
6691 firstfsb = bmap.br_startblock;
6692 error = xfs_bmapi_write(tp, ip, bmap.br_startoff,
6693 bmap.br_blockcount, flags, &firstfsb,
6694 bmap.br_blockcount, &bmap, &nimaps,
6695 dfops);
6696 *blockcount = 0;
6697 break;
6698 case XFS_BMAP_UNMAP:
6699 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6700 XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
6701 break;
6702 default:
6703 ASSERT(0);
6704 error = -EFSCORRUPTED;
6705 }
6706
6707 return error;
6708 }
6709