/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

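	/* Number of inodes in this cluster buffer: cluster size / inode size. */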
	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
#endif

bool
xfs_dinode_good_version(
	struct xfs_mount *mp,
	__u8		version)
{
	if (xfs_sb_version_hascrc(&mp->m_sb))
		return version == 3;

	return version == 1 || version == 2;
}

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and unnecessary panics on debug kernels. We use EIO here because
 * all we want to do is say readahead failed; there is no-one to report the
 * error to, so this will distinguish it from a non-ra verifier failure.
 * Changes to this readahead error behaviour also need to be reflected in
 * xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
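	/* b_length is in 512-byte basic blocks; convert to fs blocks, then inodes. */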
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			xfs_dinode_good_version(mp, dip->di_version);
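		/* XFS_TEST_ERROR can also inject a failure here to simulate corruption. */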
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
					    XFS_ERRTAG_ITOBP_INOTOBP,
					    XFS_RANDOM_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			xfs_verifier_error(bp);
#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
		}
	}
	xfs_inobp_check(mp, bp);
}

static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

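/*
 * The readahead ops use a verifier that does not report corruption; it clears
 * XBF_DONE instead so that a later blocking read retries the buffer from disk.
 */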
const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;

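	/* No virtual mapping needed; the dinode is accessed via xfs_buf_offset(). */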
	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == -EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == -EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return -EINVAL;

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}

void
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct xfs_icdinode	*to = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 */
	to->di_version = from->di_version;
	if (to->di_version == 1) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		to->di_projid_lo = 0;
		to->di_projid_hi = 0;
		to->di_version = 2;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
		to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	}

	to->di_format = from->di_format;
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_flushiter = be16_to_cpu(from->di_flushiter);

	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
	inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
	inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
	inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
	inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
	inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
	inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);

	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);

	if (to->di_version == 3) {
		inode->i_version = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
	}
}

void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
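	/* di_onlink is the old v1 link count field; always zero it for v2/v3 inodes. */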
	to->di_onlink = 0;

	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);

	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(inode->i_version);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

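/*
 * Convert an inode from the host-endian xfs_log_dinode format back into the
 * big-endian on-disk xfs_dinode format, as used during log recovery.
 */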
void
xfs_log_dinode_to_disk(
	struct xfs_log_dinode	*from,
	struct xfs_dinode	*to)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = 0;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));

	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

static bool
xfs_dinode_verify(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return false;

	/* don't allow invalid i_size */
	if (be64_to_cpu(dip->di_size) & (1ULL << 63))
		return false;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return false;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
		return false;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return true;

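	/* A v3 inode can only exist on a CRC-enabled (v5 superblock) filesystem. */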
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF))
		return false;
	if (be64_to_cpu(dip->di_ino) != ip->i_ino)
		return false;
	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
		return false;

	flags = be16_to_cpu(dip->di_flags);
	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_sb_version_hasreflink(&mp->m_sb))
		return false;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return false;

	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return false;

	return true;
}

void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	__uint32_t		crc;

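	/* Only v3 inodes carry an on-disk CRC. */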
	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		VFS_I(ip)->i_generation = prandom_u32();
		if (xfs_sb_version_hascrc(&mp->m_sb))
			ip->i_d.di_version = 3;
		else
			ip->i_d.di_version = 2;
		return 0;
	}

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	if (!xfs_dinode_verify(mp, ip, dip)) {
		xfs_alert(mp, "%s: validation failed for inode %lld",
			__func__, ip->i_ino);

		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
		error = -EFSCORRUPTED;
		goto out_brelse;
	}

518
519 /*
520 * If the on-disk inode is already linked to a directory
521 * entry, copy all of the inode into the in-core inode.
522 * xfs_iformat_fork() handles copying in the inode format
523 * specific information.
524 * Otherwise, just get the truly permanent information.
525 */
526 if (dip->di_mode) {
527 xfs_inode_from_disk(ip, dip);
528 error = xfs_iformat_fork(ip, dip);
529 if (error) {
530 #ifdef DEBUG
531 xfs_alert(mp, "%s: xfs_iformat() returned error %d",
532 __func__, error);
533 #endif /* DEBUG */
534 goto out_brelse;
535 }
536 } else {
537 /*
538 * Partial initialisation of the in-core inode. Just the bits
539 * that xfs_ialloc won't overwrite or relies on being correct.
540 */
541 ip->i_d.di_version = dip->di_version;
542 VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
543 ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
544
545 /*
546 * Make sure to pull in the mode here as well in
547 * case the inode is released without being used.
548 * This ensures that xfs_inactive() will see that
549 * the inode is already free and not try to mess
550 * with the uninitialized part of it.
551 */
552 VFS_I(ip)->i_mode = 0;
553 }
554
	ASSERT(ip->i_d.di_version >= 2);
	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while. This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
	 * brelse(). If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction. It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it. Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}