// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include <linux/random.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to NILFS root object (mounted checkpoint)
 * @type: inode type
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	unsigned int type;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block if it has not been
 * allocated yet
 *
 * This function does not issue the actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

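	/*
	 * bh_result->b_size carries the maximum mapping size allowed by
	 * the caller, so the bmap is queried for up to that many
	 * contiguous blocks at once.
	 */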
	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * get_block() may be called concurrently
				 * for the same inode, but the page that
				 * contains this block must be locked here,
				 * so a duplicate insertion indicates a
				 * race condition.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				err = -EAGAIN;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */
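		/*
		 * Note: in NILFS the real disk block number is assigned
		 * only when the segment constructor writes the block
		 * out, so until then the buffer stays in the new +
		 * delayed state set above.
		 */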

	} else if (ret == -ENOENT) {
		/*
		 * A missing block is not an error (e.g. a hole); return
		 * without setting the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_read_folio() - implement the read_folio() method of the nilfs_aops
 * address_space_operations
 * @file: file struct of the file to be read
 * @folio: the folio to be read
 */
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * The filesystem was remounted read-only because of an
		 * error or metadata corruption, but dirty pages still
		 * try to get flushed in the background. Simply discard
		 * this dirty folio.
		 */
		nilfs_clear_folio_dirty(folio);
		folio_unlock(folio);
		return -EROFS;
	}

	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);

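	/*
	 * Dirty folios are only written out by the segment constructor;
	 * the folio was redirtied above, and log writing is either
	 * waited for or merely kicked off below depending on the sync
	 * mode.
	 */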
	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static bool nilfs_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct buffer_head *head;
	unsigned int nr_dirty = 0;
	bool ret = filemap_dirty_folio(mapping, folio);

	/*
	 * The folio may not be locked, e.g. if called from
	 * try_to_unmap_one().
	 */
	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);
	} else if (ret) {
		nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
	}
	spin_unlock(&mapping->i_private_lock);

	if (nr_dirty)
		nilfs_set_file_dirty(inode, nr_dirty);
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct folio **foliop, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, foliop, nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

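	/*
	 * Count the buffers that are still clean in the copied range
	 * before generic_write_end() dirties them, so the exact number
	 * of newly dirtied blocks can be passed to
	 * nilfs_set_file_dirty() below.
	 */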
	nr_dirty = nilfs_page_count_clean_buffers(&folio->page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, folio,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

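	/*
	 * Direct writes are not supported; returning 0 here makes the
	 * VFS fall back to buffered writes.
	 */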
	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.read_folio		= nilfs_read_folio,
	.writepages		= nilfs_writepages,
	.dirty_folio		= nilfs_dirty_folio,
	.readahead		= nilfs_readahead,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	.invalidate_folio	= block_invalidate_folio,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

const struct address_space_operations nilfs_buffer_cache_aops = {
	.invalidate_folio	= block_invalidate_folio,
};

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	struct buffer_head *bh;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

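	/*
	 * Clear __GFP_FS from the mapping's allocation mask so that
	 * page cache allocations for this inode cannot recurse into
	 * the filesystem under memory pressure.
	 */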
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_type = NILFS_I_TYPE_NORMAL;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */
	ii->i_bh = bh;

	atomic64_inc(&root->inodes_count);
	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
	inode->i_ino = ino;
	simple_inode_init_ts(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	inode->i_generation = get_random_u32();
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * This never occurs. When nilfs_init_acl() gains real
		 * support, proper cancellation of the jobs above must
		 * be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode_set_atime(inode, le64_to_cpu(raw_inode->i_mtime),
			le32_to_cpu(raw_inode->i_mtime_nsec));
	inode_set_ctime(inode, le64_to_cpu(raw_inode->i_ctime),
			le32_to_cpu(raw_inode->i_ctime_nsec));
	inode_set_mtime(inode, le64_to_cpu(raw_inode->i_mtime),
			le32_to_cpu(raw_inode->i_mtime_nsec));
	if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
		return -EIO; /* this inode is for metadata and corrupted */
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	} else {
		nilfs_error(sb,
			    "invalid file type bits in mode 0%o for inode %lu",
			    inode->i_mode, ino);
		err = -EIO;
		goto failed_unmap;
	}
	nilfs_ifile_unmap_inode(raw_inode);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(raw_inode);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (ii->i_type != args->type)
		return 0;

	return !(args->type & NILFS_I_TYPE_GC) || args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	NILFS_I(inode)->i_cno = args->cno;
	NILFS_I(inode)->i_root = args->root;
	NILFS_I(inode)->i_type = args->type;
	if (args->root && args->ino == NILFS_ROOT_INO)
		nilfs_get_root(args->root);
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);

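	/*
	 * A cached inode whose link count has dropped to zero is in
	 * the middle of deletion; report it as stale instead of
	 * handing it out.
	 */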
	if (!(inode->i_state & I_NEW)) {
		if (!inode->i_nlink) {
			iput(inode);
			return ERR_PTR(-ESTALE);
		}
		return inode;
	}

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .type = NILFS_I_TYPE_GC
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it. This function allocates
 * an additional inode to maintain the page cache of B-tree nodes one-on-one.
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode;
	struct nilfs_iget_args args;

	if (ii->i_assoc_inode)
		return 0;

	args.ino = inode->i_ino;
	args.root = ii->i_root;
	args.cno = ii->i_cno;
	args.type = ii->i_type | NILFS_I_TYPE_BTNC;

	btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
				  nilfs_iget_set, &args);
	if (unlikely(!btnc_inode))
		return -ENOMEM;
	if (btnc_inode->i_state & I_NEW) {
		nilfs_init_btnc_inode(btnc_inode);
		unlock_new_inode(btnc_inode);
	}
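	/*
	 * The B-tree node cache inode shares the bmap with its owner,
	 * and the two inodes are cross-linked through i_assoc_inode.
	 */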
	NILFS_I(btnc_inode)->i_assoc_inode = inode;
	NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
	ii->i_assoc_inode = btnc_inode;

	return 0;
}

/**
 * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
 * @inode: inode object
 *
 * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
 * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */
void nilfs_detach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;

	if (btnc_inode) {
		NILFS_I(btnc_inode)->i_assoc_inode = NULL;
		ii->i_assoc_inode = NULL;
		iput(btnc_inode);
	}
}

/**
 * nilfs_iget_for_shadow - obtain inode for shadow mapping
 * @inode: inode object that uses shadow mapping
 *
 * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
 * caches for shadow mapping. The page cache for data pages is set up
 * in one inode and the one for b-tree node pages is set up in the
 * other inode, which is attached to the former inode.
 *
 * Return Value: On success, a pointer to the inode for data pages is
 * returned. On errors, one of the following negative error codes is
 * returned in a pointer type.
 *
 * %-ENOMEM - Insufficient memory available.
 */
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
	struct nilfs_iget_args args = {
		.ino = inode->i_ino, .root = NULL, .cno = 0,
		.type = NILFS_I_TYPE_SHADOW
	};
	struct inode *s_inode;
	int err;

	s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
			       nilfs_iget_set, &args);
	if (unlikely(!s_inode))
		return ERR_PTR(-ENOMEM);
	if (!(s_inode->i_state & I_NEW))
		return inode;

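	/*
	 * A fresh shadow inode starts from a zeroed bmap and uses NOFS
	 * allocations and the minimal buffer-cache aops, since its
	 * pages are managed by the shadow mapping code rather than by
	 * ordinary writeback.
	 */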
	NILFS_I(s_inode)->i_flags = 0;
	memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
	mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
	s_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;

	err = nilfs_attach_btree_node_cache(s_inode);
	if (unlikely(err)) {
		iget_failed(s_inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(s_inode);
	return s_inode;
}

/**
 * nilfs_write_inode_common - export common inode information to on-disk inode
 * @inode: inode object
 * @raw_inode: on-disk inode
 *
 * This function writes standard information from the on-memory inode @inode
 * to @raw_inode on ifile, cpfile or a super root block. Since inode bmap
 * data is not exported, nilfs_bmap_write() must be called separately during
 * log writing.
 */
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
	raw_inode->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
	raw_inode->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
	raw_inode->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));

	nilfs_ifile_unmap_inode(raw_inode);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
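	/*
	 * Truncate the bmap from the tail in chunks of at most
	 * NILFS_MAX_TRUNCATE_BLOCKS blocks per pass, relaxing memory
	 * pressure between passes and retrying once on -ENOMEM.
	 */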
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
		   ret, ii->vfs_inode.i_ino);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	if (!(ii->i_type & NILFS_I_TYPE_BTNC))
		nilfs_detach_btree_node_cache(inode);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs;
	int ret;

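	/*
	 * Inodes that are still linked, that have no NILFS root (e.g.
	 * GC or shadow inodes), or that are bad only need their page
	 * cache dropped; no on-disk state is modified for them.
	 */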
	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	nilfs = sb->s_fs_info;
	if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
		/*
		 * If this inode is about to be disposed after the file system
		 * has been degraded to read-only due to file system corruption
		 * or after the writer has been detached, do not make any
		 * changes that cause writes, just clear it.
		 * Do this check after read-locking ns_segctor_sem by
		 * nilfs_transaction_begin() in order to avoid a race with
		 * the writer detach operation.
		 */
		clear_inode(inode);
		nilfs_clear_inode(inode);
		nilfs_transaction_abort(sb);
		return;
	}

	/* TODO: some of the following operations may fail. */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

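	/*
	 * For size changes, wait for in-flight direct I/O to finish,
	 * adjust i_size and the page cache, and then trim the bmap.
	 */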
	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(&nop_mnt_idmap, inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

 out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct mnt_idmap *idmap, struct inode *inode,
		     int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(&nop_mnt_idmap, inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

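	/*
	 * Double-checked locking: the inode block is read outside
	 * ns_inode_lock, and the cached ii->i_bh is then re-examined
	 * under the lock so that exactly one buffer head ends up
	 * cached.
	 */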
	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else if (unlikely(!buffer_uptodate(ii->i_bh))) {
			__brelse(ii->i_bh);
			ii->i_bh = *pbh;
		} else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warn(inode->i_sb,
				   "cannot set file dirty (ino=%lu): the file is being freed",
				   inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct buffer_head *ibh;
	int err;

	/*
	 * Do not dirty inodes after the log writer has been detached
	 * and its nilfs_root struct has been freed.
	 */
	if (unlikely(nilfs_purging(nilfs)))
		return 0;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warn(inode->i_sb,
			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			   inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warn(inode->i_sb,
			   "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

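	/*
	 * Walk the range block by block, coalescing contiguous mapped
	 * blocks into extents and reporting uncommitted (delayed
	 * allocation) ranges as FIEMAP_EXTENT_DELALLOC before they are
	 * assigned disk addresses.
	 */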
	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += (u64)n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = (u64)n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = (u64)n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}