// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
						  struct inode *inode,
						  struct ext4_ext_path *path,
						  ext4_lblk_t split,
						  int split_flag, int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_rwsem. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

static inline void ext4_ext_path_brelse(struct ext4_ext_path *path)
{
	brelse(path->p_bh);
	path->p_bh = NULL;
}

static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (IS_ERR_OR_NULL(path))
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++)
		ext4_ext_path_brelse(path);
}

void ext4_free_ext_path(struct ext4_ext_path *path)
{
	if (IS_ERR_OR_NULL(path))
		return;
	ext4_ext_drop_refs(path);
	kfree(path);
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * transaction with 'restart_cred' credits. The function drops i_data_sem
 * when restarting transaction and gets it after transaction is restarted.
 *
 * The function returns 0 on success, 1 if transaction had to be restarted,
 * and < 0 in case of fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
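
/*
 * Illustrative usage sketch (not taken from this file; "needed" and
 * "lblk" are placeholder identifiers). A return of 1 means the
 * transaction was restarted and i_data_sem was dropped and retaken,
 * so a typical caller revalidates any cached extent path:
 *
 *	err = ext4_datasem_ensure_credits(handle, inode, needed,
 *					  restart_credits, revoke_credits);
 *	if (err < 0)
 *		goto out;
 *	if (err > 0)
 *		path = ext4_find_extent(inode, lblk, path, 0);
 */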

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
{
	int err = 0;

	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    path->p_bh, EXT4_JTR_NONE);
		/*
		 * The extent buffer's verified bit will be set again in
		 * __ext4_ext_dirty(). We could leave an inconsistent
		 * buffer if the extent updating procedure breaks off due
		 * to some error, so force it to be checked again.
		 */
		if (!err)
			clear_buffer_verified(path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
		/* Extents updating done, re-set verified flag */
		if (!err)
			set_buffer_verified(path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object's sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file. However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space. Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like the index is empty;
		 * try to find a starting block from the index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}
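
/*
 * Worked example (illustrative): if the path ends at an extent mapping
 * logical block 100 to physical block 5000, the goal for logical block
 * 108 is 5000 + (108 - 100) = 5008, and for logical block 96 it is
 * 5000 - (100 - 96) = 4996, keeping the file's blocks roughly
 * contiguous on disk.
 */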

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
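
/*
 * Worked example (illustrative): struct ext4_extent_header, struct
 * ext4_extent and struct ext4_extent_idx are 12 bytes each, so a
 * 4096-byte tree block holds (4096 - 12) / 12 = 340 entries, while the
 * 60-byte i_data root holds (60 - 12) / 12 = 4 entries. The
 * AGGRESSIVE_TEST caps above shrink these limits to stress tree growth.
 */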

static inline struct ext4_ext_path *
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path, ext4_lblk_t lblk,
			   int nofail)
{
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, path, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
				     int depth)
{
	unsigned short entries;
	ext4_lblk_t lblock = 0;
	ext4_lblk_t cur = 0;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);

		/*
		 * The logical block in the first entry should be equal to
		 * the number in the index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext->ee_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			if (lblock < cur) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			cur = lblock + ext4_ext_get_actual_len(ext);
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		/*
		 * The logical block in the first entry should be equal to
		 * the number in the parent index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext_idx->ei_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;

			/* Check for overlapping index extents */
			lblock = le32_to_cpu(ext_idx->ei_block);
			if (lblock < cur) {
				*pblk = ext4_idx_pblock(ext_idx);
				return 0;
			}
			ext_idx++;
			entries--;
			cur = lblock + 1;
		}
	}
	return 1;
}
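
/*
 * Illustrative example of the overlap check above: for leaf entries
 * {ee_block 0, len 10} followed by {ee_block 8, len 4}, the cursor is
 * 0 + 10 = 10 after the first extent, so the second extent's start (8)
 * falls below the cursor and the leaf is rejected as overlapping.
 */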

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
		error_msg = "eh_entries is 0 but eh_depth is > 0";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk) \
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}
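
/*
 * Illustrative example: for a leaf with extents covering logical blocks
 * [0, 9] and [20, 29], the loop above caches both written ranges and,
 * since prev (10) != lblk (20), also caches [10, 19] as a hole, so
 * later lookups in the gap can be answered from the extent status tree.
 */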

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, struct ext4_extent_idx *idx,
			 int depth, int flags)
{
	struct buffer_head *bh;
	int err;
	gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
	ext4_fsblk_t pblk;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	pblk = ext4_idx_pblock(idx);
	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL, false);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
			       depth, pblk, le32_to_cpu(idx->ei_block));
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);

}

#define read_extent_tree_block(inode, idx, depth, flags) \
	__read_extent_tree_block(__func__, __LINE__, (inode), (idx), \
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			ext4_ext_path_brelse(path + i);
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode, path[i].p_idx++,
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_free_ext_path(path);
	return ret;
}
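
/*
 * The loop above is an iterative depth-first walk (a sketch, not extra
 * behaviour): path[i].p_idx is the cursor into level i, the walk
 * descends through read_extent_tree_block() -- with EXT4_EX_FORCE_CACHE
 * so leaf entries get (re)cached -- and climbs back up once a level's
 * last index has been consumed:
 *
 *	while (i >= 0):
 *		at leaf depth or past EXT_LAST_INDEX  ->  release bh, i--
 *		otherwise  ->  read block at p_idx++, descend, i++
 */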

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, " %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, " %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, " []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (IS_ERR_OR_NULL(path))
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug(inode, "binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));

		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_idx = l - 1;
	ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
			       <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug(inode, "binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));

		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_ext = l - 1;
	ext_debug(inode, " -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_unwritten(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
			       <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
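
/*
 * Worked example (illustrative) for both searches above: with entries
 * starting at blocks {0, 100, 200}, a lookup of block 150 moves l to
 * the 200 entry and r just before it, so l - 1 selects the entry
 * starting at 100 -- the rightmost entry whose start block does not
 * exceed the target.
 */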

void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	eh->eh_generation = 0;
	ext4_mark_inode_dirty(handle, inode);
}

struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path *path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0;
	int ret;
	gfp_t gfp_flags = GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
			       gfp_flags);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug(inode, "depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_free_ext_path(path);
	return ERR_PTR(ret);
}
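
/*
 * Typical caller pattern (an illustrative sketch; "lblk" is a
 * placeholder): the function may reuse, reallocate or free the path
 * passed in, so callers must adopt the return value and free it
 * exactly once:
 *
 *	path = ext4_find_extent(inode, lblk, path, 0);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ex = path[path->p_depth].p_ext;		(NULL on an empty leaf)
 *	...
 *	ext4_free_ext_path(path);
 */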

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
		     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug(inode, "insert new index %d after: %llu\n",
			  logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug(inode, "insert new index %d before: %llu\n",
			  logical, ptr);
		ix = curp->p_idx;
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug(inode, "insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
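
/*
 * Illustrative example of the shift above: inserting logical block 150
 * into an index block holding {0, 100, 200, 300}, with curp->p_idx at
 * the 100 entry, sets ix to the 200 entry, memmove()s {200, 300} one
 * slot to the right, and writes the new {150 -> ptr} entry into the
 * gap, keeping the index sorted.
 */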

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	gfp_t gfp_flags = GFP_NOFS;
	int err = 0;
	size_t ext_size = 0;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug(inode, "leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug(inode, "leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only. The index won't be inserted and the
	 * tree will remain in a consistent state. The next mount will
	 * repair the buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	neh->eh_generation = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug(inode, "create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		neh->eh_generation = 0;
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
			  i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
			     EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
			  EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		/* zero out unused area in the extent block */
		ext_size = sizeof(struct ext4_extent_header) +
			(sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
		memset(bh->b_data + ext_size, 0,
		       inode->i_sb->s_blocksize - ext_size);
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock, goal = 0;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	int err = 0;
	size_t ext_size = 0;

	/* Try to prepend new index to old one */
	if (ext_depth(inode))
		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
	if (goal > le32_to_cpu(es->s_first_data_block)) {
		flags |= EXT4_MB_HINT_TRY_GOAL;
		goal--;
	} else
		goal = ext4_inode_to_goal_block(inode);
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	ext_size = sizeof(EXT4_I(inode)->i_data);
	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
	/* zero out unused area in the extent block */
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	set_buffer_verified(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	err = ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}
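
/*
 * Illustrative example: growing a depth-0 tree copies the root extents
 * from i_data into a freshly allocated block (eh_max grows from the
 * 4-entry in-inode root to the per-block capacity), then rewrites the
 * root as a single index entry whose ei_block repeats the first
 * extent's ee_block, leaving the tree one level deeper.
 */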

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static struct ext4_ext_path *
ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
			 unsigned int mb_flags, unsigned int gb_flags,
			 struct ext4_ext_path *path,
			 struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;
	ext4_lblk_t ee_block = le32_to_cpu(newext->ee_block);

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use an already allocated block for the index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
		if (err)
			goto errout;

		/* refill path */
		path = ext4_find_extent(inode, ee_block, path, gb_flags);
		return path;
	}

	/* tree is full, time to grow in depth */
	err = ext4_ext_grow_indepth(handle, inode, mb_flags);
	if (err)
		goto errout;

	/* refill path */
	path = ext4_find_extent(inode, ee_block, path, gb_flags);
	if (IS_ERR(path))
		return path;

	/*
	 * only first (depth 0 -> 1) produces free space;
	 * in all other cases we have to split the grown tree
	 */
	depth = ext_depth(inode);
	if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
		/* now we need to split */
		goto repeat;
	}

	return path;

errout:
	ext4_free_ext_path(path);
	return ERR_PTR(err);
}

/*
 * Search the closest allocated block to the left of *logical;
 * return it at @logical and its physical address at @phys.
 * If *logical is the smallest allocated block, the function
 * returns 0 at @phys.
 * The return value contains 0 (success) or an error code.
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
				  depth);
				return -EFSCORRUPTED;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * Search the closest allocated block to the right of *logical;
 * return it at @logical and its physical address at @phys.
 * If it doesn't exist, return 0 and set @phys to 0. Otherwise return 1,
 * which means an allocated block was found and ret_ex is valid.
 * Or return a (< 0) error code.
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent *ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EFSCORRUPTED;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	while (++depth < path->p_depth) {
		/* subtract from p_depth to get proper eh_depth */
		bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		eh = ext_block_hdr(bh);
		ix = EXT_FIRST_INDEX(eh);
		put_bh(bh);
	}

	bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	eh = ext_block_hdr(bh);
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	if (ret_ex)
		*ret_ex = *ex;
	if (bh)
		put_bh(bh);
	return 1;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		struct ext4_ext_path *p = &path[depth];

		if (depth == path->p_depth) {
			/* leaf */
			if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
				return le32_to_cpu(p->p_ext[1].ee_block);
		} else {
			/* index */
			if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
				return le32_to_cpu(p->p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
		    EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EFSCORRUPTED;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			goto clean;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			goto clean;
	}
	return 0;

clean:
	/*
	 * The path[k].p_bh is either unmodified or with no verified bit
	 * set (see ext4_ext_get_access()). So just clear the verified bit
	 * of the successfully modified extents buffers, which will force
	 * these extents to be checked to avoid using inconsistent data.
	 */
	while (++k < depth)
		clear_buffer_verified(path[k].p_bh);

	return err;
}

static int ext4_can_extents_be_merged(struct inode *inode,
				      struct ext4_extent *ex1,
				      struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len;

	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
		return 0;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
	    le32_to_cpu(ex2->ee_block))
		return 0;

	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
		return 0;

	if (ext4_ext_is_unwritten(ex1) &&
	    ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}
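
/*
 * Illustrative example: written extents {ee_block 0, len 100, pblk 500}
 * and {ee_block 100, len 200, pblk 600} are mergeable: both are
 * written, 0 + 100 == 100, 500 + 100 == 600, and the combined length
 * 300 does not exceed EXT_INIT_MAX_LEN (32768). Were both unwritten,
 * the cap would be EXT_UNWRITTEN_MAX_LEN (32767) instead.
 */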
1800
1801 /*
1802 * This function tries to merge the "ex" extent to the next extent in the tree.
1803 * It always tries to merge towards right. If you want to merge towards
1804 * left, pass "ex - 1" as argument instead of "ex".
1805 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1806 * 1 if they got merged.
1807 */
ext4_ext_try_to_merge_right(struct inode * inode,struct ext4_ext_path * path,struct ext4_extent * ex)1808 static int ext4_ext_try_to_merge_right(struct inode *inode,
1809 struct ext4_ext_path *path,
1810 struct ext4_extent *ex)
1811 {
1812 struct ext4_extent_header *eh;
1813 unsigned int depth, len;
1814 int merge_done = 0, unwritten;
1815
1816 depth = ext_depth(inode);
1817 BUG_ON(path[depth].p_hdr == NULL);
1818 eh = path[depth].p_hdr;
1819
1820 while (ex < EXT_LAST_EXTENT(eh)) {
1821 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1822 break;
1823 /* merge with next extent! */
1824 unwritten = ext4_ext_is_unwritten(ex);
1825 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1826 + ext4_ext_get_actual_len(ex + 1));
1827 if (unwritten)
1828 ext4_ext_mark_unwritten(ex);
1829
1830 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1831 len = (EXT_LAST_EXTENT(eh) - ex - 1)
1832 * sizeof(struct ext4_extent);
1833 memmove(ex + 1, ex + 2, len);
1834 }
1835 le16_add_cpu(&eh->eh_entries, -1);
1836 merge_done = 1;
1837 WARN_ON(eh->eh_entries == 0);
1838 if (!eh->eh_entries)
1839 EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1840 }
1841
1842 return merge_done;
1843 }
1844
1845 /*
1846 * This function does a very simple check to see if we can collapse
1847 * an extent tree with a single extent tree leaf block into the inode.
1848 */
ext4_ext_try_to_merge_up(handle_t * handle,struct inode * inode,struct ext4_ext_path * path)1849 static void ext4_ext_try_to_merge_up(handle_t *handle,
1850 struct inode *inode,
1851 struct ext4_ext_path *path)
1852 {
1853 size_t s;
1854 unsigned max_root = ext4_ext_space_root(inode, 0);
1855 ext4_fsblk_t blk;
1856
1857 if ((path[0].p_depth != 1) ||
1858 (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1859 (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1860 return;
1861
1862 /*
1863 * We need to modify the block allocation bitmap and the block
1864 * group descriptor to release the extent tree block. If we
1865 * can't get the journal credits, give up.
1866 */
1867 if (ext4_journal_extend(handle, 2,
1868 ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
1869 return;
1870
1871 /*
1872 * Copy the extent data up to the inode
1873 */
1874 blk = ext4_idx_pblock(path[0].p_idx);
1875 s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1876 sizeof(struct ext4_extent_idx);
1877 s += sizeof(struct ext4_extent_header);
1878
1879 path[1].p_maxdepth = path[0].p_maxdepth;
1880 memcpy(path[0].p_hdr, path[1].p_hdr, s);
1881 path[0].p_depth = 0;
1882 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1883 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1884 path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1885
1886 ext4_ext_path_brelse(path + 1);
1887 ext4_free_blocks(handle, inode, NULL, blk, 1,
1888 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1889 }
1890
1891 /*
1892 * This function tries to merge the @ex extent to neighbours in the tree, then
1893 * tries to collapse the extent tree into the inode.
1894 */
1895 static void ext4_ext_try_to_merge(handle_t *handle,
1896 struct inode *inode,
1897 struct ext4_ext_path *path,
1898 struct ext4_extent *ex)
1899 {
1900 struct ext4_extent_header *eh;
1901 unsigned int depth;
1902 int merge_done = 0;
1903
1904 depth = ext_depth(inode);
1905 BUG_ON(path[depth].p_hdr == NULL);
1906 eh = path[depth].p_hdr;
1907
1908 if (ex > EXT_FIRST_EXTENT(eh))
1909 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1910
1911 if (!merge_done)
1912 (void) ext4_ext_try_to_merge_right(inode, path, ex);
1913
1914 ext4_ext_try_to_merge_up(handle, inode, path);
1915 }
1916
1917 /*
1918 * check if a portion of the "newext" extent overlaps with an
1919 * existing extent.
1920 *
1921 * If there is an overlap discovered, it updates the length of the newext
1922 * such that there will be no overlap, and then returns 1.
1923 * If there is no overlap found, it returns 0.
1924 */
1925 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1926 struct inode *inode,
1927 struct ext4_extent *newext,
1928 struct ext4_ext_path *path)
1929 {
1930 ext4_lblk_t b1, b2;
1931 unsigned int depth, len1;
1932 unsigned int ret = 0;
1933
1934 b1 = le32_to_cpu(newext->ee_block);
1935 len1 = ext4_ext_get_actual_len(newext);
1936 depth = ext_depth(inode);
1937 if (!path[depth].p_ext)
1938 goto out;
1939 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1940
1941 /*
1942 * get the next allocated block if the extent in the path
1943 * is before the requested block(s)
1944 */
1945 if (b2 < b1) {
1946 b2 = ext4_ext_next_allocated_block(path);
1947 if (b2 == EXT_MAX_BLOCKS)
1948 goto out;
1949 b2 = EXT4_LBLK_CMASK(sbi, b2);
1950 }
1951
1952 /* check for wrap through zero on extent logical start block */
1953 if (b1 + len1 < b1) {
1954 len1 = EXT_MAX_BLOCKS - b1;
1955 newext->ee_len = cpu_to_le16(len1);
1956 ret = 1;
1957 }
1958
1959 /* check for overlap */
1960 if (b1 + len1 > b2) {
1961 newext->ee_len = cpu_to_le16(b2 - b1);
1962 ret = 1;
1963 }
1964 out:
1965 return ret;
1966 }
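
/*
 * Worked example for the overlap check above (hypothetical numbers): if
 * @newext covers logical blocks 100..119 (len 20) and the next allocated
 * extent starts at block 110, the check trims @newext to 100..109 (len 10)
 * and returns 1. A minimal sketch of that clamping arithmetic (the helper
 * name is ours, not part of ext4):
 */
static void __maybe_unused ext4_ext_sketch_clamp(struct ext4_extent *newext,
						 ext4_lblk_t next_start)
{
	ext4_lblk_t b1 = le32_to_cpu(newext->ee_block);
	unsigned int len1 = ext4_ext_get_actual_len(newext);

	if (b1 + len1 > next_start)	/* would overlap the next extent */
		newext->ee_len = cpu_to_le16(next_start - b1);
}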
1967
1968 /*
1969 * ext4_ext_insert_extent:
1970 * tries to merge requested extent into the existing extent or
1971 * inserts requested extent as new one into the tree,
1972 * creating new leaf in the no-space case.
1973 */
1974 struct ext4_ext_path *
1975 ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1976 struct ext4_ext_path *path,
1977 struct ext4_extent *newext, int gb_flags)
1978 {
1979 struct ext4_extent_header *eh;
1980 struct ext4_extent *ex, *fex;
1981 struct ext4_extent *nearex; /* nearest extent */
1982 int depth, len, err = 0;
1983 ext4_lblk_t next;
1984 int mb_flags = 0, unwritten;
1985
1986 if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1987 mb_flags |= EXT4_MB_DELALLOC_RESERVED;
1988 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1989 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1990 err = -EFSCORRUPTED;
1991 goto errout;
1992 }
1993 depth = ext_depth(inode);
1994 ex = path[depth].p_ext;
1995 eh = path[depth].p_hdr;
1996 if (unlikely(path[depth].p_hdr == NULL)) {
1997 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1998 err = -EFSCORRUPTED;
1999 goto errout;
2000 }
2001
2002 /* try to insert block into found extent and return */
2003 if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
2004
2005 /*
2006 * Try to see whether we should rather test the extent to the
2007 * right of ex, or to the left of ex. This is because
2008 * ext4_find_extent() can return either the extent on the
2009 * left or on the right of the searched position. This
2010 * will make merging more effective.
2011 */
2012 if (ex < EXT_LAST_EXTENT(eh) &&
2013 (le32_to_cpu(ex->ee_block) +
2014 ext4_ext_get_actual_len(ex) <
2015 le32_to_cpu(newext->ee_block))) {
2016 ex += 1;
2017 goto prepend;
2018 } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
2019 (le32_to_cpu(newext->ee_block) +
2020 ext4_ext_get_actual_len(newext) <
2021 le32_to_cpu(ex->ee_block)))
2022 ex -= 1;
2023
2024 /* Try to append newex to the ex */
2025 if (ext4_can_extents_be_merged(inode, ex, newext)) {
2026 ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
2027 "(from %llu)\n",
2028 ext4_ext_is_unwritten(newext),
2029 ext4_ext_get_actual_len(newext),
2030 le32_to_cpu(ex->ee_block),
2031 ext4_ext_is_unwritten(ex),
2032 ext4_ext_get_actual_len(ex),
2033 ext4_ext_pblock(ex));
2034 err = ext4_ext_get_access(handle, inode,
2035 path + depth);
2036 if (err)
2037 goto errout;
2038 unwritten = ext4_ext_is_unwritten(ex);
2039 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2040 + ext4_ext_get_actual_len(newext));
2041 if (unwritten)
2042 ext4_ext_mark_unwritten(ex);
2043 nearex = ex;
2044 goto merge;
2045 }
2046
2047 prepend:
2048 /* Try to prepend newex to the ex */
2049 if (ext4_can_extents_be_merged(inode, newext, ex)) {
2050 ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
2051 "(from %llu)\n",
2052 le32_to_cpu(newext->ee_block),
2053 ext4_ext_is_unwritten(newext),
2054 ext4_ext_get_actual_len(newext),
2055 le32_to_cpu(ex->ee_block),
2056 ext4_ext_is_unwritten(ex),
2057 ext4_ext_get_actual_len(ex),
2058 ext4_ext_pblock(ex));
2059 err = ext4_ext_get_access(handle, inode,
2060 path + depth);
2061 if (err)
2062 goto errout;
2063
2064 unwritten = ext4_ext_is_unwritten(ex);
2065 ex->ee_block = newext->ee_block;
2066 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
2067 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2068 + ext4_ext_get_actual_len(newext));
2069 if (unwritten)
2070 ext4_ext_mark_unwritten(ex);
2071 nearex = ex;
2072 goto merge;
2073 }
2074 }
2075
2076 depth = ext_depth(inode);
2077 eh = path[depth].p_hdr;
2078 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2079 goto has_space;
2080
2081 /* probably next leaf has space for us? */
2082 fex = EXT_LAST_EXTENT(eh);
2083 next = EXT_MAX_BLOCKS;
2084 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
2085 next = ext4_ext_next_leaf_block(path);
2086 if (next != EXT_MAX_BLOCKS) {
2087 struct ext4_ext_path *npath;
2088
2089 ext_debug(inode, "next leaf block - %u\n", next);
2090 npath = ext4_find_extent(inode, next, NULL, gb_flags);
2091 if (IS_ERR(npath)) {
2092 err = PTR_ERR(npath);
2093 goto errout;
2094 }
2095 BUG_ON(npath->p_depth != path->p_depth);
2096 eh = npath[depth].p_hdr;
2097 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
2098 ext_debug(inode, "next leaf isn't full(%d)\n",
2099 le16_to_cpu(eh->eh_entries));
2100 ext4_free_ext_path(path);
2101 path = npath;
2102 goto has_space;
2103 }
2104 ext_debug(inode, "next leaf has no free space(%d,%d)\n",
2105 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2106 ext4_free_ext_path(npath);
2107 }
2108
2109 /*
2110 * There is no free space in the found leaf.
2111 * We're gonna add a new leaf in the tree.
2112 */
2113 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2114 mb_flags |= EXT4_MB_USE_RESERVED;
2115 path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2116 path, newext);
2117 if (IS_ERR(path))
2118 return path;
2119 depth = ext_depth(inode);
2120 eh = path[depth].p_hdr;
2121
2122 has_space:
2123 nearex = path[depth].p_ext;
2124
2125 err = ext4_ext_get_access(handle, inode, path + depth);
2126 if (err)
2127 goto errout;
2128
2129 if (!nearex) {
2130 /* there is no extent in this leaf, create first one */
2131 ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
2132 le32_to_cpu(newext->ee_block),
2133 ext4_ext_pblock(newext),
2134 ext4_ext_is_unwritten(newext),
2135 ext4_ext_get_actual_len(newext));
2136 nearex = EXT_FIRST_EXTENT(eh);
2137 } else {
2138 if (le32_to_cpu(newext->ee_block)
2139 > le32_to_cpu(nearex->ee_block)) {
2140 /* Insert after */
2141 ext_debug(inode, "insert %u:%llu:[%d]%d after: "
2142 "nearest %p\n",
2143 le32_to_cpu(newext->ee_block),
2144 ext4_ext_pblock(newext),
2145 ext4_ext_is_unwritten(newext),
2146 ext4_ext_get_actual_len(newext),
2147 nearex);
2148 nearex++;
2149 } else {
2150 /* Insert before */
2151 BUG_ON(newext->ee_block == nearex->ee_block);
2152 ext_debug(inode, "insert %u:%llu:[%d]%d before: "
2153 "nearest %p\n",
2154 le32_to_cpu(newext->ee_block),
2155 ext4_ext_pblock(newext),
2156 ext4_ext_is_unwritten(newext),
2157 ext4_ext_get_actual_len(newext),
2158 nearex);
2159 }
2160 len = EXT_LAST_EXTENT(eh) - nearex + 1;
2161 if (len > 0) {
2162 ext_debug(inode, "insert %u:%llu:[%d]%d: "
2163 "move %d extents from 0x%p to 0x%p\n",
2164 le32_to_cpu(newext->ee_block),
2165 ext4_ext_pblock(newext),
2166 ext4_ext_is_unwritten(newext),
2167 ext4_ext_get_actual_len(newext),
2168 len, nearex, nearex + 1);
2169 memmove(nearex + 1, nearex,
2170 len * sizeof(struct ext4_extent));
2171 }
2172 }
2173
2174 le16_add_cpu(&eh->eh_entries, 1);
2175 path[depth].p_ext = nearex;
2176 nearex->ee_block = newext->ee_block;
2177 ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2178 nearex->ee_len = newext->ee_len;
2179
2180 merge:
2181 /* try to merge extents */
2182 if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2183 ext4_ext_try_to_merge(handle, inode, path, nearex);
2184
2185 /* time to correct all indexes above */
2186 err = ext4_ext_correct_indexes(handle, inode, path);
2187 if (err)
2188 goto errout;
2189
2190 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2191 if (err)
2192 goto errout;
2193
2194 return path;
2195
2196 errout:
2197 ext4_free_ext_path(path);
2198 return ERR_PTR(err);
2199 }
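
/*
 * Usage sketch for ext4_ext_insert_extent(): the caller passes in a path
 * obtained from ext4_find_extent() and must continue with the returned
 * path, since the tree may have grown a new leaf (or a whole new level)
 * during the insert. On failure the old path has already been freed:
 *
 *	path = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 */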
2200
2201 static int ext4_fill_es_cache_info(struct inode *inode,
2202 ext4_lblk_t block, ext4_lblk_t num,
2203 struct fiemap_extent_info *fieinfo)
2204 {
2205 ext4_lblk_t next, end = block + num - 1;
2206 struct extent_status es;
2207 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2208 unsigned int flags;
2209 int err;
2210
2211 while (block <= end) {
2212 next = 0;
2213 flags = 0;
2214 if (!ext4_es_lookup_extent(inode, block, &next, &es))
2215 break;
2216 if (ext4_es_is_unwritten(&es))
2217 flags |= FIEMAP_EXTENT_UNWRITTEN;
2218 if (ext4_es_is_delayed(&es))
2219 flags |= (FIEMAP_EXTENT_DELALLOC |
2220 FIEMAP_EXTENT_UNKNOWN);
2221 if (ext4_es_is_hole(&es))
2222 flags |= EXT4_FIEMAP_EXTENT_HOLE;
2223 if (next == 0)
2224 flags |= FIEMAP_EXTENT_LAST;
2225 if (flags & (FIEMAP_EXTENT_DELALLOC|
2226 EXT4_FIEMAP_EXTENT_HOLE))
2227 es.es_pblk = 0;
2228 else
2229 es.es_pblk = ext4_es_pblock(&es);
2230 err = fiemap_fill_next_extent(fieinfo,
2231 (__u64)es.es_lblk << blksize_bits,
2232 (__u64)es.es_pblk << blksize_bits,
2233 (__u64)es.es_len << blksize_bits,
2234 flags);
2235 if (next == 0)
2236 break;
2237 block = next;
2238 if (err < 0)
2239 return err;
2240 if (err == 1)
2241 return 0;
2242 }
2243 return 0;
2244 }
2245
2246
2247 /*
2248 * ext4_ext_find_hole - find hole around given block according to the given path
2249 * @inode: inode we lookup in
2250 * @path: path in extent tree to @lblk
2251 * @lblk: pointer to logical block around which we want to determine hole
2252 *
2253 * Determine the hole length (and its start, if easily possible) around the
2254 * given logical block. We don't try too hard to find the beginning of the
2255 * hole, but if @path points to the extent just before @lblk, we provide it.
2256 *
2257 * The function returns the length of a hole starting at @lblk. We update @lblk
2258 * to the beginning of the hole if we managed to find it.
2259 */
2260 static ext4_lblk_t ext4_ext_find_hole(struct inode *inode,
2261 struct ext4_ext_path *path,
2262 ext4_lblk_t *lblk)
2263 {
2264 int depth = ext_depth(inode);
2265 struct ext4_extent *ex;
2266 ext4_lblk_t len;
2267
2268 ex = path[depth].p_ext;
2269 if (ex == NULL) {
2270 /* there is no extent yet, so gap is [0;-] */
2271 *lblk = 0;
2272 len = EXT_MAX_BLOCKS;
2273 } else if (*lblk < le32_to_cpu(ex->ee_block)) {
2274 len = le32_to_cpu(ex->ee_block) - *lblk;
2275 } else if (*lblk >= le32_to_cpu(ex->ee_block)
2276 + ext4_ext_get_actual_len(ex)) {
2277 ext4_lblk_t next;
2278
2279 *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
2280 next = ext4_ext_next_allocated_block(path);
2281 BUG_ON(next == *lblk);
2282 len = next - *lblk;
2283 } else {
2284 BUG();
2285 }
2286 return len;
2287 }
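
/*
 * Example for ext4_ext_find_hole() (assumed layout): with a single extent
 * covering logical blocks 10..19,
 *   *lblk == 5  returns 5, the hole length from *lblk to the extent start
 *               (the true beginning of the hole is not searched for), and
 *   *lblk == 25 updates *lblk to 20 and returns the distance to the next
 *               allocated block, or EXT_MAX_BLOCKS - 20 if there is none.
 */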
2288
2289 /*
2290 * ext4_ext_rm_idx:
2291 * removes index from the index block.
2292 */
2293 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2294 struct ext4_ext_path *path, int depth)
2295 {
2296 int err;
2297 ext4_fsblk_t leaf;
2298 int k = depth - 1;
2299
2300 /* free index block */
2301 leaf = ext4_idx_pblock(path[k].p_idx);
2302 if (unlikely(path[k].p_hdr->eh_entries == 0)) {
2303 EXT4_ERROR_INODE(inode, "path[%d].p_hdr->eh_entries == 0", k);
2304 return -EFSCORRUPTED;
2305 }
2306 err = ext4_ext_get_access(handle, inode, path + k);
2307 if (err)
2308 return err;
2309
2310 if (path[k].p_idx != EXT_LAST_INDEX(path[k].p_hdr)) {
2311 int len = EXT_LAST_INDEX(path[k].p_hdr) - path[k].p_idx;
2312 len *= sizeof(struct ext4_extent_idx);
2313 memmove(path[k].p_idx, path[k].p_idx + 1, len);
2314 }
2315
2316 le16_add_cpu(&path[k].p_hdr->eh_entries, -1);
2317 err = ext4_ext_dirty(handle, inode, path + k);
2318 if (err)
2319 return err;
2320 ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
2321 trace_ext4_ext_rm_idx(inode, leaf);
2322
2323 ext4_free_blocks(handle, inode, NULL, leaf, 1,
2324 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2325
2326 while (--k >= 0) {
2327 if (path[k + 1].p_idx != EXT_FIRST_INDEX(path[k + 1].p_hdr))
2328 break;
2329 err = ext4_ext_get_access(handle, inode, path + k);
2330 if (err)
2331 goto clean;
2332 path[k].p_idx->ei_block = path[k + 1].p_idx->ei_block;
2333 err = ext4_ext_dirty(handle, inode, path + k);
2334 if (err)
2335 goto clean;
2336 }
2337 return 0;
2338
2339 clean:
2340 /*
2341 * The path[k].p_bh is either unmodified or with no verified bit
2342 * set (see ext4_ext_get_access()). So just clear the verified bit
2343 * of the successfully modified extents buffers, which will force
2344 * these extents to be checked to avoid using inconsistent data.
2345 */
2346 while (++k < depth)
2347 clear_buffer_verified(path[k].p_bh);
2348
2349 return err;
2350 }
2351
2352 /*
2353 * ext4_ext_calc_credits_for_single_extent:
2354 * This routine returns the maximum number of credits needed to insert
2355 * an extent into the extent tree.
2356 * When passing an actual path, the caller should calculate the credits
2357 * while holding i_data_sem.
2358 */
2359 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2360 struct ext4_ext_path *path)
2361 {
2362 if (path) {
2363 int depth = ext_depth(inode);
2364 int ret = 0;
2365
2366 /* probably there is space in leaf? */
2367 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2368 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2369
2370 /*
2371 * There is some space in the leaf; no
2372 * need to account for the leaf block credit.
2373 *
2374 * Bitmaps, block group descriptor blocks
2375 * and other metadata blocks still need to be
2376 * accounted for.
2377 */
2378 /* 1 bitmap, 1 block group descriptor */
2379 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2380 return ret;
2381 }
2382 }
2383
2384 return ext4_chunk_trans_blocks(inode, nrblocks);
2385 }
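
/*
 * Usage sketch (hypothetical caller): when inserting a single extent while
 * holding i_data_sem, the transaction would be sized roughly like this:
 *
 *	credits = ext4_ext_calc_credits_for_single_extent(inode, 1, path);
 *	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
 *
 * This sequence is illustrative only; real callers fold these credits
 * into larger transaction estimates.
 */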
2386
2387 /*
2388 * How many index/leaf blocks need to change/allocate to add @extents extents?
2389 *
2390 * If we add a single extent, then in the worst case, each tree level
2391 * index/leaf needs to be changed in case the tree splits.
2392 *
2393 * If more extents are inserted, they could cause the whole tree split more
2394 * than once, but this is really rare.
2395 */
2396 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2397 {
2398 int index;
2399
2400 /* If we are converting the inline data, only one is needed here. */
2401 if (ext4_has_inline_data(inode))
2402 return 1;
2403
2404 /*
2405 * Extent tree can change between the time we estimate credits and
2406 * the time we actually modify the tree. Assume the worst case.
2407 */
2408 if (extents <= 1)
2409 index = EXT4_MAX_EXTENT_DEPTH * 2;
2410 else
2411 index = EXT4_MAX_EXTENT_DEPTH * 3;
2412
2413 return index;
2414 }
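
/*
 * Worked example: with EXT4_MAX_EXTENT_DEPTH of 5 (its value at the time
 * of writing), a single-extent insert is budgeted at 5 * 2 = 10 modified
 * index/leaf blocks (every level may split once), while a multi-extent
 * insert is budgeted at 5 * 3 = 15 to absorb the rare case of the tree
 * splitting more than once.
 */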
2415
2416 static inline int get_default_free_blocks_flags(struct inode *inode)
2417 {
2418 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
2419 ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
2420 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2421 else if (ext4_should_journal_data(inode))
2422 return EXT4_FREE_BLOCKS_FORGET;
2423 return 0;
2424 }
2425
2426 /*
2427 * ext4_rereserve_cluster - increment the reserved cluster count when
2428 * freeing a cluster with a pending reservation
2429 *
2430 * @inode - file containing the cluster
2431 * @lblk - logical block in cluster to be reserved
2432 *
2433 * Increments the reserved cluster count and adjusts quota in a bigalloc
2434 * file system when freeing a partial cluster containing at least one
2435 * delayed and unwritten block. A partial cluster meeting that
2436 * requirement will have a pending reservation. If so, the
2437 * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
2438 * defer reserved and allocated space accounting to a subsequent call
2439 * to this function.
2440 */
2441 static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
2442 {
2443 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2444 struct ext4_inode_info *ei = EXT4_I(inode);
2445
2446 dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
2447
2448 spin_lock(&ei->i_block_reservation_lock);
2449 ei->i_reserved_data_blocks++;
2450 percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
2451 spin_unlock(&ei->i_block_reservation_lock);
2452
2453 percpu_counter_add(&sbi->s_freeclusters_counter, 1);
2454 ext4_remove_pending(inode, lblk);
2455 }
2456
2457 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2458 struct ext4_extent *ex,
2459 struct partial_cluster *partial,
2460 ext4_lblk_t from, ext4_lblk_t to)
2461 {
2462 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2463 unsigned short ee_len = ext4_ext_get_actual_len(ex);
2464 ext4_fsblk_t last_pblk, pblk;
2465 ext4_lblk_t num;
2466 int flags;
2467
2468 /* only extent tail removal is allowed */
2469 if (from < le32_to_cpu(ex->ee_block) ||
2470 to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
2471 ext4_error(sbi->s_sb,
2472 "strange request: removal(2) %u-%u from %u:%u",
2473 from, to, le32_to_cpu(ex->ee_block), ee_len);
2474 return 0;
2475 }
2476
2477 #ifdef EXTENTS_STATS
2478 spin_lock(&sbi->s_ext_stats_lock);
2479 sbi->s_ext_blocks += ee_len;
2480 sbi->s_ext_extents++;
2481 if (ee_len < sbi->s_ext_min)
2482 sbi->s_ext_min = ee_len;
2483 if (ee_len > sbi->s_ext_max)
2484 sbi->s_ext_max = ee_len;
2485 if (ext_depth(inode) > sbi->s_depth_max)
2486 sbi->s_depth_max = ext_depth(inode);
2487 spin_unlock(&sbi->s_ext_stats_lock);
2488 #endif
2489
2490 trace_ext4_remove_blocks(inode, ex, from, to, partial);
2491
2492 /*
2493 * if we have a partial cluster, and it's different from the
2494 * cluster of the last block in the extent, we free it
2495 */
2496 last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
2497
2498 if (partial->state != initial &&
2499 partial->pclu != EXT4_B2C(sbi, last_pblk)) {
2500 if (partial->state == tofree) {
2501 flags = get_default_free_blocks_flags(inode);
2502 if (ext4_is_pending(inode, partial->lblk))
2503 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2504 ext4_free_blocks(handle, inode, NULL,
2505 EXT4_C2B(sbi, partial->pclu),
2506 sbi->s_cluster_ratio, flags);
2507 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2508 ext4_rereserve_cluster(inode, partial->lblk);
2509 }
2510 partial->state = initial;
2511 }
2512
2513 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2514 pblk = ext4_ext_pblock(ex) + ee_len - num;
2515
2516 /*
2517 * We free the partial cluster at the end of the extent (if any),
2518 * unless the cluster is used by another extent (partial_cluster
2519 * state is nofree). If a partial cluster exists here, it must be
2520 * shared with the last block in the extent.
2521 */
2522 flags = get_default_free_blocks_flags(inode);
2523
2524 /* partial, left end cluster aligned, right end unaligned */
2525 if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
2526 (EXT4_LBLK_CMASK(sbi, to) >= from) &&
2527 (partial->state != nofree)) {
2528 if (ext4_is_pending(inode, to))
2529 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2530 ext4_free_blocks(handle, inode, NULL,
2531 EXT4_PBLK_CMASK(sbi, last_pblk),
2532 sbi->s_cluster_ratio, flags);
2533 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2534 ext4_rereserve_cluster(inode, to);
2535 partial->state = initial;
2536 flags = get_default_free_blocks_flags(inode);
2537 }
2538
2539 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2540
2541 /*
2542 * For bigalloc file systems, we never free a partial cluster
2543 * at the beginning of the extent. Instead, we check to see if we
2544 * need to free it on a subsequent call to ext4_remove_blocks,
2545 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
2546 */
2547 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2548 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2549
2550 /* reset the partial cluster if we've freed past it */
2551 if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
2552 partial->state = initial;
2553
2554 /*
2555 * If we've freed the entire extent but the beginning is not left
2556 * cluster aligned and is not marked as ineligible for freeing we
2557 * record the partial cluster at the beginning of the extent. It
2558 * wasn't freed by the preceding ext4_free_blocks() call, and we
2559 * need to look farther to the left to determine if it's to be freed
2560 * (not shared with another extent). Else, reset the partial
2561 * cluster - we're either done freeing or the beginning of the
2562 * extent is left cluster aligned.
2563 */
2564 if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
2565 if (partial->state == initial) {
2566 partial->pclu = EXT4_B2C(sbi, pblk);
2567 partial->lblk = from;
2568 partial->state = tofree;
2569 }
2570 } else {
2571 partial->state = initial;
2572 }
2573
2574 return 0;
2575 }
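
/*
 * Bigalloc example for the partial-cluster logic above (assuming a
 * cluster ratio of 4 blocks per cluster): removing an entire extent
 * covering blocks 6..11 frees whole clusters only. Block 6 sits in the
 * middle of the cluster spanning blocks 4..7, so that leading cluster is
 * not freed here (EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER); it is recorded
 * in @partial with state "tofree" and released later, once we know no
 * extent further to the left still uses it.
 */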
2576
2577 /*
2578 * ext4_ext_rm_leaf() removes the extents associated with the
2579 * blocks appearing between "start" and "end". Both "start"
2580 * and "end" must appear in the same extent or EIO is returned.
2581 *
2582 * @handle: The journal handle
2583 * @inode: The file's inode
2584 * @path: The path to the leaf
2585 * @partial: The cluster which we'll have to free if all extents
2586 * have been released from it (state "tofree"). However, if its
2587 * state is "nofree", it's a cluster just to the right of the
2588 * punched region and it must not be freed.
2589 * @start: The first block to remove
2590 * @end: The last block to remove
2591 */
2592 static int
2593 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2594 struct ext4_ext_path *path,
2595 struct partial_cluster *partial,
2596 ext4_lblk_t start, ext4_lblk_t end)
2597 {
2598 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2599 int err = 0, correct_index = 0;
2600 int depth = ext_depth(inode), credits, revoke_credits;
2601 struct ext4_extent_header *eh;
2602 ext4_lblk_t a, b;
2603 unsigned num;
2604 ext4_lblk_t ex_ee_block;
2605 unsigned short ex_ee_len;
2606 unsigned unwritten = 0;
2607 struct ext4_extent *ex;
2608 ext4_fsblk_t pblk;
2609
2610 /* the header must be checked already in ext4_ext_remove_space() */
2611 ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
2612 if (!path[depth].p_hdr)
2613 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2614 eh = path[depth].p_hdr;
2615 if (unlikely(path[depth].p_hdr == NULL)) {
2616 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2617 return -EFSCORRUPTED;
2618 }
2619 /* find where to start removing */
2620 ex = path[depth].p_ext;
2621 if (!ex)
2622 ex = EXT_LAST_EXTENT(eh);
2623
2624 ex_ee_block = le32_to_cpu(ex->ee_block);
2625 ex_ee_len = ext4_ext_get_actual_len(ex);
2626
2627 trace_ext4_ext_rm_leaf(inode, start, ex, partial);
2628
2629 while (ex >= EXT_FIRST_EXTENT(eh) &&
2630 ex_ee_block + ex_ee_len > start) {
2631
2632 if (ext4_ext_is_unwritten(ex))
2633 unwritten = 1;
2634 else
2635 unwritten = 0;
2636
2637 ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
2638 unwritten, ex_ee_len);
2639 path[depth].p_ext = ex;
2640
2641 a = max(ex_ee_block, start);
2642 b = min(ex_ee_block + ex_ee_len - 1, end);
2643
2644 ext_debug(inode, " border %u:%u\n", a, b);
2645
2646 /* If this extent is beyond the end of the hole, skip it */
2647 if (end < ex_ee_block) {
2648 /*
2649 * We're going to skip this extent and move to another,
2650 * so note that its first cluster is in use to avoid
2651 * freeing it when removing blocks. Eventually, the
2652 * right edge of the truncated/punched region will
2653 * be just to the left.
2654 */
2655 if (sbi->s_cluster_ratio > 1) {
2656 pblk = ext4_ext_pblock(ex);
2657 partial->pclu = EXT4_B2C(sbi, pblk);
2658 partial->state = nofree;
2659 }
2660 ex--;
2661 ex_ee_block = le32_to_cpu(ex->ee_block);
2662 ex_ee_len = ext4_ext_get_actual_len(ex);
2663 continue;
2664 } else if (b != ex_ee_block + ex_ee_len - 1) {
2665 EXT4_ERROR_INODE(inode,
2666 "can not handle truncate %u:%u "
2667 "on extent %u:%u",
2668 start, end, ex_ee_block,
2669 ex_ee_block + ex_ee_len - 1);
2670 err = -EFSCORRUPTED;
2671 goto out;
2672 } else if (a != ex_ee_block) {
2673 /* remove tail of the extent */
2674 num = a - ex_ee_block;
2675 } else {
2676 /* remove whole extent: excellent! */
2677 num = 0;
2678 }
2679 /*
2680 * 3 for leaf, sb, and inode plus 2 (bmap and group
2681 * descriptor) for each block group; assume two block
2682 * groups plus ex_ee_len/blocks_per_block_group for
2683 * the worst case
2684 */
2685 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2686 if (ex == EXT_FIRST_EXTENT(eh)) {
2687 correct_index = 1;
2688 credits += (ext_depth(inode)) + 1;
2689 }
2690 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2691 /*
2692 * We may end up freeing some index blocks and data from the
2693 * punched range. Note that partial clusters are accounted for
2694 * by ext4_free_data_revoke_credits().
2695 */
2696 revoke_credits =
2697 ext4_free_metadata_revoke_credits(inode->i_sb,
2698 ext_depth(inode)) +
2699 ext4_free_data_revoke_credits(inode, b - a + 1);
2700
2701 err = ext4_datasem_ensure_credits(handle, inode, credits,
2702 credits, revoke_credits);
2703 if (err) {
2704 if (err > 0)
2705 err = -EAGAIN;
2706 goto out;
2707 }
2708
2709 err = ext4_ext_get_access(handle, inode, path + depth);
2710 if (err)
2711 goto out;
2712
2713 err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
2714 if (err)
2715 goto out;
2716
2717 if (num == 0)
2718 /* this extent is removed; mark slot entirely unused */
2719 ext4_ext_store_pblock(ex, 0);
2720
2721 ex->ee_len = cpu_to_le16(num);
2722 /*
2723 * Do not mark unwritten if all the blocks in the
2724 * extent have been removed.
2725 */
2726 if (unwritten && num)
2727 ext4_ext_mark_unwritten(ex);
2728 /*
2729 * If the extent was completely released,
2730 * we need to remove it from the leaf
2731 */
2732 if (num == 0) {
2733 if (end != EXT_MAX_BLOCKS - 1) {
2734 /*
2735 * For hole punching, we need to scoot all the
2736 * extents up when an extent is removed so that
2737 * we don't have blank extents in the middle
2738 */
2739 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2740 sizeof(struct ext4_extent));
2741
2742 /* Now get rid of the one at the end */
2743 memset(EXT_LAST_EXTENT(eh), 0,
2744 sizeof(struct ext4_extent));
2745 }
2746 le16_add_cpu(&eh->eh_entries, -1);
2747 }
2748
2749 err = ext4_ext_dirty(handle, inode, path + depth);
2750 if (err)
2751 goto out;
2752
2753 ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
2754 ext4_ext_pblock(ex));
2755 ex--;
2756 ex_ee_block = le32_to_cpu(ex->ee_block);
2757 ex_ee_len = ext4_ext_get_actual_len(ex);
2758 }
2759
2760 if (correct_index && eh->eh_entries)
2761 err = ext4_ext_correct_indexes(handle, inode, path);
2762
2763 /*
2764 * If there's a partial cluster and at least one extent remains in
2765 * the leaf, free the partial cluster if it isn't shared with the
2766 * current extent. If it is shared with the current extent
2767 * we reset the partial cluster because we've reached the start of the
2768 * truncated/punched region and we're done removing blocks.
2769 */
2770 if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
2771 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2772 if (partial->pclu != EXT4_B2C(sbi, pblk)) {
2773 int flags = get_default_free_blocks_flags(inode);
2774
2775 if (ext4_is_pending(inode, partial->lblk))
2776 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2777 ext4_free_blocks(handle, inode, NULL,
2778 EXT4_C2B(sbi, partial->pclu),
2779 sbi->s_cluster_ratio, flags);
2780 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2781 ext4_rereserve_cluster(inode, partial->lblk);
2782 }
2783 partial->state = initial;
2784 }
2785
2786 /* if this leaf is free, then we should
2787 * remove it from index block above */
2788 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2789 err = ext4_ext_rm_idx(handle, inode, path, depth);
2790
2791 out:
2792 return err;
2793 }
2794
2795 /*
2796 * ext4_ext_more_to_rm:
2797 * returns 1 if current index has to be freed (even partial)
2798 */
2799 static int
2800 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2801 {
2802 BUG_ON(path->p_idx == NULL);
2803
2804 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2805 return 0;
2806
2807 /*
2808 * if a truncate on a deeper level happened, it wasn't partial,
2809 * so we have to consider the current index for truncation
2810 */
2811 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2812 return 0;
2813 return 1;
2814 }
2815
2816 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2817 ext4_lblk_t end)
2818 {
2819 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2820 int depth = ext_depth(inode);
2821 struct ext4_ext_path *path = NULL;
2822 struct partial_cluster partial;
2823 handle_t *handle;
2824 int i = 0, err = 0;
2825
2826 partial.pclu = 0;
2827 partial.lblk = 0;
2828 partial.state = initial;
2829
2830 ext_debug(inode, "truncate since %u to %u\n", start, end);
2831
2832 /* probably first extent we're gonna free will be last in block */
2833 handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
2834 depth + 1,
2835 ext4_free_metadata_revoke_credits(inode->i_sb, depth));
2836 if (IS_ERR(handle))
2837 return PTR_ERR(handle);
2838
2839 again:
2840 trace_ext4_ext_remove_space(inode, start, end, depth);
2841
2842 /*
2843 * Check if we are removing extents inside the extent tree. If that
2844 * is the case, we are going to punch a hole inside the extent tree
2845 * so we have to check whether we need to split the extent covering
2846 * the last block to remove so we can easily remove the part of it
2847 * in ext4_ext_rm_leaf().
2848 */
2849 if (end < EXT_MAX_BLOCKS - 1) {
2850 struct ext4_extent *ex;
2851 ext4_lblk_t ee_block, ex_end, lblk;
2852 ext4_fsblk_t pblk;
2853
2854 /* find extent for or closest extent to this block */
2855 path = ext4_find_extent(inode, end, NULL,
2856 EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
2857 if (IS_ERR(path)) {
2858 ext4_journal_stop(handle);
2859 return PTR_ERR(path);
2860 }
2861 depth = ext_depth(inode);
2862 /* The leaf may not exist only if the inode has no blocks at all */
2863 ex = path[depth].p_ext;
2864 if (!ex) {
2865 if (depth) {
2866 EXT4_ERROR_INODE(inode,
2867 "path[%d].p_hdr == NULL",
2868 depth);
2869 err = -EFSCORRUPTED;
2870 }
2871 goto out;
2872 }
2873
2874 ee_block = le32_to_cpu(ex->ee_block);
2875 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
2876
2877 /*
2878 * See if the last block is inside the extent, if so split
2879 * the extent at 'end' block so we can easily remove the
2880 * tail of the first part of the split extent in
2881 * ext4_ext_rm_leaf().
2882 */
2883 if (end >= ee_block && end < ex_end) {
2884
2885 /*
2886 * If we're going to split the extent, note that
2887 * the cluster containing the block after 'end' is
2888 * in use to avoid freeing it when removing blocks.
2889 */
2890 if (sbi->s_cluster_ratio > 1) {
2891 pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
2892 partial.pclu = EXT4_B2C(sbi, pblk);
2893 partial.state = nofree;
2894 }
2895
2896 /*
2897 * Split the extent in two so that 'end' is the last
2898 * block in the first new extent. Also we should not
2899 * fail removing space due to ENOSPC so try to use
2900 * reserved block if that happens.
2901 */
2902 path = ext4_force_split_extent_at(handle, inode, path,
2903 end + 1, 1);
2904 if (IS_ERR(path)) {
2905 err = PTR_ERR(path);
2906 goto out;
2907 }
2908 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
2909 partial.state == initial) {
2910 /*
2911 * If we're punching, there's an extent to the right.
2912 * If the partial cluster hasn't been set, set it to
2913 * that extent's first cluster and its state to nofree
2914 * so it won't be freed should it contain blocks to be
2915 * removed. If it's already set (tofree/nofree), we're
2916 * retrying and keep the original partial cluster info
2917 * so a cluster marked tofree as a result of earlier
2918 * extent removal is not lost.
2919 */
2920 lblk = ex_end + 1;
2921 err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2922 NULL);
2923 if (err < 0)
2924 goto out;
2925 if (pblk) {
2926 partial.pclu = EXT4_B2C(sbi, pblk);
2927 partial.state = nofree;
2928 }
2929 }
2930 }
2931 /*
2932 * We start scanning from right side, freeing all the blocks
2933 * after i_size and walking into the tree depth-wise.
2934 */
2935 depth = ext_depth(inode);
2936 if (path) {
2937 int k = i = depth;
2938 while (--k > 0)
2939 path[k].p_block =
2940 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2941 } else {
2942 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
2943 GFP_NOFS | __GFP_NOFAIL);
2944 if (path == NULL) {
2945 ext4_journal_stop(handle);
2946 return -ENOMEM;
2947 }
2948 path[0].p_maxdepth = path[0].p_depth = depth;
2949 path[0].p_hdr = ext_inode_hdr(inode);
2950 i = 0;
2951
2952 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2953 err = -EFSCORRUPTED;
2954 goto out;
2955 }
2956 }
2957 err = 0;
2958
2959 while (i >= 0 && err == 0) {
2960 if (i == depth) {
2961 /* this is leaf block */
2962 err = ext4_ext_rm_leaf(handle, inode, path,
2963 &partial, start, end);
2964 /* root level has p_bh == NULL, brelse() eats this */
2965 ext4_ext_path_brelse(path + i);
2966 i--;
2967 continue;
2968 }
2969
2970 /* this is index block */
2971 if (!path[i].p_hdr) {
2972 ext_debug(inode, "initialize header\n");
2973 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2974 }
2975
2976 if (!path[i].p_idx) {
2977 /* this level hasn't been touched yet */
2978 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2979 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2980 ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
2981 path[i].p_hdr,
2982 le16_to_cpu(path[i].p_hdr->eh_entries));
2983 } else {
2984 /* we were already here, see at next index */
2985 path[i].p_idx--;
2986 }
2987
2988 ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
2989 i, EXT_FIRST_INDEX(path[i].p_hdr),
2990 path[i].p_idx);
2991 if (ext4_ext_more_to_rm(path + i)) {
2992 struct buffer_head *bh;
2993 /* go to the next level */
2994 ext_debug(inode, "move to level %d (block %llu)\n",
2995 i + 1, ext4_idx_pblock(path[i].p_idx));
2996 memset(path + i + 1, 0, sizeof(*path));
2997 bh = read_extent_tree_block(inode, path[i].p_idx,
2998 depth - i - 1,
2999 EXT4_EX_NOCACHE);
3000 if (IS_ERR(bh)) {
3001 /* should we reset i_size? */
3002 err = PTR_ERR(bh);
3003 break;
3004 }
3005 /* Yield here to deal with large extent trees.
3006 * Should be a no-op if we did IO above. */
3007 cond_resched();
3008 if (WARN_ON(i + 1 > depth)) {
3009 err = -EFSCORRUPTED;
3010 break;
3011 }
3012 path[i + 1].p_bh = bh;
3013
3014 /* save actual number of indexes since this
3015 * number is changed at the next iteration */
3016 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
3017 i++;
3018 } else {
3019 /* we finished processing this index, go up */
3020 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
3021 /* index is empty, remove it;
3022 * handle must already be prepared by
3023 * ext4_ext_rm_leaf() */
3024 err = ext4_ext_rm_idx(handle, inode, path, i);
3025 }
3026 /* root level has p_bh == NULL, brelse() eats this */
3027 ext4_ext_path_brelse(path + i);
3028 i--;
3029 ext_debug(inode, "return to level %d\n", i);
3030 }
3031 }
3032
3033 trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
3034 path->p_hdr->eh_entries);
3035
3036 /*
3037 * if there's a partial cluster and we have removed the first extent
3038 * in the file, then we also free the partial cluster, if any
3039 */
3040 if (partial.state == tofree && err == 0) {
3041 int flags = get_default_free_blocks_flags(inode);
3042
3043 if (ext4_is_pending(inode, partial.lblk))
3044 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
3045 ext4_free_blocks(handle, inode, NULL,
3046 EXT4_C2B(sbi, partial.pclu),
3047 sbi->s_cluster_ratio, flags);
3048 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
3049 ext4_rereserve_cluster(inode, partial.lblk);
3050 partial.state = initial;
3051 }
3052
3053 /* TODO: flexible tree reduction should be here */
3054 if (path->p_hdr->eh_entries == 0) {
3055 /*
3056 * truncate to zero freed all the tree,
3057 * so we need to correct eh_depth
3058 */
3059 err = ext4_ext_get_access(handle, inode, path);
3060 if (err == 0) {
3061 ext_inode_hdr(inode)->eh_depth = 0;
3062 ext_inode_hdr(inode)->eh_max =
3063 cpu_to_le16(ext4_ext_space_root(inode, 0));
3064 err = ext4_ext_dirty(handle, inode, path);
3065 }
3066 }
3067 out:
3068 ext4_free_ext_path(path);
3069 path = NULL;
3070 if (err == -EAGAIN)
3071 goto again;
3072 ext4_journal_stop(handle);
3073
3074 return err;
3075 }
3076
3077 /*
3078 * called at mount time
3079 */
3080 void ext4_ext_init(struct super_block *sb)
3081 {
3082 /*
3083 * possible initialization would be here
3084 */
3085
3086 if (ext4_has_feature_extents(sb)) {
3087 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3088 printk(KERN_INFO "EXT4-fs: file extents enabled"
3089 #ifdef AGGRESSIVE_TEST
3090 ", aggressive tests"
3091 #endif
3092 #ifdef CHECK_BINSEARCH
3093 ", check binsearch"
3094 #endif
3095 #ifdef EXTENTS_STATS
3096 ", stats"
3097 #endif
3098 "\n");
3099 #endif
3100 #ifdef EXTENTS_STATS
3101 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3102 EXT4_SB(sb)->s_ext_min = 1 << 30;
3103 EXT4_SB(sb)->s_ext_max = 0;
3104 #endif
3105 }
3106 }
3107
3108 /*
3109 * called at umount time
3110 */
3111 void ext4_ext_release(struct super_block *sb)
3112 {
3113 if (!ext4_has_feature_extents(sb))
3114 return;
3115
3116 #ifdef EXTENTS_STATS
3117 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3118 struct ext4_sb_info *sbi = EXT4_SB(sb);
3119 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3120 sbi->s_ext_blocks, sbi->s_ext_extents,
3121 sbi->s_ext_blocks / sbi->s_ext_extents);
3122 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3123 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3124 }
3125 #endif
3126 }
3127
3128 static void ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3129 {
3130 ext4_lblk_t ee_block;
3131 ext4_fsblk_t ee_pblock;
3132 unsigned int ee_len;
3133
3134 ee_block = le32_to_cpu(ex->ee_block);
3135 ee_len = ext4_ext_get_actual_len(ex);
3136 ee_pblock = ext4_ext_pblock(ex);
3137
3138 if (ee_len == 0)
3139 return;
3140
3141 ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3142 EXTENT_STATUS_WRITTEN, 0);
3143 }
3144
3145 /* FIXME!! we need to try to merge to left or right after zero-out */
3146 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3147 {
3148 ext4_fsblk_t ee_pblock;
3149 unsigned int ee_len;
3150
3151 ee_len = ext4_ext_get_actual_len(ex);
3152 ee_pblock = ext4_ext_pblock(ex);
3153 return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
3154 ee_len);
3155 }
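
/*
 * Example (illustrative): for a 3-block unwritten extent at logical block
 * 100, ext4_ext_zeroout() issues a single zeroout over its physical range;
 * the callers below then mark the extent initialized and record it in the
 * extent status tree via ext4_zeroout_es().
 */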
3156
3157 /*
3158 * ext4_split_extent_at() splits an extent at given block.
3159 *
3160 * @handle: the journal handle
3161 * @inode: the file inode
3162 * @path: the path to the extent
3163 * @split: the logical block where the extent is split.
3164 * @split_flag: indicates whether the extent can be zeroed out if the split
3165 * fails, and the states (initialized or unwritten) of the new extents.
3166 * @flags: flags used to insert the new extent into the extent tree.
3167 *
3168 *
3169 * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
3170 * of which are determined by split_flag.
3171 *
3172 * There are two cases:
3173 * a> the extent is split into two extents.
3174 * b> no split is needed, and the extent is just marked.
3175 *
3176 * Return an extent path pointer on success, or an error pointer on failure.
3177 */
3178 static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
3179 struct inode *inode,
3180 struct ext4_ext_path *path,
3181 ext4_lblk_t split,
3182 int split_flag, int flags)
3183 {
3184 ext4_fsblk_t newblock;
3185 ext4_lblk_t ee_block;
3186 struct ext4_extent *ex, newex, orig_ex, zero_ex;
3187 struct ext4_extent *ex2 = NULL;
3188 unsigned int ee_len, depth;
3189 int err = 0;
3190
3191 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3192 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3193
3194 ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
3195
3196 ext4_ext_show_leaf(inode, path);
3197
3198 depth = ext_depth(inode);
3199 ex = path[depth].p_ext;
3200 ee_block = le32_to_cpu(ex->ee_block);
3201 ee_len = ext4_ext_get_actual_len(ex);
3202 newblock = split - ee_block + ext4_ext_pblock(ex);
3203
3204 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3205 BUG_ON(!ext4_ext_is_unwritten(ex) &&
3206 split_flag & (EXT4_EXT_MAY_ZEROOUT |
3207 EXT4_EXT_MARK_UNWRIT1 |
3208 EXT4_EXT_MARK_UNWRIT2));
3209
3210 err = ext4_ext_get_access(handle, inode, path + depth);
3211 if (err)
3212 goto out;
3213
3214 if (split == ee_block) {
3215 /*
3216 * case b: block @split is the block that the extent begins with;
3217 * we just change the state of the extent, and splitting
3218 * is not needed.
3219 */
3220 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3221 ext4_ext_mark_unwritten(ex);
3222 else
3223 ext4_ext_mark_initialized(ex);
3224
3225 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3226 ext4_ext_try_to_merge(handle, inode, path, ex);
3227
3228 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3229 goto out;
3230 }
3231
3232 /* case a */
3233 memcpy(&orig_ex, ex, sizeof(orig_ex));
3234 ex->ee_len = cpu_to_le16(split - ee_block);
3235 if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3236 ext4_ext_mark_unwritten(ex);
3237
3238 /*
3239 * path may lead to a new leaf, not to the original leaf any more,
3240 * after ext4_ext_insert_extent() returns.
3241 */
3242 err = ext4_ext_dirty(handle, inode, path + depth);
3243 if (err)
3244 goto fix_extent_len;
3245
3246 ex2 = &newex;
3247 ex2->ee_block = cpu_to_le32(split);
3248 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
3249 ext4_ext_store_pblock(ex2, newblock);
3250 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3251 ext4_ext_mark_unwritten(ex2);
3252
3253 path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3254 if (!IS_ERR(path))
3255 goto out;
3256
3257 err = PTR_ERR(path);
3258 if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
3259 return path;
3260
3261 /*
3262 * Get a new path to try to zeroout or fix the extent length.
3263 * Using EXT4_EX_NOFAIL guarantees that ext4_find_extent()
3264 * will not return -ENOMEM, otherwise -ENOMEM will cause a
3265 * retry in do_writepages(), and a WARN_ON may be triggered
3266 * in ext4_da_update_reserve_space() due to an incorrect
3267 * ee_len causing the i_reserved_data_blocks exception.
3268 */
3269 path = ext4_find_extent(inode, ee_block, NULL, flags | EXT4_EX_NOFAIL);
3270 if (IS_ERR(path)) {
3271 EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
3272 split, PTR_ERR(path));
3273 return path;
3274 }
3275 depth = ext_depth(inode);
3276 ex = path[depth].p_ext;
3277
3278 if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
3279 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3280 if (split_flag & EXT4_EXT_DATA_VALID1) {
3281 err = ext4_ext_zeroout(inode, ex2);
3282 zero_ex.ee_block = ex2->ee_block;
3283 zero_ex.ee_len = cpu_to_le16(
3284 ext4_ext_get_actual_len(ex2));
3285 ext4_ext_store_pblock(&zero_ex,
3286 ext4_ext_pblock(ex2));
3287 } else {
3288 err = ext4_ext_zeroout(inode, ex);
3289 zero_ex.ee_block = ex->ee_block;
3290 zero_ex.ee_len = cpu_to_le16(
3291 ext4_ext_get_actual_len(ex));
3292 ext4_ext_store_pblock(&zero_ex,
3293 ext4_ext_pblock(ex));
3294 }
3295 } else {
3296 err = ext4_ext_zeroout(inode, &orig_ex);
3297 zero_ex.ee_block = orig_ex.ee_block;
3298 zero_ex.ee_len = cpu_to_le16(
3299 ext4_ext_get_actual_len(&orig_ex));
3300 ext4_ext_store_pblock(&zero_ex,
3301 ext4_ext_pblock(&orig_ex));
3302 }
3303
3304 if (!err) {
3305 /* update the extent length and mark as initialized */
3306 ex->ee_len = cpu_to_le16(ee_len);
3307 ext4_ext_try_to_merge(handle, inode, path, ex);
3308 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3309 if (!err)
3310 /* update extent status tree */
3311 ext4_zeroout_es(inode, &zero_ex);
3312 /* If we failed at this point, we don't know in which
3313 * state the extent tree exactly is so don't try to fix
3314 * length of the original extent as it may do even more
3315 * damage.
3316 */
3317 goto out;
3318 }
3319 }
3320
3321 fix_extent_len:
3322 ex->ee_len = orig_ex.ee_len;
3323 /*
3324 * Ignore ext4_ext_dirty return value since we are already in error path
3325 * and err is a non-zero error code.
3326 */
3327 ext4_ext_dirty(handle, inode, path + path->p_depth);
3328 out:
3329 if (err) {
3330 ext4_free_ext_path(path);
3331 path = ERR_PTR(err);
3332 }
3333 ext4_ext_show_leaf(inode, path);
3334 return path;
3335 }
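
/*
 * Worked example for ext4_split_extent_at() (hypothetical extent):
 * splitting an unwritten extent [100, 149] at block 120 with
 * EXT4_EXT_MARK_UNWRIT1 | EXT4_EXT_MARK_UNWRIT2 yields [100, 119] and
 * [120, 149], both still unwritten. If inserting the second half fails
 * with -ENOSPC and EXT4_EXT_MAY_ZEROOUT was set, the whole original range
 * is zeroed out instead and kept as a single initialized extent.
 */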
3336
3337 /*
3338 * ext4_split_extent() splits an extent and marks the extent covered
3339 * by @map as @split_flag indicates.
3340 *
3341 * It may result in splitting the extent into multiple extents (up to three).
3342 * There are three possibilities:
3343 * a> There is no split required
3344 * b> Splits in two extents: Split is happening at either end of the extent
3345 * c> Splits in three extents: Someone is splitting in the middle of the extent
3346 *
3347 */
3348 static struct ext4_ext_path *ext4_split_extent(handle_t *handle,
3349 struct inode *inode,
3350 struct ext4_ext_path *path,
3351 struct ext4_map_blocks *map,
3352 int split_flag, int flags,
3353 unsigned int *allocated)
3354 {
3355 ext4_lblk_t ee_block;
3356 struct ext4_extent *ex;
3357 unsigned int ee_len, depth;
3358 int unwritten;
3359 int split_flag1, flags1;
3360
3361 depth = ext_depth(inode);
3362 ex = path[depth].p_ext;
3363 ee_block = le32_to_cpu(ex->ee_block);
3364 ee_len = ext4_ext_get_actual_len(ex);
3365 unwritten = ext4_ext_is_unwritten(ex);
3366
3367 if (map->m_lblk + map->m_len < ee_block + ee_len) {
3368 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3369 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3370 if (unwritten)
3371 split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3372 EXT4_EXT_MARK_UNWRIT2;
3373 if (split_flag & EXT4_EXT_DATA_VALID2)
3374 split_flag1 |= EXT4_EXT_DATA_VALID1;
3375 path = ext4_split_extent_at(handle, inode, path,
3376 map->m_lblk + map->m_len, split_flag1, flags1);
3377 if (IS_ERR(path))
3378 return path;
3379 /*
3380 * An updated path is required because the previous
3381 * ext4_split_extent_at() may result in a split of the original leaf
3382 * or an extent zeroout.
3382 */
3383 path = ext4_find_extent(inode, map->m_lblk, path, flags);
3384 if (IS_ERR(path))
3385 return path;
3386 depth = ext_depth(inode);
3387 ex = path[depth].p_ext;
3388 if (!ex) {
3389 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3390 (unsigned long) map->m_lblk);
3391 ext4_free_ext_path(path);
3392 return ERR_PTR(-EFSCORRUPTED);
3393 }
3394 unwritten = ext4_ext_is_unwritten(ex);
3395 }
3396
3397 if (map->m_lblk >= ee_block) {
3398 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3399 if (unwritten) {
3400 split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3401 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3402 EXT4_EXT_MARK_UNWRIT2);
3403 }
3404 path = ext4_split_extent_at(handle, inode, path,
3405 map->m_lblk, split_flag1, flags);
3406 if (IS_ERR(path))
3407 return path;
3408 }
3409
3410 if (allocated) {
3411 if (map->m_lblk + map->m_len > ee_block + ee_len)
3412 *allocated = ee_len - (map->m_lblk - ee_block);
3413 else
3414 *allocated = map->m_len;
3415 }
3416 ext4_ext_show_leaf(inode, path);
3417 return path;
3418 }
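
/*
 * Example for ext4_split_extent(): with an extent covering [100, 199] and
 * @map spanning [120, 139], the first split happens at block 140 (the
 * right boundary of @map) and the second at block 120, producing up to
 * three extents: [100, 119], [120, 139] and [140, 199]. *allocated is set
 * to 20, the number of blocks covered by @map.
 */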
3419
3420 /*
3421 * This function is called by ext4_ext_map_blocks() if someone tries to write
3422 * to an unwritten extent. It may result in splitting the unwritten
3423 * extent into multiple extents (up to three - one initialized and two
3424 * unwritten).
3425 * There are three possibilities:
3426 * a> There is no split required: Entire extent should be initialized
3427 * b> Splits in two extents: Write is happening at either end of the extent
3428 * c> Splits in three extents: Someone is writing in the middle of the extent
3429 *
3430 * Pre-conditions:
3431 * - The extent pointed to by 'path' is unwritten.
3432 * - The extent pointed to by 'path' contains a superset
3433 * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3434 *
3435 * Post-conditions on success:
3436 * - the returned value is the number of blocks beyond map->m_lblk
3437 * that are allocated and initialized.
3438 * It is guaranteed to be >= map->m_len.
3439 */
3440 static struct ext4_ext_path *
3441 ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
3442 struct ext4_map_blocks *map, struct ext4_ext_path *path,
3443 int flags, unsigned int *allocated)
3444 {
3445 struct ext4_sb_info *sbi;
3446 struct ext4_extent_header *eh;
3447 struct ext4_map_blocks split_map;
3448 struct ext4_extent zero_ex1, zero_ex2;
3449 struct ext4_extent *ex, *abut_ex;
3450 ext4_lblk_t ee_block, eof_block;
3451 unsigned int ee_len, depth, map_len = map->m_len;
3452 int err = 0;
3453 int split_flag = EXT4_EXT_DATA_VALID2;
3454 unsigned int max_zeroout = 0;
3455
3456 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3457 (unsigned long long)map->m_lblk, map_len);
3458
3459 sbi = EXT4_SB(inode->i_sb);
3460 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3461 >> inode->i_sb->s_blocksize_bits;
3462 if (eof_block < map->m_lblk + map_len)
3463 eof_block = map->m_lblk + map_len;
3464
3465 depth = ext_depth(inode);
3466 eh = path[depth].p_hdr;
3467 ex = path[depth].p_ext;
3468 ee_block = le32_to_cpu(ex->ee_block);
3469 ee_len = ext4_ext_get_actual_len(ex);
3470 zero_ex1.ee_len = 0;
3471 zero_ex2.ee_len = 0;
3472
3473 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3474
3475 /* Pre-conditions */
3476 BUG_ON(!ext4_ext_is_unwritten(ex));
3477 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3478
3479 /*
3480 * Attempt to transfer newly initialized blocks from the currently
3481 * unwritten extent to its neighbor. This is much cheaper
3482 * than an insertion followed by a merge as those involve costly
3483 * memmove() calls. Transferring to the left is the common case in
3484 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3485 * followed by append writes.
3486 *
3487 * Limitations of the current logic:
3488 * - L1: we do not deal with writes covering the whole extent.
3489 * This would require removing the extent if the transfer
3490 * is possible.
3491 * - L2: we only attempt to merge with an extent stored in the
3492 * same extent tree node.
3493 */
3494 *allocated = 0;
3495 if ((map->m_lblk == ee_block) &&
3496 /* See if we can merge left */
3497 (map_len < ee_len) && /*L1*/
3498 (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/
3499 ext4_lblk_t prev_lblk;
3500 ext4_fsblk_t prev_pblk, ee_pblk;
3501 unsigned int prev_len;
3502
3503 abut_ex = ex - 1;
3504 prev_lblk = le32_to_cpu(abut_ex->ee_block);
3505 prev_len = ext4_ext_get_actual_len(abut_ex);
3506 prev_pblk = ext4_ext_pblock(abut_ex);
3507 ee_pblk = ext4_ext_pblock(ex);
3508
3509 /*
3510 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3511 * upon those conditions:
3512 * - C1: abut_ex is initialized,
3513 * - C2: abut_ex is logically abutting ex,
3514 * - C3: abut_ex is physically abutting ex,
3515 * - C4: abut_ex can receive the additional blocks without
3516 * overflowing the (initialized) length limit.
3517 */
3518 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
3519 ((prev_lblk + prev_len) == ee_block) && /*C2*/
3520 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/
3521 (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
3522 err = ext4_ext_get_access(handle, inode, path + depth);
3523 if (err)
3524 goto errout;
3525
3526 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3527 map, ex, abut_ex);
3528
3529 /* Shift the start of ex by 'map_len' blocks */
3530 ex->ee_block = cpu_to_le32(ee_block + map_len);
3531 ext4_ext_store_pblock(ex, ee_pblk + map_len);
3532 ex->ee_len = cpu_to_le16(ee_len - map_len);
3533 ext4_ext_mark_unwritten(ex); /* Restore the flag */
3534
3535 /* Extend abut_ex by 'map_len' blocks */
3536 abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3537
3538 /* Result: number of initialized blocks past m_lblk */
3539 *allocated = map_len;
3540 }
3541 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3542 (map_len < ee_len) && /*L1*/
3543 ex < EXT_LAST_EXTENT(eh)) { /*L2*/
3544 /* See if we can merge right */
3545 ext4_lblk_t next_lblk;
3546 ext4_fsblk_t next_pblk, ee_pblk;
3547 unsigned int next_len;
3548
3549 abut_ex = ex + 1;
3550 next_lblk = le32_to_cpu(abut_ex->ee_block);
3551 next_len = ext4_ext_get_actual_len(abut_ex);
3552 next_pblk = ext4_ext_pblock(abut_ex);
3553 ee_pblk = ext4_ext_pblock(ex);
3554
3555 /*
3556 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3557 * under the following conditions:
3558 * - C1: abut_ex is initialized,
3559 * - C2: abut_ex is logically abutting ex,
3560 * - C3: abut_ex is physically abutting ex,
3561 * - C4: abut_ex can receive the additional blocks without
3562 * overflowing the (initialized) length limit.
3563 */
3564 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
3565 ((map->m_lblk + map_len) == next_lblk) && /*C2*/
3566 ((ee_pblk + ee_len) == next_pblk) && /*C3*/
3567 (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
3568 err = ext4_ext_get_access(handle, inode, path + depth);
3569 if (err)
3570 goto errout;
3571
3572 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3573 map, ex, abut_ex);
3574
3575 /* Shift the start of abut_ex by 'map_len' blocks */
3576 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3577 ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3578 ex->ee_len = cpu_to_le16(ee_len - map_len);
3579 ext4_ext_mark_unwritten(ex); /* Restore the flag */
3580
3581 /* Extend abut_ex by 'map_len' blocks */
3582 abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3583
3584 /* Result: number of initialized blocks past m_lblk */
3585 *allocated = map_len;
3586 }
3587 }
3588 if (*allocated) {
3589 /* Mark the block containing both extents as dirty */
3590 err = ext4_ext_dirty(handle, inode, path + depth);
3591
3592 /* Update path to point to the right extent */
3593 path[depth].p_ext = abut_ex;
3594 if (err)
3595 goto errout;
3596 goto out;
3597 } else
3598 *allocated = ee_len - (map->m_lblk - ee_block);
3599
3600 WARN_ON(map->m_lblk < ee_block);
3601 /*
3602 * It is safe to convert an extent to initialized via explicit
3603 * zeroout only if the extent is fully inside i_size or new_size.
3604 */
3605 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3606
3607 if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3608 max_zeroout = sbi->s_extent_max_zeroout_kb >>
3609 (inode->i_sb->s_blocksize_bits - 10);
3610
3611 /*
3612 * five cases:
3613 * 1. split the extent into three extents.
3614 * 2. split the extent into two extents, zeroout the head of the first
3615 * extent.
3616 * 3. split the extent into two extents, zeroout the tail of the second
3617 * extent.
3618 * 4. split the extent into two extents without zeroout.
3619 * 5. no splitting needed, just possibly zeroout the head and / or the
3620 * tail of the extent.
3621 */
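	/*
	 * Worked example (illustrative): for an unwritten extent
	 * [100..115] and a write map [104..107], *allocated is 12 at
	 * this point. With no zeroout allowed this is case 1: split into
	 * [100..103] unwritten, [104..107] written and [108..115]
	 * unwritten. If max_zeroout permits, the tail [108..115] may be
	 * zeroed out instead (case 3), and zeroing the head [100..103]
	 * as well leaves nothing to split (case 5).
	 */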
3622 split_map.m_lblk = map->m_lblk;
3623 split_map.m_len = map->m_len;
3624
3625 if (max_zeroout && (*allocated > split_map.m_len)) {
3626 if (*allocated <= max_zeroout) {
3627 /* case 3 or 5 */
3628 zero_ex1.ee_block =
3629 cpu_to_le32(split_map.m_lblk +
3630 split_map.m_len);
3631 zero_ex1.ee_len =
3632 cpu_to_le16(*allocated - split_map.m_len);
3633 ext4_ext_store_pblock(&zero_ex1,
3634 ext4_ext_pblock(ex) + split_map.m_lblk +
3635 split_map.m_len - ee_block);
3636 err = ext4_ext_zeroout(inode, &zero_ex1);
3637 if (err)
3638 goto fallback;
3639 split_map.m_len = *allocated;
3640 }
3641 if (split_map.m_lblk - ee_block + split_map.m_len <
3642 max_zeroout) {
3643 /* case 2 or 5 */
3644 if (split_map.m_lblk != ee_block) {
3645 zero_ex2.ee_block = ex->ee_block;
3646 zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
3647 ee_block);
3648 ext4_ext_store_pblock(&zero_ex2,
3649 ext4_ext_pblock(ex));
3650 err = ext4_ext_zeroout(inode, &zero_ex2);
3651 if (err)
3652 goto fallback;
3653 }
3654
3655 split_map.m_len += split_map.m_lblk - ee_block;
3656 split_map.m_lblk = ee_block;
3657 *allocated = map->m_len;
3658 }
3659 }
3660
3661 fallback:
3662 path = ext4_split_extent(handle, inode, path, &split_map, split_flag,
3663 flags, NULL);
3664 if (IS_ERR(path))
3665 return path;
3666 out:
3667 /* If we have gotten a failure, don't zero out status tree */
3668 ext4_zeroout_es(inode, &zero_ex1);
3669 ext4_zeroout_es(inode, &zero_ex2);
3670 return path;
3671
3672 errout:
3673 ext4_free_ext_path(path);
3674 return ERR_PTR(err);
3675 }
3676
3677 /*
3678 * This function is called by ext4_ext_map_blocks() from
3679 * ext4_get_blocks_dio_write() when DIO writes
3680 * to an unwritten extent.
3681 *
3682 * Writing to an unwritten extent may result in splitting the unwritten
3683 * extent into multiple initialized/unwritten extents (up to three).
3684 * There are three possibilities:
3685 * a> No split is required: the entire extent stays unwritten
3686 * b> Splits into two extents: the write happens at either end of the extent
3687 * c> Splits into three extents: someone is writing in the middle of the extent
3688 *
3689 * This works the same way in the case of initialized -> unwritten conversion.
3690 *
3691 * One or more index blocks may be needed if the extent tree grows after
3692 * the unwritten extent is split. To prevent ENOSPC from occurring at IO
3693 * completion, we need to split the unwritten extent before submitting
3694 * the IO. The unwritten extent in question will be split
3695 * into three unwritten extents (at most). After IO completes, the part
3696 * being filled will be converted to initialized by the end_io callback function
3697 * via ext4_convert_unwritten_extents().
3698 *
3699 * The size of unwritten extent to be written is passed to the caller via the
3700 * allocated pointer. Return an extent path pointer on success, or an error
3701 * pointer on failure.
3702 */
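/*
 * Illustrative sketch of the three possibilities above for an unwritten
 * extent covering logical blocks [100..115]: a DIO write of [100..115]
 * needs no split (case a); writes of [100..107] or [108..115] split the
 * extent in two (case b); a write of [104..111] splits it in three
 * (case c). In every case the resulting pieces stay unwritten until the
 * end_io conversion runs.
 */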
3703 static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
3704 struct inode *inode,
3705 struct ext4_map_blocks *map,
3706 struct ext4_ext_path *path,
3707 int flags, unsigned int *allocated)
3708 {
3709 ext4_lblk_t eof_block;
3710 ext4_lblk_t ee_block;
3711 struct ext4_extent *ex;
3712 unsigned int ee_len;
3713 int split_flag = 0, depth;
3714
3715 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3716 (unsigned long long)map->m_lblk, map->m_len);
3717
3718 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3719 >> inode->i_sb->s_blocksize_bits;
3720 if (eof_block < map->m_lblk + map->m_len)
3721 eof_block = map->m_lblk + map->m_len;
3722 /*
3723 * It is safe to convert an extent to initialized via explicit
3724 * zeroout only if the extent is fully inside i_size or new_size.
3725 */
3726 depth = ext_depth(inode);
3727 ex = path[depth].p_ext;
3728 ee_block = le32_to_cpu(ex->ee_block);
3729 ee_len = ext4_ext_get_actual_len(ex);
3730
3731 /* Convert to unwritten */
3732 if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3733 split_flag |= EXT4_EXT_DATA_VALID1;
3734 /* Convert to initialized */
3735 } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3736 split_flag |= ee_block + ee_len <= eof_block ?
3737 EXT4_EXT_MAY_ZEROOUT : 0;
3738 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3739 }
3740 flags |= EXT4_GET_BLOCKS_PRE_IO;
3741 return ext4_split_extent(handle, inode, path, map, split_flag, flags,
3742 allocated);
3743 }
3744
3745 static struct ext4_ext_path *
3746 ext4_convert_unwritten_extents_endio(handle_t *handle, struct inode *inode,
3747 struct ext4_map_blocks *map,
3748 struct ext4_ext_path *path)
3749 {
3750 struct ext4_extent *ex;
3751 ext4_lblk_t ee_block;
3752 unsigned int ee_len;
3753 int depth;
3754 int err = 0;
3755
3756 depth = ext_depth(inode);
3757 ex = path[depth].p_ext;
3758 ee_block = le32_to_cpu(ex->ee_block);
3759 ee_len = ext4_ext_get_actual_len(ex);
3760
3761 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3762 (unsigned long long)ee_block, ee_len);
3763
3764 /* If the extent is larger than requested, it is a clear sign that we still
3765 * have some extent state machine issues left. So extent_split is still
3766 * required.
3767 * TODO: Once all related issues are fixed, this situation should be
3768 * treated as illegal.
3769 */
3770 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3771 #ifdef CONFIG_EXT4_DEBUG
3772 ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
3773 " len %u; IO logical block %llu, len %u",
3774 inode->i_ino, (unsigned long long)ee_block, ee_len,
3775 (unsigned long long)map->m_lblk, map->m_len);
3776 #endif
3777 path = ext4_split_convert_extents(handle, inode, map, path,
3778 EXT4_GET_BLOCKS_CONVERT, NULL);
3779 if (IS_ERR(path))
3780 return path;
3781
3782 path = ext4_find_extent(inode, map->m_lblk, path, 0);
3783 if (IS_ERR(path))
3784 return path;
3785 depth = ext_depth(inode);
3786 ex = path[depth].p_ext;
3787 }
3788
3789 err = ext4_ext_get_access(handle, inode, path + depth);
3790 if (err)
3791 goto errout;
3792 /* first mark the extent as initialized */
3793 ext4_ext_mark_initialized(ex);
3794
3795 /* note: ext4_ext_correct_indexes() isn't needed here because
3796 * borders are not changed
3797 */
3798 ext4_ext_try_to_merge(handle, inode, path, ex);
3799
3800 /* Mark modified extent as dirty */
3801 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3802 if (err)
3803 goto errout;
3804
3805 ext4_ext_show_leaf(inode, path);
3806 return path;
3807
3808 errout:
3809 ext4_free_ext_path(path);
3810 return ERR_PTR(err);
3811 }
3812
3813 static struct ext4_ext_path *
3814 convert_initialized_extent(handle_t *handle, struct inode *inode,
3815 struct ext4_map_blocks *map,
3816 struct ext4_ext_path *path,
3817 unsigned int *allocated)
3818 {
3819 struct ext4_extent *ex;
3820 ext4_lblk_t ee_block;
3821 unsigned int ee_len;
3822 int depth;
3823 int err = 0;
3824
3825 /*
3826 * Make sure that the extent is no bigger than the maximum we support
3827 * for an unwritten extent
3828 */
3829 if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3830 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3831
3832 depth = ext_depth(inode);
3833 ex = path[depth].p_ext;
3834 ee_block = le32_to_cpu(ex->ee_block);
3835 ee_len = ext4_ext_get_actual_len(ex);
3836
3837 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3838 (unsigned long long)ee_block, ee_len);
3839
3840 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3841 path = ext4_split_convert_extents(handle, inode, map, path,
3842 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN, NULL);
3843 if (IS_ERR(path))
3844 return path;
3845
3846 path = ext4_find_extent(inode, map->m_lblk, path, 0);
3847 if (IS_ERR(path))
3848 return path;
3849 depth = ext_depth(inode);
3850 ex = path[depth].p_ext;
3851 if (!ex) {
3852 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3853 (unsigned long) map->m_lblk);
3854 err = -EFSCORRUPTED;
3855 goto errout;
3856 }
3857 }
3858
3859 err = ext4_ext_get_access(handle, inode, path + depth);
3860 if (err)
3861 goto errout;
3862 /* first mark the extent as unwritten */
3863 ext4_ext_mark_unwritten(ex);
3864
3865 /* note: ext4_ext_correct_indexes() isn't needed here because
3866 * borders are not changed
3867 */
3868 ext4_ext_try_to_merge(handle, inode, path, ex);
3869
3870 /* Mark modified extent as dirty */
3871 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3872 if (err)
3873 goto errout;
3874 ext4_ext_show_leaf(inode, path);
3875
3876 ext4_update_inode_fsync_trans(handle, inode, 1);
3877
3878 map->m_flags |= EXT4_MAP_UNWRITTEN;
3879 if (*allocated > map->m_len)
3880 *allocated = map->m_len;
3881 map->m_len = *allocated;
3882 return path;
3883
3884 errout:
3885 ext4_free_ext_path(path);
3886 return ERR_PTR(err);
3887 }
3888
3889 static struct ext4_ext_path *
3890 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
3891 struct ext4_map_blocks *map,
3892 struct ext4_ext_path *path, int flags,
3893 unsigned int *allocated, ext4_fsblk_t newblock)
3894 {
3895 int err = 0;
3896
3897 ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
3898 (unsigned long long)map->m_lblk, map->m_len, flags,
3899 *allocated);
3900 ext4_ext_show_leaf(inode, path);
3901
3902 /*
3903 * When writing into unwritten space, we should not fail to
3904 * allocate metadata blocks for the new extent block if needed.
3905 */
3906 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
3907
3908 trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
3909 *allocated, newblock);
3910
3911 /* get_block() before submitting IO, split the extent */
3912 if (flags & EXT4_GET_BLOCKS_PRE_IO) {
3913 path = ext4_split_convert_extents(handle, inode, map, path,
3914 flags | EXT4_GET_BLOCKS_CONVERT, allocated);
3915 if (IS_ERR(path))
3916 return path;
3917 /*
3918 * shouldn't get a 0 allocated when splitting an extent unless
3919 * m_len is 0 (bug) or extent has been corrupted
3920 */
3921 if (unlikely(*allocated == 0)) {
3922 EXT4_ERROR_INODE(inode,
3923 "unexpected allocated == 0, m_len = %u",
3924 map->m_len);
3925 err = -EFSCORRUPTED;
3926 goto errout;
3927 }
3928 map->m_flags |= EXT4_MAP_UNWRITTEN;
3929 goto out;
3930 }
3931 /* IO end_io complete, convert the filled extent to written */
3932 if (flags & EXT4_GET_BLOCKS_CONVERT) {
3933 path = ext4_convert_unwritten_extents_endio(handle, inode,
3934 map, path);
3935 if (IS_ERR(path))
3936 return path;
3937 ext4_update_inode_fsync_trans(handle, inode, 1);
3938 goto map_out;
3939 }
3940 /* buffered IO cases */
3941 /*
3942 * repeated fallocate creation request:
3943 * we already have an unwritten extent
3944 */
3945 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
3946 map->m_flags |= EXT4_MAP_UNWRITTEN;
3947 goto map_out;
3948 }
3949
3950 /* buffered READ or buffered write_begin() lookup */
3951 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3952 /*
3953 * We have blocks reserved already. We
3954 * return allocated blocks so that delalloc
3955 * won't do block reservation for us. But
3956 * the buffer head will be unmapped so that
3957 * a read from the block returns 0s.
3958 */
3959 map->m_flags |= EXT4_MAP_UNWRITTEN;
3960 goto out1;
3961 }
3962
3963 /*
3964 * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
3965 * For buffered writes, at writepage time, etc. Convert a
3966 * discovered unwritten extent to written.
3967 */
3968 path = ext4_ext_convert_to_initialized(handle, inode, map, path,
3969 flags, allocated);
3970 if (IS_ERR(path))
3971 return path;
3972 ext4_update_inode_fsync_trans(handle, inode, 1);
3973 /*
3974 * shouldn't get a 0 allocated when converting an unwritten extent
3975 * unless m_len is 0 (bug) or extent has been corrupted
3976 */
3977 if (unlikely(*allocated == 0)) {
3978 EXT4_ERROR_INODE(inode, "unexpected allocated == 0, m_len = %u",
3979 map->m_len);
3980 err = -EFSCORRUPTED;
3981 goto errout;
3982 }
3983
3984 out:
3985 map->m_flags |= EXT4_MAP_NEW;
3986 map_out:
3987 map->m_flags |= EXT4_MAP_MAPPED;
3988 out1:
3989 map->m_pblk = newblock;
3990 if (*allocated > map->m_len)
3991 *allocated = map->m_len;
3992 map->m_len = *allocated;
3993 ext4_ext_show_leaf(inode, path);
3994 return path;
3995
3996 errout:
3997 ext4_free_ext_path(path);
3998 return ERR_PTR(err);
3999 }
4000
4001 /*
4002 * get_implied_cluster_alloc - check to see if the requested
4003 * allocation (in the map structure) overlaps with a cluster already
4004 * allocated in an extent.
4005 * @sb The filesystem superblock structure
4006 * @map The requested lblk->pblk mapping
4007 * @ex The extent structure which might contain an implied
4008 * cluster allocation
4009 *
4010 * This function is called by ext4_ext_map_blocks() after we failed to
4011 * find blocks that were already in the inode's extent tree. Hence,
4012 * we know that the beginning of the requested region cannot overlap
4013 * the extent from the inode's extent tree. There are three cases we
4014 * want to catch. The first is this case:
4015 *
4016 * |--- cluster # N--|
4017 * |--- extent ---| |---- requested region ---|
4018 * |==========|
4019 *
4020 * The second case that we need to test for is this one:
4021 *
4022 * |--------- cluster # N ----------------|
4023 * |--- requested region --| |------- extent ----|
4024 * |=======================|
4025 *
4026 * The third case is when the requested region lies between two extents
4027 * within the same cluster:
4028 * |------------- cluster # N-------------|
4029 * |----- ex -----| |---- ex_right ----|
4030 * |------ requested region ------|
4031 * |================|
4032 *
4033 * In each of the above cases, we need to set the map->m_pblk and
4034 * map->m_len so that they correspond to the extent labelled as
4035 * "|====|" from cluster #N, since it is already in use for data in
4036 * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
4037 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
4038 * as a new "allocated" block region. Otherwise, we will return 0 and
4039 * ext4_ext_map_blocks() will then allocate one or more new clusters
4040 * by calling ext4_mb_new_blocks().
4041 */
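/*
 * Worked example (illustrative, assuming a bigalloc filesystem with
 * s_cluster_ratio = 16, i.e. 16 blocks per cluster):
 *
 *   extent:  ee_block = 100, ee_len = 4  -> covers lblks 100..103
 *   request: map->m_lblk = 108           -> c_offset = 108 % 16 = 12
 *
 *   ex_cluster_start = 100 / 16 = 6
 *   ex_cluster_end   = 103 / 16 = 6
 *   rr_cluster_start = 108 / 16 = 6
 *
 * The request shares cluster #6 with the extent, so m_pblk becomes the
 * physical base of that cluster plus 12, m_len is clamped to the
 * remaining 16 - 12 = 4 blocks of the cluster (and further so that it
 * does not run into the next allocated block), and the function
 * returns 1.
 */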
4042 static int get_implied_cluster_alloc(struct super_block *sb,
4043 struct ext4_map_blocks *map,
4044 struct ext4_extent *ex,
4045 struct ext4_ext_path *path)
4046 {
4047 struct ext4_sb_info *sbi = EXT4_SB(sb);
4048 ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4049 ext4_lblk_t ex_cluster_start, ex_cluster_end;
4050 ext4_lblk_t rr_cluster_start;
4051 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4052 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4053 unsigned short ee_len = ext4_ext_get_actual_len(ex);
4054
4055 /* The extent passed in that we are trying to match */
4056 ex_cluster_start = EXT4_B2C(sbi, ee_block);
4057 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
4058
4059 /* The requested region passed into ext4_map_blocks() */
4060 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
4061
4062 if ((rr_cluster_start == ex_cluster_end) ||
4063 (rr_cluster_start == ex_cluster_start)) {
4064 if (rr_cluster_start == ex_cluster_end)
4065 ee_start += ee_len - 1;
4066 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
4067 map->m_len = min(map->m_len,
4068 (unsigned) sbi->s_cluster_ratio - c_offset);
4069 /*
4070 * Check for and handle this case:
4071 *
4072 * |--------- cluster # N-------------|
4073 * |------- extent ----|
4074 * |--- requested region ---|
4075 * |===========|
4076 */
4077
4078 if (map->m_lblk < ee_block)
4079 map->m_len = min(map->m_len, ee_block - map->m_lblk);
4080
4081 /*
4082 * Check for the case where there is already another allocated
4083 * block to the right of 'ex' but before the end of the cluster.
4084 *
4085 * |------------- cluster # N-------------|
4086 * |----- ex -----| |---- ex_right ----|
4087 * |------ requested region ------|
4088 * |================|
4089 */
4090 if (map->m_lblk > ee_block) {
4091 ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4092 map->m_len = min(map->m_len, next - map->m_lblk);
4093 }
4094
4095 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4096 return 1;
4097 }
4098
4099 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4100 return 0;
4101 }
4102
4103 /*
4104 * Determine the hole length around the given logical block: first locate
4105 * and expand the hole from the given @path, then adjust it if it has been
4106 * partially or completely converted to delayed extents, insert it
4107 * into the extent cache tree if it is indeed a hole, and finally return
4108 * the length of the determined extent.
4109 */
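/*
 * Worked example (illustrative): querying lblk 120 inside a hole
 * spanning [100..199]. If a delalloc extent sits at [105..110] (in
 * front of lblk), the search restarts from lblk 120; if one sits at
 * [115..130] (straddling lblk), the function reports 130 - 120 + 1 = 11
 * blocks without caching anything; if one sits at [150..160] (behind),
 * the hole is trimmed to [100..149] and cached, and the
 * 149 - 120 + 1 = 30 blocks after lblk are reported.
 */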
4110 static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
4111 struct ext4_ext_path *path,
4112 ext4_lblk_t lblk)
4113 {
4114 ext4_lblk_t hole_start, len;
4115 struct extent_status es;
4116
4117 hole_start = lblk;
4118 len = ext4_ext_find_hole(inode, path, &hole_start);
4119 again:
4120 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
4121 hole_start + len - 1, &es);
4122 if (!es.es_len)
4123 goto insert_hole;
4124
4125 /*
4126 * There's a delalloc extent in the hole; handle the cases where the
4127 * delalloc extent is in front of, behind, or straddles the queried range.
4128 */
4129 if (lblk >= es.es_lblk + es.es_len) {
4130 /*
4131 * The delalloc extent is in front of the queried range,
4132 * find again from the queried start block.
4133 */
4134 len -= lblk - hole_start;
4135 hole_start = lblk;
4136 goto again;
4137 } else if (in_range(lblk, es.es_lblk, es.es_len)) {
4138 /*
4139 * The delalloc extent contains lblk; it must have been
4140 * added after ext4_map_blocks() checked the extent status
4141 * tree, so we are not holding i_rwsem and the delalloc info is
4142 * only stabilized by the i_data_sem we are going to release
4143 * soon. Don't modify the extent status tree or report the
4144 * extent as a hole; just adjust the length to the part of the
4145 * delalloc extent after lblk.
4146 */
4147 len = es.es_lblk + es.es_len - lblk;
4148 return len;
4149 } else {
4150 /*
4151 * The delalloc extent is partially or completely behind
4152 * the queried range, update hole length until the
4153 * beginning of the delalloc extent.
4154 */
4155 len = min(es.es_lblk - hole_start, len);
4156 }
4157
4158 insert_hole:
4159 /* Put just found gap into cache to speed up subsequent requests */
4160 ext_debug(inode, " -> %u:%u\n", hole_start, len);
4161 ext4_es_insert_extent(inode, hole_start, len, ~0,
4162 EXTENT_STATUS_HOLE, 0);
4163
4164 /* Update hole_len to reflect hole size after lblk */
4165 if (hole_start != lblk)
4166 len -= lblk - hole_start;
4167
4168 return len;
4169 }
4170
4171 /*
4172 * Block allocation/map/preallocation routine for extent-based files
4173 *
4174 *
4175 * Need to be called with
4176 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating a file system block
4177 * (i.e., flags is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
4178 *
4179 * return > 0, number of blocks already mapped/allocated
4180 * if flags doesn't contain EXT4_GET_BLOCKS_CREATE and these are pre-allocated blocks
4181 * buffer head is unmapped
4182 * otherwise blocks are mapped
4183 *
4184 * return = 0, if plain lookup failed (blocks have not been allocated)
4185 * buffer head is unmapped
4186 *
4187 * return < 0, error case.
4188 */
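/*
 * Minimal caller sketch for the contract above (illustrative only and
 * guarded out of the build; 'handle' and 'inode' are assumed to have
 * been set up by the caller):
 */
#if 0
	struct ext4_map_blocks map = {
		.m_lblk = 0,		/* first logical block to map */
		.m_len = 16,		/* number of blocks wanted */
	};
	int ret = ext4_ext_map_blocks(handle, inode, &map, 0);

	if (ret < 0)
		return ret;		/* error case */
	else if (ret == 0)
		;			/* hole: map.m_len holds the usable hole length */
	else
		;			/* ret blocks mapped at map.m_pblk */
#endif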
4189 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4190 struct ext4_map_blocks *map, int flags)
4191 {
4192 struct ext4_ext_path *path = NULL;
4193 struct ext4_extent newex, *ex, ex2;
4194 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4195 ext4_fsblk_t newblock = 0, pblk;
4196 int err = 0, depth;
4197 unsigned int allocated = 0, offset = 0;
4198 unsigned int allocated_clusters = 0;
4199 struct ext4_allocation_request ar;
4200 ext4_lblk_t cluster_offset;
4201
4202 ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
4203 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4204
4205 /* find extent for this block */
4206 path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4207 if (IS_ERR(path)) {
4208 err = PTR_ERR(path);
4209 goto out;
4210 }
4211
4212 depth = ext_depth(inode);
4213
4214 /*
4215 * a consistent leaf must not be empty;
4216 * this situation is possible, though, _during_ tree modification;
4217 * this is why the assert can't be put in ext4_find_extent()
4218 */
4219 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4220 EXT4_ERROR_INODE(inode, "bad extent address "
4221 "lblock: %lu, depth: %d pblock %lld",
4222 (unsigned long) map->m_lblk, depth,
4223 path[depth].p_block);
4224 err = -EFSCORRUPTED;
4225 goto out;
4226 }
4227
4228 ex = path[depth].p_ext;
4229 if (ex) {
4230 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4231 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4232 unsigned short ee_len;
4233
4234
4235 /*
4236 * unwritten extents are treated as holes, except that
4237 * we split out initialized portions during a write.
4238 */
4239 ee_len = ext4_ext_get_actual_len(ex);
4240
4241 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4242
4243 /* if found extent covers block, simply return it */
4244 if (in_range(map->m_lblk, ee_block, ee_len)) {
4245 newblock = map->m_lblk - ee_block + ee_start;
4246 /* number of remaining blocks in the extent */
4247 allocated = ee_len - (map->m_lblk - ee_block);
4248 ext_debug(inode, "%u fit into %u:%d -> %llu\n",
4249 map->m_lblk, ee_block, ee_len, newblock);
4250
4251 /*
4252 * If the extent is initialized check whether the
4253 * caller wants to convert it to unwritten.
4254 */
4255 if ((!ext4_ext_is_unwritten(ex)) &&
4256 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4257 path = convert_initialized_extent(handle,
4258 inode, map, path, &allocated);
4259 if (IS_ERR(path))
4260 err = PTR_ERR(path);
4261 goto out;
4262 } else if (!ext4_ext_is_unwritten(ex)) {
4263 map->m_flags |= EXT4_MAP_MAPPED;
4264 map->m_pblk = newblock;
4265 if (allocated > map->m_len)
4266 allocated = map->m_len;
4267 map->m_len = allocated;
4268 ext4_ext_show_leaf(inode, path);
4269 goto out;
4270 }
4271
4272 path = ext4_ext_handle_unwritten_extents(
4273 handle, inode, map, path, flags,
4274 &allocated, newblock);
4275 if (IS_ERR(path))
4276 err = PTR_ERR(path);
4277 goto out;
4278 }
4279 }
4280
4281 /*
4282 * requested block isn't allocated yet;
4283 * we must not try to create blocks if flags doesn't contain EXT4_GET_BLOCKS_CREATE
4284 */
4285 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4286 ext4_lblk_t len;
4287
4288 len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
4289
4290 map->m_pblk = 0;
4291 map->m_len = min_t(unsigned int, map->m_len, len);
4292 goto out;
4293 }
4294
4295 /*
4296 * Okay, we need to do block allocation.
4297 */
4298 newex.ee_block = cpu_to_le32(map->m_lblk);
4299 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4300
4301 /*
4302 * If we are doing bigalloc, check to see if the extent returned
4303 * by ext4_find_extent() implies a cluster we can use.
4304 */
4305 if (cluster_offset && ex &&
4306 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4307 ar.len = allocated = map->m_len;
4308 newblock = map->m_pblk;
4309 goto got_allocated_blocks;
4310 }
4311
4312 /* find neighbour allocated blocks */
4313 ar.lleft = map->m_lblk;
4314 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4315 if (err)
4316 goto out;
4317 ar.lright = map->m_lblk;
4318 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4319 if (err < 0)
4320 goto out;
4321
4322 /* Check if the extent after searching to the right implies a
4323 * cluster we can use. */
4324 if ((sbi->s_cluster_ratio > 1) && err &&
4325 get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
4326 ar.len = allocated = map->m_len;
4327 newblock = map->m_pblk;
4328 err = 0;
4329 goto got_allocated_blocks;
4330 }
4331
4332 /*
4333 * See if request is beyond maximum number of blocks we can have in
4334 * a single extent. For an initialized extent this limit is
4335 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
4336 * EXT_UNWRITTEN_MAX_LEN.
4337 */
4338 if (map->m_len > EXT_INIT_MAX_LEN &&
4339 !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4340 map->m_len = EXT_INIT_MAX_LEN;
4341 else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4342 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4343 map->m_len = EXT_UNWRITTEN_MAX_LEN;
4344
4345 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4346 newex.ee_len = cpu_to_le16(map->m_len);
4347 err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4348 if (err)
4349 allocated = ext4_ext_get_actual_len(&newex);
4350 else
4351 allocated = map->m_len;
4352
4353 /* allocate new block */
4354 ar.inode = inode;
4355 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4356 ar.logical = map->m_lblk;
4357 /*
4358 * We calculate the offset from the beginning of the cluster
4359 * for the logical block number, since when we allocate a
4360 * physical cluster, the physical block should start at the
4361 * same offset from the beginning of the cluster. This is
4362 * needed so that future calls to get_implied_cluster_alloc()
4363 * work correctly.
4364 */
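	/*
	 * Worked example (illustrative, s_cluster_ratio = 16): for
	 * map->m_lblk = 108 and allocated = 4, offset = 12, so we
	 * request EXT4_NUM_B2C(sbi, 12 + 4) = 1 cluster and pull the
	 * goal and logical block back to the cluster boundary
	 * (108 - 12 = 96); the block actually mapped later is
	 * newblock + 12.
	 */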
4365 offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4366 ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4367 ar.goal -= offset;
4368 ar.logical -= offset;
4369 if (S_ISREG(inode->i_mode))
4370 ar.flags = EXT4_MB_HINT_DATA;
4371 else
4372 /* disable in-core preallocation for non-regular files */
4373 ar.flags = 0;
4374 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4375 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4376 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4377 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4378 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
4379 ar.flags |= EXT4_MB_USE_RESERVED;
4380 newblock = ext4_mb_new_blocks(handle, &ar, &err);
4381 if (!newblock)
4382 goto out;
4383 allocated_clusters = ar.len;
4384 ar.len = EXT4_C2B(sbi, ar.len) - offset;
4385 ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
4386 ar.goal, newblock, ar.len, allocated);
4387 if (ar.len > allocated)
4388 ar.len = allocated;
4389
4390 got_allocated_blocks:
4391 /* try to insert new extent into found leaf and return */
4392 pblk = newblock + offset;
4393 ext4_ext_store_pblock(&newex, pblk);
4394 newex.ee_len = cpu_to_le16(ar.len);
4395 /* Mark unwritten */
4396 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4397 ext4_ext_mark_unwritten(&newex);
4398 map->m_flags |= EXT4_MAP_UNWRITTEN;
4399 }
4400
4401 path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
4402 if (IS_ERR(path)) {
4403 err = PTR_ERR(path);
4404 if (allocated_clusters) {
4405 int fb_flags = 0;
4406
4407 /*
4408 * free the data blocks we just allocated.
4409 * It is not a good idea to call discard here directly,
4410 * but otherwise we'd need to call it on every free().
4411 */
4412 ext4_discard_preallocations(inode);
4413 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4414 fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
4415 ext4_free_blocks(handle, inode, NULL, newblock,
4416 EXT4_C2B(sbi, allocated_clusters),
4417 fb_flags);
4418 }
4419 goto out;
4420 }
4421
4422 /*
4423 * Cache the extent and update transaction to commit on fdatasync only
4424 * when it is _not_ an unwritten extent.
4425 */
4426 if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4427 ext4_update_inode_fsync_trans(handle, inode, 1);
4428 else
4429 ext4_update_inode_fsync_trans(handle, inode, 0);
4430
4431 map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
4432 map->m_pblk = pblk;
4433 map->m_len = ar.len;
4434 allocated = map->m_len;
4435 ext4_ext_show_leaf(inode, path);
4436 out:
4437 ext4_free_ext_path(path);
4438
4439 trace_ext4_ext_map_blocks_exit(inode, flags, map,
4440 err ? err : allocated);
4441 return err ? err : allocated;
4442 }
4443
4444 int ext4_ext_truncate(handle_t *handle, struct inode *inode)
4445 {
4446 struct super_block *sb = inode->i_sb;
4447 ext4_lblk_t last_block;
4448 int err = 0;
4449
4450 /*
4451 * TODO: optimization is possible here.
4452 * Probably we need not scan at all,
4453 * because page truncation is enough.
4454 */
4455
4456 /* we have to know where to truncate from in the crash case */
4457 EXT4_I(inode)->i_disksize = inode->i_size;
4458 err = ext4_mark_inode_dirty(handle, inode);
4459 if (err)
4460 return err;
4461
4462 last_block = (inode->i_size + sb->s_blocksize - 1)
4463 >> EXT4_BLOCK_SIZE_BITS(sb);
4464 ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
4465
4466 retry_remove_space:
4467 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4468 if (err == -ENOMEM) {
4469 memalloc_retry_wait(GFP_ATOMIC);
4470 goto retry_remove_space;
4471 }
4472 return err;
4473 }
4474
4475 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4476 ext4_lblk_t len, loff_t new_size,
4477 int flags)
4478 {
4479 struct inode *inode = file_inode(file);
4480 handle_t *handle;
4481 int ret = 0, ret2 = 0, ret3 = 0;
4482 int retries = 0;
4483 int depth = 0;
4484 struct ext4_map_blocks map;
4485 unsigned int credits;
4486 loff_t epos, old_size = i_size_read(inode);
4487
4488 BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
4489 map.m_lblk = offset;
4490 map.m_len = len;
4491 /*
4492 * Don't normalize the request if it can fit in one extent so
4493 * that it doesn't get unnecessarily split into multiple
4494 * extents.
4495 */
4496 if (len <= EXT_UNWRITTEN_MAX_LEN)
4497 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4498
4499 /*
4500 * credits to insert 1 extent into extent tree
4501 */
4502 credits = ext4_chunk_trans_blocks(inode, len);
4503 depth = ext_depth(inode);
4504
4505 retry:
4506 while (len) {
4507 /*
4508 * Recalculate credits when extent tree depth changes.
4509 */
4510 if (depth != ext_depth(inode)) {
4511 credits = ext4_chunk_trans_blocks(inode, len);
4512 depth = ext_depth(inode);
4513 }
4514
4515 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4516 credits);
4517 if (IS_ERR(handle)) {
4518 ret = PTR_ERR(handle);
4519 break;
4520 }
4521 ret = ext4_map_blocks(handle, inode, &map, flags);
4522 if (ret <= 0) {
4523 ext4_debug("inode #%lu: block %u: len %u: "
4524 "ext4_ext_map_blocks returned %d",
4525 inode->i_ino, map.m_lblk,
4526 map.m_len, ret);
4527 ext4_mark_inode_dirty(handle, inode);
4528 ext4_journal_stop(handle);
4529 break;
4530 }
4531 /*
4532 * allow a full retry cycle for any remaining allocations
4533 */
4534 retries = 0;
4535 map.m_lblk += ret;
4536 map.m_len = len = len - ret;
4537 epos = (loff_t)map.m_lblk << inode->i_blkbits;
4538 inode_set_ctime_current(inode);
4539 if (new_size) {
4540 if (epos > new_size)
4541 epos = new_size;
4542 if (ext4_update_inode_size(inode, epos) & 0x1)
4543 inode_set_mtime_to_ts(inode,
4544 inode_get_ctime(inode));
4545 if (epos > old_size) {
4546 pagecache_isize_extended(inode, old_size, epos);
4547 ext4_zero_partial_blocks(handle, inode,
4548 old_size, epos - old_size);
4549 }
4550 }
4551 ret2 = ext4_mark_inode_dirty(handle, inode);
4552 ext4_update_inode_fsync_trans(handle, inode, 1);
4553 ret3 = ext4_journal_stop(handle);
4554 ret2 = ret3 ? ret3 : ret2;
4555 if (unlikely(ret2))
4556 break;
4557 }
4558 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4559 goto retry;
4560
4561 return ret > 0 ? ret2 : ret;
4562 }
4563
4564 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
4565
4566 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
4567
4568 static long ext4_zero_range(struct file *file, loff_t offset,
4569 loff_t len, int mode)
4570 {
4571 struct inode *inode = file_inode(file);
4572 handle_t *handle = NULL;
4573 loff_t new_size = 0;
4574 loff_t end = offset + len;
4575 ext4_lblk_t start_lblk, end_lblk;
4576 unsigned int blocksize = i_blocksize(inode);
4577 unsigned int blkbits = inode->i_blkbits;
4578 int ret, flags, credits;
4579
4580 trace_ext4_zero_range(inode, offset, len, mode);
4581 WARN_ON_ONCE(!inode_is_locked(inode));
4582
4583 /* Indirect files do not support unwritten extents */
4584 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4585 return -EOPNOTSUPP;
4586
4587 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4588 (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
4589 new_size = end;
4590 ret = inode_newsize_ok(inode, new_size);
4591 if (ret)
4592 return ret;
4593 }
4594
4595 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4596 /* Preallocate the range including the unaligned edges */
4597 if (!IS_ALIGNED(offset | end, blocksize)) {
4598 ext4_lblk_t alloc_lblk = offset >> blkbits;
4599 ext4_lblk_t len_lblk = EXT4_MAX_BLOCKS(len, offset, blkbits);
4600
4601 ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
4602 new_size, flags);
4603 if (ret)
4604 return ret;
4605 }
4606
4607 ret = ext4_update_disksize_before_punch(inode, offset, len);
4608 if (ret)
4609 return ret;
4610
4611 /* Now release the pages and zero block aligned part of pages */
4612 ret = ext4_truncate_page_cache_block_range(inode, offset, end);
4613 if (ret)
4614 return ret;
4615
4616 /* Zero range excluding the unaligned edges */
4617 start_lblk = EXT4_B_TO_LBLK(inode, offset);
4618 end_lblk = end >> blkbits;
4619 if (end_lblk > start_lblk) {
4620 ext4_lblk_t zero_blks = end_lblk - start_lblk;
4621
4622 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | EXT4_EX_NOCACHE);
4623 ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
4624 new_size, flags);
4625 if (ret)
4626 return ret;
4627 }
4628 /* Finish zeroing out if the range doesn't contain a partial block */
4629 if (IS_ALIGNED(offset | end, blocksize))
4630 return ret;
4631
4632 /*
4633 * In the worst case we have to write out two non-adjacent unwritten
4634 * blocks and update the inode
4635 */
4636 credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
4637 if (ext4_should_journal_data(inode))
4638 credits += 2;
4639 handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4640 if (IS_ERR(handle)) {
4641 ret = PTR_ERR(handle);
4642 ext4_std_error(inode->i_sb, ret);
4643 return ret;
4644 }
4645
4646 /* Zero out partial block at the edges of the range */
4647 ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4648 if (ret)
4649 goto out_handle;
4650
4651 if (new_size)
4652 ext4_update_inode_size(inode, new_size);
4653 ret = ext4_mark_inode_dirty(handle, inode);
4654 if (unlikely(ret))
4655 goto out_handle;
4656
4657 ext4_update_inode_fsync_trans(handle, inode, 1);
4658 if (file->f_flags & O_SYNC)
4659 ext4_handle_sync(handle);
4660
4661 out_handle:
4662 ext4_journal_stop(handle);
4663 return ret;
4664 }
4665
4666 static long ext4_do_fallocate(struct file *file, loff_t offset,
4667 loff_t len, int mode)
4668 {
4669 struct inode *inode = file_inode(file);
4670 loff_t end = offset + len;
4671 loff_t new_size = 0;
4672 ext4_lblk_t start_lblk, len_lblk;
4673 int ret;
4674
4675 trace_ext4_fallocate_enter(inode, offset, len, mode);
4676 WARN_ON_ONCE(!inode_is_locked(inode));
4677
4678 start_lblk = offset >> inode->i_blkbits;
4679 len_lblk = EXT4_MAX_BLOCKS(len, offset, inode->i_blkbits);
4680
4681 /* We only support preallocation for extent-based files. */
4682 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4683 ret = -EOPNOTSUPP;
4684 goto out;
4685 }
4686
4687 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4688 (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
4689 new_size = end;
4690 ret = inode_newsize_ok(inode, new_size);
4691 if (ret)
4692 goto out;
4693 }
4694
4695 ret = ext4_alloc_file_blocks(file, start_lblk, len_lblk, new_size,
4696 EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
4697 if (ret)
4698 goto out;
4699
4700 if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4701 ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
4702 EXT4_I(inode)->i_sync_tid);
4703 }
4704 out:
4705 trace_ext4_fallocate_exit(inode, offset, len_lblk, ret);
4706 return ret;
4707 }
4708
4709 /*
4710 * Preallocate space for a file. This implements ext4's fallocate file
4711 * operation, which gets called from the sys_fallocate system call.
4712 * For block-mapped files, posix_fallocate should fall back to the method
4713 * of writing zeroes to the required new blocks (the same behavior which is
4714 * expected for file systems which do not support fallocate() system call).
4715 */
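/*
 * Illustrative userspace sketch (guarded out of the build) of how this
 * entry point is reached via the fallocate(2) system call; "testfile"
 * is an assumed path and error handling is abbreviated:
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* Preallocate 1 MiB of unwritten extents without growing i_size. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20))
		perror("fallocate");
	/* Punch a hole; PUNCH_HOLE must be combined with KEEP_SIZE. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 4096))
		perror("punch hole");
	close(fd);
	return 0;
}
#endif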
4716 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4717 {
4718 struct inode *inode = file_inode(file);
4719 struct address_space *mapping = file->f_mapping;
4720 int ret;
4721
4722 /*
4723 * Encrypted inodes can't handle collapse range or insert
4724 * range since we would need to re-encrypt blocks with a
4725 * different IV or XTS tweak (which are based on the logical
4726 * block number).
4727 */
4728 if (IS_ENCRYPTED(inode) &&
4729 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
4730 return -EOPNOTSUPP;
4731
4732 /* Return error if mode is not supported */
4733 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4734 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
4735 FALLOC_FL_INSERT_RANGE))
4736 return -EOPNOTSUPP;
4737
4738 inode_lock(inode);
4739 ret = ext4_convert_inline_data(inode);
4740 if (ret)
4741 goto out_inode_lock;
4742
4743 /* Wait for all existing DIO workers; newcomers will block on i_rwsem */
4744 inode_dio_wait(inode);
4745
4746 ret = file_modified(file);
4747 if (ret)
4748 goto out_inode_lock;
4749
4750 if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ALLOCATE_RANGE) {
4751 ret = ext4_do_fallocate(file, offset, len, mode);
4752 goto out_inode_lock;
4753 }
4754
4755 /*
4756 * Follow-up operations will drop page cache, hold invalidate lock
4757 * to prevent page faults from reinstantiating pages we have
4758 * released from page cache.
4759 */
4760 filemap_invalidate_lock(mapping);
4761
4762 ret = ext4_break_layouts(inode);
4763 if (ret)
4764 goto out_invalidate_lock;
4765
4766 if (mode & FALLOC_FL_PUNCH_HOLE)
4767 ret = ext4_punch_hole(file, offset, len);
4768 else if (mode & FALLOC_FL_COLLAPSE_RANGE)
4769 ret = ext4_collapse_range(file, offset, len);
4770 else if (mode & FALLOC_FL_INSERT_RANGE)
4771 ret = ext4_insert_range(file, offset, len);
4772 else if (mode & FALLOC_FL_ZERO_RANGE)
4773 ret = ext4_zero_range(file, offset, len, mode);
4774 else
4775 ret = -EOPNOTSUPP;
4776
4777 out_invalidate_lock:
4778 filemap_invalidate_unlock(mapping);
4779 out_inode_lock:
4780 inode_unlock(inode);
4781 return ret;
4782 }
4783
4784 /*
4785 * This function converts a range of blocks to written extents.
4786 * The caller of this function will pass the start offset and the size;
4787 * all unwritten extents within this range will be converted to
4788 * written extents.
4789 *
4790 * This function is called from the direct IO end_io callback
4791 * function, to convert the fallocated extents after IO is completed.
4792 * Returns 0 on success.
4793 */
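/*
 * Worked example (illustrative, 4 KiB blocks): a completed DIO write of
 * 64 KiB at offset 1 MiB yields m_lblk = 256 and max_blocks = 16; each
 * loop iteration below converts as many of those 16 blocks as a single
 * ext4_map_blocks() call with EXT4_GET_BLOCKS_IO_CONVERT_EXT handles.
 */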
4794 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4795 loff_t offset, ssize_t len)
4796 {
4797 unsigned int max_blocks;
4798 int ret = 0, ret2 = 0, ret3 = 0;
4799 struct ext4_map_blocks map;
4800 unsigned int blkbits = inode->i_blkbits;
4801 unsigned int credits = 0;
4802
4803 map.m_lblk = offset >> blkbits;
4804 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4805
4806 if (!handle) {
4807 /*
4808 * credits to insert 1 extent into extent tree
4809 */
4810 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4811 }
4812 while (ret >= 0 && ret < max_blocks) {
4813 map.m_lblk += ret;
4814 map.m_len = (max_blocks -= ret);
4815 if (credits) {
4816 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4817 credits);
4818 if (IS_ERR(handle)) {
4819 ret = PTR_ERR(handle);
4820 break;
4821 }
4822 }
4823 ret = ext4_map_blocks(handle, inode, &map,
4824 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4825 if (ret <= 0)
4826 ext4_warning(inode->i_sb,
4827 "inode #%lu: block %u: len %u: "
4828 "ext4_ext_map_blocks returned %d",
4829 inode->i_ino, map.m_lblk,
4830 map.m_len, ret);
4831 ret2 = ext4_mark_inode_dirty(handle, inode);
4832 if (credits) {
4833 ret3 = ext4_journal_stop(handle);
4834 if (unlikely(ret3))
4835 ret2 = ret3;
4836 }
4837
4838 if (ret <= 0 || ret2)
4839 break;
4840 }
4841 return ret > 0 ? ret2 : ret;
4842 }
4843
4844 int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
4845 {
4846 int ret = 0, err = 0;
4847 struct ext4_io_end_vec *io_end_vec;
4848
4849 /*
4850 * This is somewhat ugly but the idea is clear: When a transaction is
4851 * reserved, everything goes into it. Otherwise we start several
4852 * smaller transactions to convert each extent separately.
4853 */
4854 if (handle) {
4855 handle = ext4_journal_start_reserved(handle,
4856 EXT4_HT_EXT_CONVERT);
4857 if (IS_ERR(handle))
4858 return PTR_ERR(handle);
4859 }
4860
4861 list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
4862 ret = ext4_convert_unwritten_extents(handle, io_end->inode,
4863 io_end_vec->offset,
4864 io_end_vec->size);
4865 if (ret)
4866 break;
4867 }
4868
4869 if (handle)
4870 err = ext4_journal_stop(handle);
4871
4872 return ret < 0 ? ret : err;
4873 }
4874
4875 static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
4876 {
4877 __u64 physical = 0;
4878 __u64 length = 0;
4879 int blockbits = inode->i_sb->s_blocksize_bits;
4880 int error = 0;
4881 u16 iomap_type;
4882
4883 /* in-inode? */
4884 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4885 struct ext4_iloc iloc;
4886 int offset; /* offset of xattr in inode */
4887
4888 error = ext4_get_inode_loc(inode, &iloc);
4889 if (error)
4890 return error;
4891 physical = (__u64)iloc.bh->b_blocknr << blockbits;
4892 offset = EXT4_GOOD_OLD_INODE_SIZE +
4893 EXT4_I(inode)->i_extra_isize;
4894 physical += offset;
4895 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4896 brelse(iloc.bh);
4897 iomap_type = IOMAP_INLINE;
4898 } else if (EXT4_I(inode)->i_file_acl) { /* external block */
4899 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4900 length = inode->i_sb->s_blocksize;
4901 iomap_type = IOMAP_MAPPED;
4902 } else {
4903 /* no in-inode or external block for xattr, so return -ENOENT */
4904 error = -ENOENT;
4905 goto out;
4906 }
4907
4908 iomap->addr = physical;
4909 iomap->offset = 0;
4910 iomap->length = length;
4911 iomap->type = iomap_type;
4912 iomap->flags = 0;
4913 out:
4914 return error;
4915 }
4916
4917 static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
4918 loff_t length, unsigned flags,
4919 struct iomap *iomap, struct iomap *srcmap)
4920 {
4921 int error;
4922
4923 error = ext4_iomap_xattr_fiemap(inode, iomap);
4924 if (error == 0 && (offset >= iomap->length))
4925 error = -ENOENT;
4926 return error;
4927 }
4928
4929 static const struct iomap_ops ext4_iomap_xattr_ops = {
4930 .iomap_begin = ext4_iomap_xattr_begin,
4931 };
4932
4933 static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
4934 {
4935 u64 maxbytes = ext4_get_maxbytes(inode);
4936
4937 if (*len == 0)
4938 return -EINVAL;
4939 if (start > maxbytes)
4940 return -EFBIG;
4941
4942 /*
4943 * Shrink request scope to what the fs can actually handle.
4944 */
4945 if (*len > maxbytes || (maxbytes - *len) < start)
4946 *len = maxbytes - start;
4947 return 0;
4948 }
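/*
 * Worked example for the clamp in ext4_fiemap_check_ranges() above
 * (illustrative numbers): with maxbytes = 16 TiB, a request of
 * start = maxbytes - 4 KiB and *len = 1 MiB satisfies
 * (maxbytes - *len) < start, so *len is shrunk to
 * maxbytes - start = 4 KiB instead of the request failing.
 */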
4949
4950 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4951 u64 start, u64 len)
4952 {
4953 int error = 0;
4954
4955 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
4956 error = ext4_ext_precache(inode);
4957 if (error)
4958 return error;
4959 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
4960 }
4961
4962 /*
4963 * For bitmap files the maximum size limit could be smaller than
4964 * s_maxbytes, so check len here manually instead of just relying on the
4965 * generic check.
4966 */
4967 error = ext4_fiemap_check_ranges(inode, start, &len);
4968 if (error)
4969 return error;
4970
4971 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4972 fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
4973 return iomap_fiemap(inode, fieinfo, start, len,
4974 &ext4_iomap_xattr_ops);
4975 }
4976
4977 return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
4978 }
4979
4980 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
4981 __u64 start, __u64 len)
4982 {
4983 ext4_lblk_t start_blk, len_blks;
4984 __u64 last_blk;
4985 int error = 0;
4986
4987 if (ext4_has_inline_data(inode)) {
4988 int has_inline;
4989
4990 down_read(&EXT4_I(inode)->xattr_sem);
4991 has_inline = ext4_has_inline_data(inode);
4992 up_read(&EXT4_I(inode)->xattr_sem);
4993 if (has_inline)
4994 return 0;
4995 }
4996
4997 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
4998 error = ext4_ext_precache(inode);
4999 if (error)
5000 return error;
5001 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
5002 }
5003
5004 error = fiemap_prep(inode, fieinfo, start, &len, 0);
5005 if (error)
5006 return error;
5007
5008 error = ext4_fiemap_check_ranges(inode, start, &len);
5009 if (error)
5010 return error;
5011
5012 start_blk = start >> inode->i_sb->s_blocksize_bits;
5013 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
5014 if (last_blk >= EXT_MAX_BLOCKS)
5015 last_blk = EXT_MAX_BLOCKS-1;
5016 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
5017
5018 /*
5019 * Walk the extent tree gathering extent information
5020 * and pushing extents back to the user.
5021 */
5022 return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
5023 }
5024
5025 /*
5026 * ext4_ext_shift_path_extents:
5027 * Shift the extents of a path structure lying between path[depth].p_ext
5028 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
5029 * whether it is a right shift or a left shift operation.
5030 */
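/*
 * Worked example (illustrative): left-shifting by 8 turns extents at
 * ee_block 100 and 116 into 92 and 108; if 92's predecessor now ends
 * exactly at lblk 92 with contiguous physical blocks, the two are
 * merged via ext4_ext_try_to_merge_right(). The index entries above
 * the leaf are lowered by the same 8 blocks whenever the first extent
 * of a node moves.
 */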
5031 static int
5032 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5033 struct inode *inode, handle_t *handle,
5034 enum SHIFT_DIRECTION SHIFT)
5035 {
5036 int depth, err = 0;
5037 struct ext4_extent *ex_start, *ex_last;
5038 bool update = false;
5039 int credits, restart_credits;
5040 depth = path->p_depth;
5041
5042 while (depth >= 0) {
5043 if (depth == path->p_depth) {
5044 ex_start = path[depth].p_ext;
5045 if (!ex_start)
5046 return -EFSCORRUPTED;
5047
5048 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5049 /* leaf + sb + inode */
5050 credits = 3;
5051 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) {
5052 update = true;
5053 /* extent tree + sb + inode */
5054 credits = depth + 2;
5055 }
5056
5057 restart_credits = ext4_writepage_trans_blocks(inode);
5058 err = ext4_datasem_ensure_credits(handle, inode, credits,
5059 restart_credits, 0);
5060 if (err) {
5061 if (err > 0)
5062 err = -EAGAIN;
5063 goto out;
5064 }
5065
5066 err = ext4_ext_get_access(handle, inode, path + depth);
5067 if (err)
5068 goto out;
5069
5070 while (ex_start <= ex_last) {
5071 if (SHIFT == SHIFT_LEFT) {
5072 le32_add_cpu(&ex_start->ee_block,
5073 -shift);
5074 /* Try to merge to the left. */
5075 if ((ex_start >
5076 EXT_FIRST_EXTENT(path[depth].p_hdr))
5077 &&
5078 ext4_ext_try_to_merge_right(inode,
5079 path, ex_start - 1))
5080 ex_last--;
5081 else
5082 ex_start++;
5083 } else {
5084 le32_add_cpu(&ex_last->ee_block, shift);
5085 ext4_ext_try_to_merge_right(inode, path,
5086 ex_last);
5087 ex_last--;
5088 }
5089 }
5090 err = ext4_ext_dirty(handle, inode, path + depth);
5091 if (err)
5092 goto out;
5093
5094 if (--depth < 0 || !update)
5095 break;
5096 }
5097
5098 /* Update index too */
5099 err = ext4_ext_get_access(handle, inode, path + depth);
5100 if (err)
5101 goto out;
5102
5103 if (SHIFT == SHIFT_LEFT)
5104 le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5105 else
5106 le32_add_cpu(&path[depth].p_idx->ei_block, shift);
5107 err = ext4_ext_dirty(handle, inode, path + depth);
5108 if (err)
5109 goto out;
5110
5111 /* we are done if the current index is not the first one in its node */
5112 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
5113 break;
5114
5115 depth--;
5116 }
5117
5118 out:
5119 return err;
5120 }
5121
5122 /*
5123 * ext4_ext_shift_extents:
5124 * All the extents which lie in the range from @start to the last allocated
5125 * block for the @inode are shifted either towards the left or the right
5126 * (depending upon @SHIFT) by @shift blocks.
5127 * On success, 0 is returned, error otherwise.
5128 */
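/*
 * Worked example (illustrative, 4 KiB blocks): collapsing
 * offset = 1 MiB, len = 1 MiB removes logical blocks [256..511]; this
 * function is then called with start = 512 and shift = 256 and moves
 * every extent from lblk 512 up to the last allocated block left by
 * 256 blocks, closing the gap.
 */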
5129 static int
5130 ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5131 ext4_lblk_t start, ext4_lblk_t shift,
5132 enum SHIFT_DIRECTION SHIFT)
5133 {
5134 struct ext4_ext_path *path;
5135 int ret = 0, depth;
5136 struct ext4_extent *extent;
5137 ext4_lblk_t stop, *iterator, ex_start, ex_end;
5138 ext4_lblk_t tmp = EXT_MAX_BLOCKS;
5139
5140 /* Let path point to the last extent */
5141 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5142 EXT4_EX_NOCACHE);
5143 if (IS_ERR(path))
5144 return PTR_ERR(path);
5145
5146 depth = path->p_depth;
5147 extent = path[depth].p_ext;
5148 if (!extent)
5149 goto out;
5150
5151 stop = le32_to_cpu(extent->ee_block);
5152
5153 /*
5154 * For left shifts, make sure the hole on the left is big enough to
5155 * accommodate the shift. For right shifts, make sure the last extent
5156 * won't be shifted beyond EXT_MAX_BLOCKS.
5157 */
5158 if (SHIFT == SHIFT_LEFT) {
5159 path = ext4_find_extent(inode, start - 1, path,
5160 EXT4_EX_NOCACHE);
5161 if (IS_ERR(path))
5162 return PTR_ERR(path);
5163 depth = path->p_depth;
5164 extent = path[depth].p_ext;
5165 if (extent) {
5166 ex_start = le32_to_cpu(extent->ee_block);
5167 ex_end = le32_to_cpu(extent->ee_block) +
5168 ext4_ext_get_actual_len(extent);
5169 } else {
5170 ex_start = 0;
5171 ex_end = 0;
5172 }
5173
5174 if ((start == ex_start && shift > ex_start) ||
5175 (shift > start - ex_end)) {
5176 ret = -EINVAL;
5177 goto out;
5178 }
5179 } else {
5180 if (shift > EXT_MAX_BLOCKS -
5181 (stop + ext4_ext_get_actual_len(extent))) {
5182 ret = -EINVAL;
5183 goto out;
5184 }
5185 }
5186
5187 /*
5188 * In case of left shift, iterator points to start and it is increased
5189 * till we reach stop. In case of right shift, iterator points to stop
5190 * and it is decreased till we reach start.
5191 */
5192 again:
5193 ret = 0;
5194 if (SHIFT == SHIFT_LEFT)
5195 iterator = &start;
5196 else
5197 iterator = &stop;
5198
5199 if (tmp != EXT_MAX_BLOCKS)
5200 *iterator = tmp;
5201
5202 /*
5203 * It's safe to start updating extents. Start and stop are unsigned, so
5204 * in case of a right shift, if an extent with block 0 is reached, the
5205 * iterator becomes NULL to indicate the end of the loop.
5206 */
5207 while (iterator && start <= stop) {
5208 path = ext4_find_extent(inode, *iterator, path,
5209 EXT4_EX_NOCACHE);
5210 if (IS_ERR(path))
5211 return PTR_ERR(path);
5212 depth = path->p_depth;
5213 extent = path[depth].p_ext;
5214 if (!extent) {
5215 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
5216 (unsigned long) *iterator);
5217 return -EFSCORRUPTED;
5218 }
5219 if (SHIFT == SHIFT_LEFT && *iterator >
5220 le32_to_cpu(extent->ee_block)) {
5221 /* Hole, move to the next extent */
5222 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5223 path[depth].p_ext++;
5224 } else {
5225 *iterator = ext4_ext_next_allocated_block(path);
5226 continue;
5227 }
5228 }
5229
5230 tmp = *iterator;
5231 if (SHIFT == SHIFT_LEFT) {
5232 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5233 *iterator = le32_to_cpu(extent->ee_block) +
5234 ext4_ext_get_actual_len(extent);
5235 } else {
5236 extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
5237 if (le32_to_cpu(extent->ee_block) > start)
5238 *iterator = le32_to_cpu(extent->ee_block) - 1;
5239 else if (le32_to_cpu(extent->ee_block) == start)
5240 iterator = NULL;
5241 else {
5242 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5243 while (le32_to_cpu(extent->ee_block) >= start)
5244 extent--;
5245
5246 if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
5247 break;
5248
5249 extent++;
5250 iterator = NULL;
5251 }
5252 path[depth].p_ext = extent;
5253 }
5254 ret = ext4_ext_shift_path_extents(path, shift, inode,
5255 handle, SHIFT);
5256 /* iterator can be NULL which means we should break */
5257 if (ret == -EAGAIN)
5258 goto again;
5259 if (ret)
5260 break;
5261 }
5262 out:
5263 ext4_free_ext_path(path);
5264 return ret;
5265 }
5266
5267 /*
5268 * ext4_collapse_range:
5269 * This implements fallocate's collapse range functionality for ext4.
5270 * Returns: 0 on success and a negative error code on failure.
5271 */
5272 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
5273 {
5274 struct inode *inode = file_inode(file);
5275 struct super_block *sb = inode->i_sb;
5276 struct address_space *mapping = inode->i_mapping;
5277 loff_t end = offset + len;
5278 ext4_lblk_t start_lblk, end_lblk;
5279 handle_t *handle;
5280 unsigned int credits;
5281 loff_t start, new_size;
5282 int ret;
5283
5284 trace_ext4_collapse_range(inode, offset, len);
5285 WARN_ON_ONCE(!inode_is_locked(inode));
5286
5287 /* Currently just for extent based files */
5288 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5289 return -EOPNOTSUPP;
5290 /* Collapse range works only on fs cluster size aligned regions. */
5291 if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
5292 return -EINVAL;
5293 /*
5294 * There is no need to overlap collapse range with EOF, in which case
5295 * it is effectively a truncate operation
5296 */
5297 if (end >= inode->i_size)
5298 return -EINVAL;
5299
5300 /*
5301 * Write tail of the last page before removed range and data that
5302 * will be shifted since they will get removed from the page cache
5303 * below. We are also protected from pages becoming dirty by
5304 * i_rwsem and invalidate_lock.
5305 * Need to round down offset to be aligned with page size boundary
5306 * for page size > block size.
5307 */
5308 start = round_down(offset, PAGE_SIZE);
5309 ret = filemap_write_and_wait_range(mapping, start, offset);
5310 if (!ret)
5311 ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
5312 if (ret)
5313 return ret;
5314
5315 truncate_pagecache(inode, start);
5316
5317 credits = ext4_writepage_trans_blocks(inode);
5318 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5319 if (IS_ERR(handle))
5320 return PTR_ERR(handle);
5321
5322 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
5323
5324 start_lblk = offset >> inode->i_blkbits;
5325 end_lblk = (offset + len) >> inode->i_blkbits;
5326
5327 down_write(&EXT4_I(inode)->i_data_sem);
5328 ext4_discard_preallocations(inode);
5329 ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
5330
5331 ret = ext4_ext_remove_space(inode, start_lblk, end_lblk - 1);
5332 if (ret) {
5333 up_write(&EXT4_I(inode)->i_data_sem);
5334 goto out_handle;
5335 }
5336 ext4_discard_preallocations(inode);
5337
5338 ret = ext4_ext_shift_extents(inode, handle, end_lblk,
5339 end_lblk - start_lblk, SHIFT_LEFT);
5340 if (ret) {
5341 up_write(&EXT4_I(inode)->i_data_sem);
5342 goto out_handle;
5343 }
5344
5345 new_size = inode->i_size - len;
5346 i_size_write(inode, new_size);
5347 EXT4_I(inode)->i_disksize = new_size;
5348
5349 up_write(&EXT4_I(inode)->i_data_sem);
5350 ret = ext4_mark_inode_dirty(handle, inode);
5351 if (ret)
5352 goto out_handle;
5353
5354 ext4_update_inode_fsync_trans(handle, inode, 1);
5355 if (IS_SYNC(inode))
5356 ext4_handle_sync(handle);
5357
5358 out_handle:
5359 ext4_journal_stop(handle);
5360 return ret;
5361 }
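
/*
 * Illustrative sketch (not part of this file's code): userspace reaches the
 * path above via fallocate(2) with FALLOC_FL_COLLAPSE_RANGE. Offset and
 * length must be cluster aligned and the range must end before EOF, matching
 * the checks above. A minimal, hedged example:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int collapse(int fd, off_t offset, off_t len)
 *	{
 *		// Removes [offset, offset + len) and shifts the tail left;
 *		// i_size shrinks by len. Fails with EINVAL if the range is
 *		// misaligned or reaches EOF.
 *		return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
 *	}
 */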

/*
 * ext4_insert_range:
 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
 * The data blocks from @offset to EOF are shifted towards the right by
 * @len bytes to create a hole in the @inode. The inode size is increased
 * by @len bytes.
 * Returns 0 on success, error otherwise.
 */
static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	struct ext4_ext_path *path;
	struct ext4_extent *extent;
	ext4_lblk_t start_lblk, len_lblk, ee_start_lblk = 0;
	unsigned int credits, ee_len;
	int ret, depth, split_flag = 0;
	loff_t start;

	trace_ext4_insert_range(inode, offset, len);
	WARN_ON_ONCE(!inode_is_locked(inode));

	/* Currently just for extent based files */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;
	/* Insert range works only on fs cluster size aligned regions. */
	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
		return -EINVAL;
	/* Offset must be less than i_size */
	if (offset >= inode->i_size)
		return -EINVAL;
	/* Check whether the maximum file size would be exceeded */
	if (len > inode->i_sb->s_maxbytes - inode->i_size)
		return -EFBIG;

	/*
	 * Write out all dirty pages. The start offset needs to be rounded
	 * down to a page size boundary to handle page size > block size.
	 */
	start = round_down(offset, PAGE_SIZE);
	ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, start);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);

	/* Expand the file to avoid data loss if there is an error while shifting */
	inode->i_size += len;
	EXT4_I(inode)->i_disksize += len;
	ret = ext4_mark_inode_dirty(handle, inode);
	if (ret)
		goto out_handle;

	start_lblk = offset >> inode->i_blkbits;
	len_lblk = len >> inode->i_blkbits;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	path = ext4_find_extent(inode, start_lblk, NULL, 0);
	if (IS_ERR(path)) {
		up_write(&EXT4_I(inode)->i_data_sem);
		ret = PTR_ERR(path);
		goto out_handle;
	}

	depth = ext_depth(inode);
	extent = path[depth].p_ext;
	if (extent) {
		ee_start_lblk = le32_to_cpu(extent->ee_block);
		ee_len = ext4_ext_get_actual_len(extent);

		/*
		 * If start_lblk is not the starting block of the extent,
		 * split the extent at start_lblk.
		 */
		if ((start_lblk > ee_start_lblk) &&
		    (start_lblk < (ee_start_lblk + ee_len))) {
			if (ext4_ext_is_unwritten(extent))
				split_flag = EXT4_EXT_MARK_UNWRIT1 |
					     EXT4_EXT_MARK_UNWRIT2;
			path = ext4_split_extent_at(handle, inode, path,
					start_lblk, split_flag,
					EXT4_EX_NOCACHE |
					EXT4_GET_BLOCKS_PRE_IO |
					EXT4_GET_BLOCKS_METADATA_NOFAIL);
		}

		if (IS_ERR(path)) {
			up_write(&EXT4_I(inode)->i_data_sem);
			ret = PTR_ERR(path);
			goto out_handle;
		}
	}

	ext4_free_ext_path(path);
	ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);

	/*
	 * If start_lblk lies in a hole which is at the start of the file,
	 * use ee_start_lblk to shift extents.
	 */
	ret = ext4_ext_shift_extents(inode, handle,
		max(ee_start_lblk, start_lblk), len_lblk, SHIFT_RIGHT);
	up_write(&EXT4_I(inode)->i_data_sem);
	if (ret)
		goto out_handle;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

out_handle:
	ext4_journal_stop(handle);
	return ret;
}
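
/*
 * Illustrative sketch (not part of this file's code): the userspace entry
 * point is again fallocate(2), here with FALLOC_FL_INSERT_RANGE. The same
 * cluster alignment rules apply and @offset must lie strictly before EOF.
 * A minimal, hedged example:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int insert_hole(int fd, off_t offset, off_t len)
 *	{
 *		// Shifts everything from offset to EOF right by len bytes
 *		// and leaves a hole behind; i_size grows by len.
 *		return fallocate(fd, FALLOC_FL_INSERT_RANGE, offset, len);
 *	}
 */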

/**
 * ext4_swap_extents() - Swap extents between two inodes
 * @handle: handle for this transaction
 * @inode1: First inode
 * @inode2: Second inode
 * @lblk1: Start block for first inode
 * @lblk2: Start block for second inode
 * @count: Number of blocks to swap
 * @unwritten: Mark second inode's extents as unwritten after swap
 * @erp: Pointer to save error value
 *
 * This helper routine does exactly what it promises: swap extents. All other
 * concerns, such as page-cache locking consistency, bh mapping consistency or
 * extent data copying, must be handled by the caller.
 * Locking:
 *	i_rwsem is held for both inodes
 *	i_data_sem is locked for write for both inodes
 * Assumptions:
 *	All pages from requested range are locked for both inodes
 */
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
		  ext4_lblk_t count, int unwritten, int *erp)
{
	struct ext4_ext_path *path1 = NULL;
	struct ext4_ext_path *path2 = NULL;
	int replaced_count = 0;

	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
	BUG_ON(!inode_is_locked(inode1));
	BUG_ON(!inode_is_locked(inode2));

	ext4_es_remove_extent(inode1, lblk1, count);
	ext4_es_remove_extent(inode2, lblk2, count);

	while (count) {
		struct ext4_extent *ex1, *ex2, tmp_ex;
		ext4_lblk_t e1_blk, e2_blk;
		int e1_len, e2_len, len;
		int split = 0;

		path1 = ext4_find_extent(inode1, lblk1, path1, EXT4_EX_NOCACHE);
		if (IS_ERR(path1)) {
			*erp = PTR_ERR(path1);
			goto errout;
		}
		path2 = ext4_find_extent(inode2, lblk2, path2, EXT4_EX_NOCACHE);
		if (IS_ERR(path2)) {
			*erp = PTR_ERR(path2);
			goto errout;
		}
		ex1 = path1[path1->p_depth].p_ext;
		ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap? */
		if (unlikely(!ex2 || !ex1))
			goto errout;

		e1_blk = le32_to_cpu(ex1->ee_block);
		e2_blk = le32_to_cpu(ex2->ee_block);
		e1_len = ext4_ext_get_actual_len(ex1);
		e2_len = ext4_ext_get_actual_len(ex2);

		/* Hole handling */
		if (!in_range(lblk1, e1_blk, e1_len) ||
		    !in_range(lblk2, e2_blk, e2_len)) {
			ext4_lblk_t next1, next2;

			/* if hole after extent, then go to next extent */
			next1 = ext4_ext_next_allocated_block(path1);
			next2 = ext4_ext_next_allocated_block(path2);
			/* If hole before extent, then shift to that extent */
			if (e1_blk > lblk1)
				next1 = e1_blk;
			if (e2_blk > lblk2)
				next2 = e2_blk;
			/* Do we have something to swap */
			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
				goto errout;
			/* Move to the rightmost boundary */
			len = next1 - lblk1;
			if (len < next2 - lblk2)
				len = next2 - lblk2;
			if (len > count)
				len = count;
			lblk1 += len;
			lblk2 += len;
			count -= len;
			continue;
		}

		/* Prepare left boundary */
		if (e1_blk < lblk1) {
			split = 1;
			path1 = ext4_force_split_extent_at(handle, inode1,
							   path1, lblk1, 0);
			if (IS_ERR(path1)) {
				*erp = PTR_ERR(path1);
				goto errout;
			}
		}
		if (e2_blk < lblk2) {
			split = 1;
			path2 = ext4_force_split_extent_at(handle, inode2,
							   path2, lblk2, 0);
			if (IS_ERR(path2)) {
				*erp = PTR_ERR(path2);
				goto errout;
			}
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated. */
		if (split)
			continue;

		/* Prepare right boundary */
		len = count;
		if (len > e1_blk + e1_len - lblk1)
			len = e1_blk + e1_len - lblk1;
		if (len > e2_blk + e2_len - lblk2)
			len = e2_blk + e2_len - lblk2;

		if (len != e1_len) {
			split = 1;
			path1 = ext4_force_split_extent_at(handle, inode1,
							   path1, lblk1 + len, 0);
			if (IS_ERR(path1)) {
				*erp = PTR_ERR(path1);
				goto errout;
			}
		}
		if (len != e2_len) {
			split = 1;
			path2 = ext4_force_split_extent_at(handle, inode2,
							   path2, lblk2 + len, 0);
			if (IS_ERR(path2)) {
				*erp = PTR_ERR(path2);
				goto errout;
			}
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated. */
		if (split)
			continue;

		BUG_ON(e2_len != e1_len);
		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
		if (unlikely(*erp))
			goto errout;
		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
		if (unlikely(*erp))
			goto errout;

		/* Both extents are fully inside boundaries. Swap it now */
		tmp_ex = *ex1;
		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
		ex1->ee_len = cpu_to_le16(e2_len);
		ex2->ee_len = cpu_to_le16(e1_len);
		if (unwritten)
			ext4_ext_mark_unwritten(ex2);
		if (ext4_ext_is_unwritten(&tmp_ex))
			ext4_ext_mark_unwritten(ex1);

		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
		*erp = ext4_ext_dirty(handle, inode2, path2 +
				      path2->p_depth);
		if (unlikely(*erp))
			goto errout;
		*erp = ext4_ext_dirty(handle, inode1, path1 +
				      path1->p_depth);
		/*
		 * Looks scary, huh? The second inode already points to the
		 * new blocks and was successfully dirtied, but an error here
		 * can only be caused by a journal error, in which case the
		 * whole transaction will be aborted anyway.
		 */
		if (unlikely(*erp))
			goto errout;

		lblk1 += len;
		lblk2 += len;
		replaced_count += len;
		count -= len;
	}

errout:
	ext4_free_ext_path(path1);
	ext4_free_ext_path(path2);
	return replaced_count;
}
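
/*
 * Illustrative sketch (not part of this file's code): ext4_swap_extents() is
 * driven by the online defragmentation ioctl, EXT4_IOC_MOVE_EXT, which takes
 * a struct move_extent. The layout below is believed to mirror the kernel's
 * definition but is reproduced here as a hedged example, not a reference:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/types.h>
 *
 *	struct move_extent {
 *		__u32 reserved;		// must be zero
 *		__u32 donor_fd;		// fd of the donor file
 *		__u64 orig_start;	// logical start block of the original
 *		__u64 donor_start;	// logical start block of the donor
 *		__u64 len;		// number of blocks to move
 *		__u64 moved_len;	// out: blocks actually moved
 *	};
 *	#define EXT4_IOC_MOVE_EXT	_IOWR('f', 15, struct move_extent)
 *
 * ioctl(orig_fd, EXT4_IOC_MOVE_EXT, &me) then swaps the block mappings of
 * the two files over the given range, copying data as needed.
 */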

/*
 * ext4_clu_mapped - determine whether any block in a logical cluster has
 *                   been mapped to a physical cluster
 *
 * @inode - file containing the logical cluster
 * @lclu - logical cluster of interest
 *
 * Returns 1 if any block in the logical cluster is mapped, signifying
 * that a physical cluster has been allocated for it. Otherwise,
 * returns 0. Can also return negative error codes. Derived from
 * ext4_ext_map_blocks().
 */
int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_ext_path *path;
	int depth, mapped = 0, err = 0;
	struct ext4_extent *extent;
	ext4_lblk_t first_lblk, first_lclu, last_lclu;

	/*
	 * if data can be stored inline, the logical cluster isn't
	 * mapped - no physical clusters have been allocated, and the
	 * file has no extents
	 */
	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
	    ext4_has_inline_data(inode))
		return 0;

	/* search for the extent closest to the first block in the cluster */
	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);

	depth = ext_depth(inode);

	/*
	 * A consistent leaf must not be empty. This situation is possible,
	 * though, _during_ tree modification, and it's why an assert can't
	 * be put in ext4_find_extent().
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode,
			"bad extent address - lblock: %lu, depth: %d, pblock: %lld",
			(unsigned long) EXT4_C2B(sbi, lclu),
			depth, path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out;
	}

	extent = path[depth].p_ext;

	/* can't be mapped if the extent tree is empty */
	if (extent == NULL)
		goto out;

	first_lblk = le32_to_cpu(extent->ee_block);
	first_lclu = EXT4_B2C(sbi, first_lblk);

	/*
	 * Three possible outcomes at this point - found extent spanning
	 * the target cluster, to the left of the target cluster, or to the
	 * right of the target cluster. The first two cases are handled here.
	 * The last case indicates the target cluster is not mapped.
	 */
	if (lclu >= first_lclu) {
		last_lclu = EXT4_B2C(sbi, first_lblk +
				     ext4_ext_get_actual_len(extent) - 1);
		if (lclu <= last_lclu) {
			mapped = 1;
		} else {
			first_lblk = ext4_ext_next_allocated_block(path);
			first_lclu = EXT4_B2C(sbi, first_lblk);
			if (lclu == first_lclu)
				mapped = 1;
		}
	}

out:
	ext4_free_ext_path(path);

	return err ? err : mapped;
}
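
/*
 * Worked example (illustrative): with bigalloc and a cluster ratio of 16
 * blocks per cluster, lclu 3 covers logical blocks 48..63. An extent with
 * ee_block = 60 and length 8 spans clusters EXT4_B2C(60) = 3 through
 * EXT4_B2C(67) = 4, so ext4_clu_mapped(inode, 3) returns 1.
 */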

/*
 * Updates the physical block address and unwritten status of the extent
 * starting at @start with length @len. If such an extent doesn't exist,
 * this function splits the extent tree appropriately to create an
 * extent like this. This function is called in the fast commit
 * replay path. Returns 0 on success and error on failure.
 */
int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
			      int len, int unwritten, ext4_fsblk_t pblk)
{
	struct ext4_ext_path *path;
	struct ext4_extent *ex;
	int ret;

	path = ext4_find_extent(inode, start, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ret = -EFSCORRUPTED;
		goto out;
	}

	if (le32_to_cpu(ex->ee_block) != start ||
	    ext4_ext_get_actual_len(ex) != len) {
		/* We need to split this extent to match our extent first */
		down_write(&EXT4_I(inode)->i_data_sem);
		path = ext4_force_split_extent_at(NULL, inode, path, start, 1);
		up_write(&EXT4_I(inode)->i_data_sem);
		if (IS_ERR(path)) {
			ret = PTR_ERR(path);
			goto out;
		}

		path = ext4_find_extent(inode, start, path, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);

		ex = path[path->p_depth].p_ext;
		WARN_ON(le32_to_cpu(ex->ee_block) != start);

		if (ext4_ext_get_actual_len(ex) != len) {
			down_write(&EXT4_I(inode)->i_data_sem);
			path = ext4_force_split_extent_at(NULL, inode, path,
							  start + len, 1);
			up_write(&EXT4_I(inode)->i_data_sem);
			if (IS_ERR(path)) {
				ret = PTR_ERR(path);
				goto out;
			}

			path = ext4_find_extent(inode, start, path, 0);
			if (IS_ERR(path))
				return PTR_ERR(path);
			ex = path[path->p_depth].p_ext;
		}
	}
	if (unwritten)
		ext4_ext_mark_unwritten(ex);
	else
		ext4_ext_mark_initialized(ex);
	ext4_ext_store_pblock(ex, pblk);
	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
	up_write(&EXT4_I(inode)->i_data_sem);
out:
	ext4_free_ext_path(path);
	ext4_mark_inode_dirty(NULL, inode);
	return ret;
}
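
/*
 * Hedged usage sketch: when fast commit replay learns that logical blocks
 * 100..107 of an inode should map to physical block 2048 as unwritten, it
 * can call
 *
 *	ext4_ext_replay_update_ex(inode, 100, 8, 1, 2048);
 *
 * splitting whatever extent currently covers block 100 so that exactly one
 * extent with that start and length exists before it is rewritten.
 */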

/* Try to shrink the extent tree */
void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t old_cur, cur = 0;

	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			return;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_free_ext_path(path);
			ext4_mark_inode_dirty(NULL, inode);
			return;
		}
		old_cur = cur;
		cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
		if (cur <= old_cur)
			cur = old_cur + 1;
		ext4_ext_try_to_merge(NULL, inode, path, ex);
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
		up_write(&EXT4_I(inode)->i_data_sem);
		ext4_mark_inode_dirty(NULL, inode);
		ext4_free_ext_path(path);
	}
}

/* Check if *cur is a hole and if it is, skip it */
static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
{
	int ret;
	struct ext4_map_blocks map;

	map.m_lblk = *cur;
	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;

	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		return ret;
	if (ret != 0)
		return 0;
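	/*
	 * A return of 0 means *cur sits in a hole; ext4_map_blocks() has set
	 * map.m_len to the (capped) length of that hole, so advance past it.
	 */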
	*cur = *cur + map.m_len;
	return 0;
}

/* Count number of blocks used by this inode and update i_blocks */
int ext4_ext_replay_set_iblocks(struct inode *inode)
{
	struct ext4_ext_path *path = NULL, *path2 = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int numblks = 0, i, ret = 0;
	ext4_fsblk_t cmp1, cmp2;
	struct ext4_map_blocks map;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex)
		goto out;
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);

	/* Count the number of data blocks */
	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0)
			numblks += ret;
		cur = cur + map.m_len;
	}

	/*
	 * Count the number of extent tree blocks. We do it by looking up
	 * two successive extents and determining the difference between
	 * their paths. When the paths differ for two successive extents,
	 * we compare the blocks in the path at each level and increment
	 * iblocks by the total number of differences found.
	 */
	cur = 0;
	ret = skip_hole(inode, &cur);
	if (ret < 0)
		goto out;
	path = ext4_find_extent(inode, cur, path, 0);
	if (IS_ERR(path))
		goto out;
	numblks += path->p_depth;
	while (cur < end) {
		path = ext4_find_extent(inode, cur, path, 0);
		if (IS_ERR(path))
			break;
		ex = path[path->p_depth].p_ext;
		if (!ex)
			goto cleanup;

		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
				   ext4_ext_get_actual_len(ex));
		ret = skip_hole(inode, &cur);
		if (ret < 0)
			break;

		path2 = ext4_find_extent(inode, cur, path2, 0);
		if (IS_ERR(path2))
			break;

		for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
			cmp1 = cmp2 = 0;
			if (i <= path->p_depth)
				cmp1 = path[i].p_bh ?
					path[i].p_bh->b_blocknr : 0;
			if (i <= path2->p_depth)
				cmp2 = path2[i].p_bh ?
					path2[i].p_bh->b_blocknr : 0;
			if (cmp1 != cmp2 && cmp2 != 0)
				numblks++;
		}
	}

out:
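	/*
	 * i_blocks counts 512-byte sectors, hence the shift below: e.g. with
	 * a 4K block size, each block accounts for 8 sectors
	 * (blocksize_bits - 9 == 3).
	 */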
	inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
	ext4_mark_inode_dirty(NULL, inode);
cleanup:
	ext4_free_ext_path(path);
	ext4_free_ext_path(path2);
	return 0;
}

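/*
 * Used by the fast commit replay path: walk every mapped extent of the
 * inode, plus the extent tree blocks along each path, and update the
 * in-memory block bitmaps and fast commit region records for them. This
 * summary is inferred from the calls below; treat it as a best-effort
 * description rather than authoritative.
 */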
int ext4_ext_clear_bb(struct inode *inode)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int j, ret = 0;
	struct ext4_map_blocks map;

	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
		return 0;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex)
		goto out;
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);

	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			path = ext4_find_extent(inode, map.m_lblk, path, 0);
			if (!IS_ERR(path)) {
				for (j = 0; j < path->p_depth; j++) {
					ext4_mb_mark_bb(inode->i_sb,
							path[j].p_block, 1, false);
					ext4_fc_record_regions(inode->i_sb, inode->i_ino,
							0, path[j].p_block, 1, 1);
				}
			} else {
				path = NULL;
			}
			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
			ext4_fc_record_regions(inode->i_sb, inode->i_ino,
					map.m_lblk, map.m_pblk, map.m_len, 1);
		}
		cur = cur + map.m_len;
	}

out:
	ext4_free_ext_path(path);
	return 0;
}
