1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/fs/ext4/indirect.c
4 *
5 * from
6 *
7 * linux/fs/ext4/inode.c
8 *
9 * Copyright (C) 1992, 1993, 1994, 1995
10 * Remy Card (card@masi.ibp.fr)
11 * Laboratoire MASI - Institut Blaise Pascal
12 * Universite Pierre et Marie Curie (Paris VI)
13 *
14 * from
15 *
16 * linux/fs/minix/inode.c
17 *
18 * Copyright (C) 1991, 1992 Linus Torvalds
19 *
20 * Goal-directed block allocation by Stephen Tweedie
21 * (sct@redhat.com), 1993, 1998
22 */
23
24 #include "ext4_jbd2.h"
25 #include "truncate.h"
26 #include <linux/dax.h>
27 #include <linux/uio.h>
28
29 #include <trace/events/ext4.h>
30
31 typedef struct {
32 __le32 *p;
33 __le32 key;
34 struct buffer_head *bh;
35 } Indirect;
36
37 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
38 {
39 p->key = *(p->p = v);
40 p->bh = bh;
41 }
42
43 /**
44 * ext4_block_to_path - parse the block number into array of offsets
45 * @inode: inode in question (we are only interested in its superblock)
46 * @i_block: block number to be parsed
47 * @offsets: array to store the offsets in
48 * @boundary: set this non-zero if the referred-to block is likely to be
49 * followed (on disk) by an indirect block.
50 *
51 * To store the locations of a file's data, ext4 uses a data structure common
52 * for UNIX filesystems - tree of pointers anchored in the inode, with
53 * data blocks at leaves and indirect blocks in intermediate nodes.
54 * This function translates the block number into path in that tree -
55 * return value is the path length and @offsets[n] is the offset of
56 * pointer to (n+1)th node in the nth one. If @i_block is out of range
57 * (negative or too large) a warning is printed and zero returned.
58 *
59 * Note: function doesn't find node addresses, so no IO is needed. All
60 * we need to know is the capacity of indirect blocks (taken from the
61 * inode->i_sb).
62 */
63
64 /*
65 * Portability note: the last comparison (check that we fit into triple
66 * indirect block) is spelled differently, because otherwise on an
67 * architecture with 32-bit longs and 8Kb pages we might get into trouble
68 * if our filesystem had 8Kb blocks. We might use long long, but that would
69 * kill us on x86. Oh, well, at least the sign propagation does not matter -
70 * i_block would have to be negative in the very beginning, so we would not
71 * get there at all.
72 */
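/*
 * Worked example (illustrative only, assuming a 4KB block size, i.e.
 * 1024 pointers per indirect block):
 *
 *	i_block 5        -> offsets = { 5 }                         depth 1
 *	i_block 12       -> offsets = { EXT4_IND_BLOCK, 0 }         depth 2
 *	i_block 1036     -> offsets = { EXT4_DIND_BLOCK, 0, 0 }     depth 3
 *	i_block 1049612  -> offsets = { EXT4_TIND_BLOCK, 0, 0, 0 }  depth 4
 *
 * i.e. blocks 0-11 are addressed directly from the inode, 12-1035 via
 * the single indirect block, 1036-1049611 via the double indirect block
 * and everything above that via the triple indirect block.
 */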
73
74 static int ext4_block_to_path(struct inode *inode,
75 ext4_lblk_t i_block,
76 ext4_lblk_t offsets[4], int *boundary)
77 {
78 int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
79 int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
80 const long direct_blocks = EXT4_NDIR_BLOCKS,
81 indirect_blocks = ptrs,
82 double_blocks = (1 << (ptrs_bits * 2));
83 int n = 0;
84 int final = 0;
85
86 if (i_block < direct_blocks) {
87 offsets[n++] = i_block;
88 final = direct_blocks;
89 } else if ((i_block -= direct_blocks) < indirect_blocks) {
90 offsets[n++] = EXT4_IND_BLOCK;
91 offsets[n++] = i_block;
92 final = ptrs;
93 } else if ((i_block -= indirect_blocks) < double_blocks) {
94 offsets[n++] = EXT4_DIND_BLOCK;
95 offsets[n++] = i_block >> ptrs_bits;
96 offsets[n++] = i_block & (ptrs - 1);
97 final = ptrs;
98 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
99 offsets[n++] = EXT4_TIND_BLOCK;
100 offsets[n++] = i_block >> (ptrs_bits * 2);
101 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
102 offsets[n++] = i_block & (ptrs - 1);
103 final = ptrs;
104 } else {
105 ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
106 i_block + direct_blocks +
107 indirect_blocks + double_blocks, inode->i_ino);
108 }
109 if (boundary)
110 *boundary = final - 1 - (i_block & (ptrs - 1));
111 return n;
112 }
113
114 /**
115 * ext4_get_branch - read the chain of indirect blocks leading to data
116 * @inode: inode in question
117 * @depth: depth of the chain (1 - direct pointer, etc.)
118 * @offsets: offsets of pointers in inode/indirect blocks
119 * @chain: place to store the result
120 * @err: here we store the error value
121 *
122 * Function fills the array of triples <key, p, bh> and returns %NULL
123 * if everything went OK or the pointer to the last filled triple
124 * (incomplete one) otherwise. Upon the return chain[i].key contains
125 * the number of (i+1)-th block in the chain (as it is stored in memory,
126 * i.e. little-endian 32-bit), chain[i].p contains the address of that
127 * number (it points into struct inode for i==0 and into the bh->b_data
128 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
129 * block for i>0 and NULL for i==0. In other words, it holds the block
130 * numbers of the chain, addresses they were taken from (and where we can
131 * verify that chain did not change) and buffer_heads hosting these
132 * numbers.
133 *
134 * Function stops when it stumbles upon zero pointer (absent block)
135 * (pointer to last triple returned, *@err == 0)
136 * or when it gets an IO error reading an indirect block
137 * (ditto, *@err == -EIO)
138 * or when it reads all @depth-1 indirect blocks successfully and finds
139 * the whole chain, all the way to the data (returns %NULL, *err == 0).
140 *
141 * Needs to be called with
142 * down_read(&EXT4_I(inode)->i_data_sem)
143 */
144 static Indirect *ext4_get_branch(struct inode *inode, int depth,
145 ext4_lblk_t *offsets,
146 Indirect chain[4], int *err)
147 {
148 struct super_block *sb = inode->i_sb;
149 Indirect *p = chain;
150 struct buffer_head *bh;
151 int ret = -EIO;
152
153 *err = 0;
154 /* i_data is not going away, no lock needed */
155 add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
156 if (!p->key)
157 goto no_block;
158 while (--depth) {
159 bh = sb_getblk(sb, le32_to_cpu(p->key));
160 if (unlikely(!bh)) {
161 ret = -ENOMEM;
162 goto failure;
163 }
164
165 if (!bh_uptodate_or_lock(bh)) {
166 if (bh_submit_read(bh) < 0) {
167 put_bh(bh);
168 goto failure;
169 }
170 /* validate block references */
171 if (ext4_check_indirect_blockref(inode, bh)) {
172 put_bh(bh);
173 goto failure;
174 }
175 }
176
177 add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
178 /* Reader: end */
179 if (!p->key)
180 goto no_block;
181 }
182 return NULL;
183
184 failure:
185 *err = ret;
186 no_block:
187 return p;
188 }
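/*
 * For illustration (not part of the original code): after a successful
 * depth-3 walk the chain looks like
 *
 *	chain[0].p -> &EXT4_I(inode)->i_data[EXT4_DIND_BLOCK], chain[0].bh == NULL
 *	chain[1].p -> slot inside the double indirect block,   chain[1].bh == its buffer_head
 *	chain[2].p -> slot inside the indirect block,           chain[2].bh == its buffer_head
 *
 * with each chain[i].key holding the little-endian block number that
 * *chain[i].p contained when it was read; chain[2].key is the data
 * block itself.
 */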
189
190 /**
191 * ext4_find_near - find a place for allocation with sufficient locality
192 * @inode: owner
193 * @ind: descriptor of indirect block.
194 *
195 * This function returns the preferred place for block allocation.
196 * It is used when heuristic for sequential allocation fails.
197 * Rules are:
198 * + if there is a block to the left of our position - allocate near it.
199 * + if pointer will live in indirect block - allocate near that block.
200 * + if pointer will live in inode - allocate in the same
201 * cylinder group.
202 *
203 * In the latter case we colour the starting block by the caller's PID to
204 * prevent it from clashing with concurrent allocations for a different inode
205 * in the same block group. The PID is used here so that functionally related
206 * files will be close-by on-disk.
207 *
208 * Caller must make sure that @ind is valid and will stay that way.
209 */
210 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
211 {
212 struct ext4_inode_info *ei = EXT4_I(inode);
213 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
214 __le32 *p;
215
216 /* Try to find previous block */
217 for (p = ind->p - 1; p >= start; p--) {
218 if (*p)
219 return le32_to_cpu(*p);
220 }
221
222 /* No such thing, so let's try location of indirect block */
223 if (ind->bh)
224 return ind->bh->b_blocknr;
225
226 /*
227 * Is it going to be referred to from the inode itself? OK, just put it
228 * into the same cylinder group then.
229 */
230 return ext4_inode_to_goal_block(inode);
231 }
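/*
 * A concrete reading of the rules above (illustrative): if the slot just
 * before ind->p (or any earlier slot in the same indirect block or in
 * i_data) already maps, say, physical block 5000, the goal is 5000;
 * failing that, the block number of the indirect block that will hold
 * the new pointer; failing that, the inode's own goal block (same
 * cylinder group as the inode).
 */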
232
233 /**
234 * ext4_find_goal - find a preferred place for allocation.
235 * @inode: owner
236 * @block: block we want
237 * @partial: pointer to the last triple within a chain
238 *
239 * Normally this function finds the preferred place for block allocation
240 * and returns it.
241 * Because this is only used for non-extent files, we limit the block nr
242 * to 32 bits.
243 */
244 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
245 Indirect *partial)
246 {
247 ext4_fsblk_t goal;
248
249 /*
250 * XXX need to get goal block from mballoc's data structures
251 */
252
253 goal = ext4_find_near(inode, partial);
254 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
255 return goal;
256 }
257
258 /**
259 * ext4_blks_to_allocate - Look up the block map and count the number
260 * of direct blocks that need to be allocated for the given branch.
261 *
262 * @branch: chain of indirect blocks
263 * @k: number of blocks needed for indirect blocks
264 * @blks: number of data blocks to be mapped.
265 * @blocks_to_boundary: the offset in the indirect block
266 *
267 * return the total number of blocks to be allocated, including the
268 * direct and indirect blocks.
269 */
270 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
271 int blocks_to_boundary)
272 {
273 unsigned int count = 0;
274
275 /*
276 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
277 * so it is clear that the blocks on that path have not been allocated either.
278 */
279 if (k > 0) {
280 /* right now we don't handle cross boundary allocation */
281 if (blks < blocks_to_boundary + 1)
282 count += blks;
283 else
284 count += blocks_to_boundary + 1;
285 return count;
286 }
287
288 count++;
289 while (count < blks && count <= blocks_to_boundary &&
290 le32_to_cpu(*(branch[0].p + count)) == 0) {
291 count++;
292 }
293 return count;
294 }
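/*
 * Example (illustrative): with blks = 8 and blocks_to_boundary = 3, a
 * branch whose indirect block is still missing (k > 0) yields 4 - we
 * never allocate across the boundary of the indirect block.  With an
 * existing indirect block (k == 0) the following pointer slots must
 * also be zero, so if the very next slot is already mapped the result
 * is just 1.
 */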
295
296 /**
297 * ext4_alloc_branch() - allocate and set up a chain of blocks
298 * @handle: handle for this transaction
299 * @ar: structure describing the allocation request
300 * @indirect_blks: number of allocated indirect blocks
301 * @offsets: offsets (in the blocks) to store the pointers to next.
302 * @branch: place to store the chain in.
303 *
304 * This function allocates blocks, zeroes out all but the last one,
305 * links them into chain and (if we are synchronous) writes them to disk.
306 * In other words, it prepares a branch that can be spliced onto the
307 * inode. It stores the information about that chain in the branch[], in
308 * the same format as ext4_get_branch() would do. We are calling it after
309 * we had read the existing part of chain and partial points to the last
310 * triple of that (one with zero ->key). Upon the exit we have the same
311 * picture as after the successful ext4_get_block(), except that in one
312 * place chain is disconnected - *branch->p is still zero (we did not
313 * set the last link), but branch->key contains the number that should
314 * be placed into *branch->p to fill that gap.
315 *
316 * If allocation fails we free all blocks we've allocated (and forget
317 * their buffer_heads) and return the error value from the failed
318 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
319 * as described above and return 0.
320 */
321 static int ext4_alloc_branch(handle_t *handle,
322 struct ext4_allocation_request *ar,
323 int indirect_blks, ext4_lblk_t *offsets,
324 Indirect *branch)
325 {
326 struct buffer_head * bh;
327 ext4_fsblk_t b, new_blocks[4];
328 __le32 *p;
329 int i, j, err, len = 1;
330
331 for (i = 0; i <= indirect_blks; i++) {
332 if (i == indirect_blks) {
333 new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
334 } else
335 ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
336 ar->inode, ar->goal,
337 ar->flags & EXT4_MB_DELALLOC_RESERVED,
338 NULL, &err);
339 if (err) {
340 i--;
341 goto failed;
342 }
343 branch[i].key = cpu_to_le32(new_blocks[i]);
344 if (i == 0)
345 continue;
346
347 bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
348 if (unlikely(!bh)) {
349 err = -ENOMEM;
350 goto failed;
351 }
352 lock_buffer(bh);
353 BUFFER_TRACE(bh, "call get_create_access");
354 err = ext4_journal_get_create_access(handle, bh);
355 if (err) {
356 unlock_buffer(bh);
357 goto failed;
358 }
359
360 memset(bh->b_data, 0, bh->b_size);
361 p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
362 b = new_blocks[i];
363
364 if (i == indirect_blks)
365 len = ar->len;
366 for (j = 0; j < len; j++)
367 *p++ = cpu_to_le32(b++);
368
369 BUFFER_TRACE(bh, "marking uptodate");
370 set_buffer_uptodate(bh);
371 unlock_buffer(bh);
372
373 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
374 err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
375 if (err)
376 goto failed;
377 }
378 return 0;
379 failed:
380 for (; i >= 0; i--) {
381 /*
382 * We want to ext4_forget() only freshly allocated indirect
383 * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and
384 * buffer at branch[0].bh is indirect block / inode already
385 * existing before ext4_alloc_branch() was called.
386 */
387 if (i > 0 && i != indirect_blks && branch[i].bh)
388 ext4_forget(handle, 1, ar->inode, branch[i].bh,
389 branch[i].bh->b_blocknr);
390 ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
391 (i == indirect_blks) ? ar->len : 1, 0);
392 }
393 return err;
394 }
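/*
 * Resulting layout for, e.g., indirect_blks == 2 (illustrative):
 * new_blocks[0] and new_blocks[1] are the freshly allocated indirect
 * blocks and new_blocks[2] is the first data block.  branch[0].key
 * names new_blocks[0], but *branch[0].p (in the pre-existing parent)
 * is still zero; for i > 0, branch[i].bh is the buffer of
 * new_blocks[i-1] and branch[i].p points at the slot inside it that
 * already holds new_blocks[i].  ext4_splice_branch() fills in the one
 * missing link.
 */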
395
396 /**
397 * ext4_splice_branch() - splice the allocated branch onto inode.
398 * @handle: handle for this transaction
399 * @ar: structure describing the allocation request
400 * @where: location of missing link
401 * @num: number of indirect blocks we are adding
402 *
403 * This function fills the missing link and does all housekeeping needed in
404 * inode (->i_blocks, etc.). In case of success we end up with the full
405 * chain to new block and return 0.
406 */
407 static int ext4_splice_branch(handle_t *handle,
408 struct ext4_allocation_request *ar,
409 Indirect *where, int num)
410 {
411 int i;
412 int err = 0;
413 ext4_fsblk_t current_block;
414
415 /*
416 * If we're splicing into a [td]indirect block (as opposed to the
417 * inode) then we need to get write access to the [td]indirect block
418 * before the splice.
419 */
420 if (where->bh) {
421 BUFFER_TRACE(where->bh, "get_write_access");
422 err = ext4_journal_get_write_access(handle, where->bh);
423 if (err)
424 goto err_out;
425 }
426 /* That's it */
427
428 *where->p = where->key;
429
430 /*
431 * Update the host buffer_head or inode to point to the just-allocated
432 * direct blocks.
433 */
434 if (num == 0 && ar->len > 1) {
435 current_block = le32_to_cpu(where->key) + 1;
436 for (i = 1; i < ar->len; i++)
437 *(where->p + i) = cpu_to_le32(current_block++);
438 }
439
440 /* We are done with atomic stuff, now do the rest of housekeeping */
441 /* had we spliced it onto indirect block? */
442 if (where->bh) {
443 /*
444 * If we spliced it onto an indirect block, we haven't
445 * altered the inode. Note however that if it is being spliced
446 * onto an indirect block at the very end of the file (the
447 * file is growing) then we *will* alter the inode to reflect
448 * the new i_size. But that is not done here - it is done in
449 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
450 */
451 jbd_debug(5, "splicing indirect only\n");
452 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
453 err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
454 if (err)
455 goto err_out;
456 } else {
457 /*
458 * OK, we spliced it into the inode itself on a direct block.
459 */
460 ext4_mark_inode_dirty(handle, ar->inode);
461 jbd_debug(5, "splicing direct\n");
462 }
463 return err;
464
465 err_out:
466 for (i = 1; i <= num; i++) {
467 /*
468 * branch[i].bh is newly allocated, so there is no
469 * need to revoke the block, which is why we don't
470 * need to set EXT4_FREE_BLOCKS_METADATA.
471 */
472 ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
473 EXT4_FREE_BLOCKS_FORGET);
474 }
475 ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
476 ar->len, 0);
477
478 return err;
479 }
480
481 /*
482 * The ext4_ind_map_blocks() function handles non-extents inodes
483 * (i.e., using the traditional indirect/double-indirect i_blocks
484 * scheme) for ext4_map_blocks().
485 *
486 * Allocation strategy is simple: if we have to allocate something, we will
487 * have to go the whole way to leaf. So let's do it before attaching anything
488 * to tree, set linkage between the newborn blocks, write them if sync is
489 * required, recheck the path, free and repeat if check fails, otherwise
490 * set the last missing link (that will protect us from any truncate-generated
491 * removals - all blocks on the path are immune now) and possibly force the
492 * write on the parent block.
493 * That has a nice additional property: no special recovery from the failed
494 * allocations is needed - we simply release blocks and do not touch anything
495 * reachable from inode.
496 *
497 * `handle' can be NULL if create == 0.
498 *
499 * return > 0, # of blocks mapped or allocated.
500 * return = 0, if plain lookup failed.
501 * return < 0, error case.
502 *
503 * The ext4_ind_map_blocks() function should be called with
504 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
505 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
506 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
507 * blocks.
508 */
509 int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
510 struct ext4_map_blocks *map,
511 int flags)
512 {
513 struct ext4_allocation_request ar;
514 int err = -EIO;
515 ext4_lblk_t offsets[4];
516 Indirect chain[4];
517 Indirect *partial;
518 int indirect_blks;
519 int blocks_to_boundary = 0;
520 int depth;
521 int count = 0;
522 ext4_fsblk_t first_block = 0;
523
524 trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
525 J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
526 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
527 depth = ext4_block_to_path(inode, map->m_lblk, offsets,
528 &blocks_to_boundary);
529
530 if (depth == 0)
531 goto out;
532
533 partial = ext4_get_branch(inode, depth, offsets, chain, &err);
534
535 /* Simplest case - block found, no allocation needed */
536 if (!partial) {
537 first_block = le32_to_cpu(chain[depth - 1].key);
538 count++;
539 /*map more blocks*/
540 while (count < map->m_len && count <= blocks_to_boundary) {
541 ext4_fsblk_t blk;
542
543 blk = le32_to_cpu(*(chain[depth-1].p + count));
544
545 if (blk == first_block + count)
546 count++;
547 else
548 break;
549 }
550 goto got_it;
551 }
552
553 /* Next simple case - plain lookup failed */
554 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
555 unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
556 int i;
557
558 /*
559 * Count the number of blocks in the subtree under 'partial'. At each
560 * level we count the number of complete empty subtrees beyond the
561 * current offset and then descend into the one subtree that lies only
562 * partially beyond the current offset.
563 */
564 count = 0;
565 for (i = partial - chain + 1; i < depth; i++)
566 count = count * epb + (epb - offsets[i] - 1);
567 count++;
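		/*
		 * E.g. for depth 4 with the double indirect block absent
		 * (partial == chain + 1) this evaluates to
		 * (epb - offsets[2] - 1) * epb + (epb - offsets[3] - 1) + 1,
		 * the number of hole blocks starting at map->m_lblk within
		 * that missing subtree.
		 */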
568 /* Fill in size of a hole we found */
569 map->m_pblk = 0;
570 map->m_len = min_t(unsigned int, map->m_len, count);
571 goto cleanup;
572 }
573
574 /* Failed read of indirect block */
575 if (err == -EIO)
576 goto cleanup;
577
578 /*
579 * Okay, we need to do block allocation.
580 */
581 if (ext4_has_feature_bigalloc(inode->i_sb)) {
582 EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
583 "non-extent mapped inodes with bigalloc");
584 return -EFSCORRUPTED;
585 }
586
587 /* Set up for the direct block allocation */
588 memset(&ar, 0, sizeof(ar));
589 ar.inode = inode;
590 ar.logical = map->m_lblk;
591 if (S_ISREG(inode->i_mode))
592 ar.flags = EXT4_MB_HINT_DATA;
593 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
594 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
595 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
596 ar.flags |= EXT4_MB_USE_RESERVED;
597
598 ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
599
600 /* the number of blocks we need to allocate for [d,t]indirect blocks */
601 indirect_blks = (chain + depth) - partial - 1;
602
603 /*
604 * Next look up the indirect map to count the total number of
605 * direct blocks to allocate for this branch.
606 */
607 ar.len = ext4_blks_to_allocate(partial, indirect_blks,
608 map->m_len, blocks_to_boundary);
609
610 /*
611 * Block out ext4_truncate while we alter the tree
612 */
613 err = ext4_alloc_branch(handle, &ar, indirect_blks,
614 offsets + (partial - chain), partial);
615
616 /*
617 * The ext4_splice_branch call will free and forget any buffers
618 * on the new chain if there is a failure, but that risks using
619 * up transaction credits, especially for bitmaps where the
620 * credits cannot be returned. Can we handle this somehow? We
621 * may need to return -EAGAIN upwards in the worst case. --sct
622 */
623 if (!err)
624 err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
625 if (err)
626 goto cleanup;
627
628 map->m_flags |= EXT4_MAP_NEW;
629
630 ext4_update_inode_fsync_trans(handle, inode, 1);
631 count = ar.len;
632 got_it:
633 map->m_flags |= EXT4_MAP_MAPPED;
634 map->m_pblk = le32_to_cpu(chain[depth-1].key);
635 map->m_len = count;
636 if (count > blocks_to_boundary)
637 map->m_flags |= EXT4_MAP_BOUNDARY;
638 err = count;
639 /* Clean up and exit */
640 partial = chain + depth - 1; /* the whole chain */
641 cleanup:
642 while (partial > chain) {
643 BUFFER_TRACE(partial->bh, "call brelse");
644 brelse(partial->bh);
645 partial--;
646 }
647 out:
648 trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
649 return err;
650 }
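/*
 * Illustrative read-side usage (a sketch, not taken from an actual
 * caller): 'handle' may be NULL because nothing is allocated, and the
 * locking follows the rule stated above.
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret;
 *
 *	down_read(&EXT4_I(inode)->i_data_sem);
 *	ret = ext4_ind_map_blocks(NULL, inode, &map, 0);
 *	up_read(&EXT4_I(inode)->i_data_sem);
 *
 * ret > 0: map.m_pblk holds the physical block; ret == 0: hole;
 * ret < 0: error.
 */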
651
652 /*
653 * Calculate the number of metadata blocks that need to be reserved
654 * to allocate a new block at @lblock for a non-extent-based file
655 */
656 int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
657 {
658 struct ext4_inode_info *ei = EXT4_I(inode);
659 sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
660 int blk_bits;
661
662 if (lblock < EXT4_NDIR_BLOCKS)
663 return 0;
664
665 lblock -= EXT4_NDIR_BLOCKS;
666
667 if (ei->i_da_metadata_calc_len &&
668 (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
669 ei->i_da_metadata_calc_len++;
670 return 0;
671 }
672 ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
673 ei->i_da_metadata_calc_len = 1;
674 blk_bits = order_base_2(lblock);
675 return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
676 }
677
678 /*
679 * Calculate the number of indirect blocks touched by mapping @nrblocks logically
680 * contiguous blocks
681 */
682 int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
683 {
684 /*
685 * With N contiguous data blocks, we need at most
686 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
687 * 2 dindirect blocks, and 1 tindirect block
688 */
689 return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
690 }
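/*
 * E.g. with 4KB blocks (1024 pointers per indirect block), mapping 10
 * contiguous blocks is bounded by DIV_ROUND_UP(10, 1024) + 4 = 5
 * metadata blocks in the worst case.
 */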
691
692 /*
693 * Truncate transactions can be complex and absolutely huge. So we need to
694 * be able to restart the transaction at a convenient checkpoint to make
695 * sure we don't overflow the journal.
696 *
697 * Try to extend this transaction for the purposes of truncation. If
698 * extend fails, we need to propagate the failure up and restart the
699 * transaction in the top-level truncate loop. --sct
700 *
701 * Returns 0 if we managed to create more room. If we can't create more
702 * room and the transaction must be restarted, we return 1.
703 */
704 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
705 {
706 if (!ext4_handle_valid(handle))
707 return 0;
708 if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
709 return 0;
710 if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
711 return 0;
712 return 1;
713 }
714
715 /*
716 * Probably it should be a library function... search for first non-zero word
717 * or memcmp with zero_page, whatever is better for particular architecture.
718 * Linus?
719 */
720 static inline int all_zeroes(__le32 *p, __le32 *q)
721 {
722 while (p < q)
723 if (*p++)
724 return 0;
725 return 1;
726 }
727
728 /**
729 * ext4_find_shared - find the indirect blocks for partial truncation.
730 * @inode: inode in question
731 * @depth: depth of the affected branch
732 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
733 * @chain: place to store the pointers to partial indirect blocks
734 * @top: place to the (detached) top of branch
735 *
736 * This is a helper function used by ext4_truncate().
737 *
738 * When we do truncate() we may have to clean the ends of several
739 * indirect blocks but leave the blocks themselves alive. A block is
740 * partially truncated if some data below the new i_size is referred to
741 * from it (and it is on the path to the first completely truncated
742 * data block, indeed). We have to free the top of that path along
743 * with everything to the right of the path. Since no allocation
744 * past the truncation point is possible until ext4_truncate()
745 * finishes, we may safely do the latter, but top of branch may
746 * require special attention - pageout below the truncation point
747 * might try to populate it.
748 *
749 * We atomically detach the top of branch from the tree, store the
750 * block number of its root in *@top, pointers to buffer_heads of
751 * partially truncated blocks - in @chain[].bh and pointers to
752 * their last elements that should not be removed - in
753 * @chain[].p. Return value is the pointer to last filled element
754 * of @chain.
755 *
756 * The work of actually freeing the subtrees is left to the caller:
757 * a) free the subtree starting from *@top
758 * b) free the subtrees whose roots are stored in
759 * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
760 * c) free the subtrees growing from the inode past the @chain[0].
761 * (no partially truncated stuff there). */
762
763 static Indirect *ext4_find_shared(struct inode *inode, int depth,
764 ext4_lblk_t offsets[4], Indirect chain[4],
765 __le32 *top)
766 {
767 Indirect *partial, *p;
768 int k, err;
769
770 *top = 0;
771 /* Make k index the deepest non-null offset + 1 */
772 for (k = depth; k > 1 && !offsets[k-1]; k--)
773 ;
774 partial = ext4_get_branch(inode, k, offsets, chain, &err);
775 /* Writer: pointers */
776 if (!partial)
777 partial = chain + k-1;
778 /*
779 * If the branch acquired continuation since we've looked at it -
780 * fine, it should all survive and (new) top doesn't belong to us.
781 */
782 if (!partial->key && *partial->p)
783 /* Writer: end */
784 goto no_top;
785 for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
786 ;
787 /*
788 * OK, we've found the last block that must survive. The rest of our
789 * branch should be detached before unlocking. However, if that rest
790 * of branch is all ours and does not grow immediately from the inode
791 * it's easier to cheat and just decrement partial->p.
792 */
793 if (p == chain + k - 1 && p > chain) {
794 p->p--;
795 } else {
796 *top = *p->p;
797 /* Nope, don't do this in ext4. Must leave the tree intact */
798 #if 0
799 *p->p = 0;
800 #endif
801 }
802 /* Writer: end */
803
804 while (partial > p) {
805 brelse(partial->bh);
806 partial--;
807 }
808 no_top:
809 return partial;
810 }
811
812 /*
813 * Zero a number of block pointers in either an inode or an indirect block.
814 * If we restart the transaction we must again get write access to the
815 * indirect block for further modification.
816 *
817 * We release `count' blocks on disk, but (last - first) may be greater
818 * than `count' because there can be holes in there.
819 *
820 * Return 0 on success, 1 on invalid block range
821 * and < 0 on fatal error.
822 */
823 static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
824 struct buffer_head *bh,
825 ext4_fsblk_t block_to_free,
826 unsigned long count, __le32 *first,
827 __le32 *last)
828 {
829 __le32 *p;
830 int flags = EXT4_FREE_BLOCKS_VALIDATED;
831 int err;
832
833 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
834 ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
835 flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
836 else if (ext4_should_journal_data(inode))
837 flags |= EXT4_FREE_BLOCKS_FORGET;
838
839 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
840 count)) {
841 EXT4_ERROR_INODE(inode, "attempt to clear invalid "
842 "blocks %llu len %lu",
843 (unsigned long long) block_to_free, count);
844 return 1;
845 }
846
847 if (try_to_extend_transaction(handle, inode)) {
848 if (bh) {
849 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
850 err = ext4_handle_dirty_metadata(handle, inode, bh);
851 if (unlikely(err))
852 goto out_err;
853 }
854 err = ext4_mark_inode_dirty(handle, inode);
855 if (unlikely(err))
856 goto out_err;
857 err = ext4_truncate_restart_trans(handle, inode,
858 ext4_blocks_for_truncate(inode));
859 if (unlikely(err))
860 goto out_err;
861 if (bh) {
862 BUFFER_TRACE(bh, "retaking write access");
863 err = ext4_journal_get_write_access(handle, bh);
864 if (unlikely(err))
865 goto out_err;
866 }
867 }
868
869 for (p = first; p < last; p++)
870 *p = 0;
871
872 ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
873 return 0;
874 out_err:
875 ext4_std_error(inode->i_sb, err);
876 return err;
877 }
878
879 /**
880 * ext4_free_data - free a list of data blocks
881 * @handle: handle for this transaction
882 * @inode: inode we are dealing with
883 * @this_bh: indirect buffer_head which contains *@first and *@last
884 * @first: array of block numbers
885 * @last: points immediately past the end of array
886 *
887 * We are freeing all blocks referred from that array (numbers are stored as
888 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
889 *
890 * We accumulate contiguous runs of blocks to free. Conveniently, if these
891 * blocks are contiguous then releasing them at one time will only affect one
892 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
893 * actually use a lot of journal space.
894 *
895 * @this_bh will be %NULL if @first and @last point into the inode's direct
896 * block pointers.
897 */
898 static void ext4_free_data(handle_t *handle, struct inode *inode,
899 struct buffer_head *this_bh,
900 __le32 *first, __le32 *last)
901 {
902 ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
903 unsigned long count = 0; /* Number of blocks in the run */
904 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
905 corresponding to
906 block_to_free */
907 ext4_fsblk_t nr; /* Current block # */
908 __le32 *p; /* Pointer into inode/ind
909 for current block */
910 int err = 0;
911
912 if (this_bh) { /* For indirect block */
913 BUFFER_TRACE(this_bh, "get_write_access");
914 err = ext4_journal_get_write_access(handle, this_bh);
915 /* Important: if we can't update the indirect pointers
916 * to the blocks, we can't free them. */
917 if (err)
918 return;
919 }
920
921 for (p = first; p < last; p++) {
922 nr = le32_to_cpu(*p);
923 if (nr) {
924 /* accumulate blocks to free if they're contiguous */
925 if (count == 0) {
926 block_to_free = nr;
927 block_to_free_p = p;
928 count = 1;
929 } else if (nr == block_to_free + count) {
930 count++;
931 } else {
932 err = ext4_clear_blocks(handle, inode, this_bh,
933 block_to_free, count,
934 block_to_free_p, p);
935 if (err)
936 break;
937 block_to_free = nr;
938 block_to_free_p = p;
939 count = 1;
940 }
941 }
942 }
943
944 if (!err && count > 0)
945 err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
946 count, block_to_free_p, p);
947 if (err < 0)
948 /* fatal error */
949 return;
950
951 if (this_bh) {
952 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
953
954 /*
955 * The buffer head should have an attached journal head at this
956 * point. However, if the data is corrupted and an indirect
957 * block pointed to itself, it would have been detached when
958 * the block was cleared. Check for this instead of OOPSing.
959 */
960 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
961 ext4_handle_dirty_metadata(handle, inode, this_bh);
962 else
963 EXT4_ERROR_INODE(inode,
964 "circular indirect block detected at "
965 "block %llu",
966 (unsigned long long) this_bh->b_blocknr);
967 }
968 }
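/*
 * For example (illustrative), an array of pointers { 100, 101, 102, 0,
 * 200 } is released as two runs: ext4_clear_blocks(..., 100, 3, ...)
 * for the contiguous run and ext4_clear_blocks(..., 200, 1, ...) for
 * the last block; the hole in the middle contributes nothing.
 */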
969
970 /**
971 * ext4_free_branches - free an array of branches
972 * @handle: JBD handle for this transaction
973 * @inode: inode we are dealing with
974 * @parent_bh: the buffer_head which contains *@first and *@last
975 * @first: array of block numbers
976 * @last: pointer immediately past the end of array
977 * @depth: depth of the branches to free
978 *
979 * We are freeing all blocks referred from these branches (numbers are
980 * stored as little-endian 32-bit) and updating @inode->i_blocks
981 * appropriately.
982 */
983 static void ext4_free_branches(handle_t *handle, struct inode *inode,
984 struct buffer_head *parent_bh,
985 __le32 *first, __le32 *last, int depth)
986 {
987 ext4_fsblk_t nr;
988 __le32 *p;
989
990 if (ext4_handle_is_aborted(handle))
991 return;
992
993 if (depth--) {
994 struct buffer_head *bh;
995 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
996 p = last;
997 while (--p >= first) {
998 nr = le32_to_cpu(*p);
999 if (!nr)
1000 continue; /* A hole */
1001
1002 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
1003 nr, 1)) {
1004 EXT4_ERROR_INODE(inode,
1005 "invalid indirect mapped "
1006 "block %lu (level %d)",
1007 (unsigned long) nr, depth);
1008 break;
1009 }
1010
1011 /* Go read the buffer for the next level down */
1012 bh = sb_bread(inode->i_sb, nr);
1013
1014 /*
1015 * A read failure? Report error and clear slot
1016 * (should be rare).
1017 */
1018 if (!bh) {
1019 EXT4_ERROR_INODE_BLOCK(inode, nr,
1020 "Read failure");
1021 continue;
1022 }
1023
1024 /* This zaps the entire block. Bottom up. */
1025 BUFFER_TRACE(bh, "free child branches");
1026 ext4_free_branches(handle, inode, bh,
1027 (__le32 *) bh->b_data,
1028 (__le32 *) bh->b_data + addr_per_block,
1029 depth);
1030 brelse(bh);
1031
1032 /*
1033 * Everything below this pointer has been
1034 * released. Now let this top-of-subtree go.
1035 *
1036 * We want the freeing of this indirect block to be
1037 * atomic in the journal with the updating of the
1038 * bitmap block which owns it. So make some room in
1039 * the journal.
1040 *
1041 * We zero the parent pointer *after* freeing its
1042 * pointee in the bitmaps, so if extend_transaction()
1043 * for some reason fails to put the bitmap changes and
1044 * the release into the same transaction, recovery
1045 * will merely complain about releasing a free block,
1046 * rather than leaking blocks.
1047 */
1048 if (ext4_handle_is_aborted(handle))
1049 return;
1050 if (try_to_extend_transaction(handle, inode)) {
1051 ext4_mark_inode_dirty(handle, inode);
1052 ext4_truncate_restart_trans(handle, inode,
1053 ext4_blocks_for_truncate(inode));
1054 }
1055
1056 /*
1057 * The forget flag here is critical because if
1058 * we are journaling (and not doing data
1059 * journaling), we have to make sure a revoke
1060 * record is written to prevent the journal
1061 * replay from overwriting the (former)
1062 * indirect block if it gets reallocated as a
1063 * data block. This must happen in the same
1064 * transaction where the data blocks are
1065 * actually freed.
1066 */
1067 ext4_free_blocks(handle, inode, NULL, nr, 1,
1068 EXT4_FREE_BLOCKS_METADATA|
1069 EXT4_FREE_BLOCKS_FORGET);
1070
1071 if (parent_bh) {
1072 /*
1073 * The block which we have just freed is
1074 * pointed to by an indirect block: journal it
1075 */
1076 BUFFER_TRACE(parent_bh, "get_write_access");
1077 if (!ext4_journal_get_write_access(handle,
1078 parent_bh)){
1079 *p = 0;
1080 BUFFER_TRACE(parent_bh,
1081 "call ext4_handle_dirty_metadata");
1082 ext4_handle_dirty_metadata(handle,
1083 inode,
1084 parent_bh);
1085 }
1086 }
1087 }
1088 } else {
1089 /* We have reached the bottom of the tree. */
1090 BUFFER_TRACE(parent_bh, "free data blocks");
1091 ext4_free_data(handle, inode, parent_bh, first, last);
1092 }
1093 }
1094
1095 void ext4_ind_truncate(handle_t *handle, struct inode *inode)
1096 {
1097 struct ext4_inode_info *ei = EXT4_I(inode);
1098 __le32 *i_data = ei->i_data;
1099 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1100 ext4_lblk_t offsets[4];
1101 Indirect chain[4];
1102 Indirect *partial;
1103 __le32 nr = 0;
1104 int n = 0;
1105 ext4_lblk_t last_block, max_block;
1106 unsigned blocksize = inode->i_sb->s_blocksize;
1107
1108 last_block = (inode->i_size + blocksize-1)
1109 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
1110 max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
1111 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
1112
1113 if (last_block != max_block) {
1114 n = ext4_block_to_path(inode, last_block, offsets, NULL);
1115 if (n == 0)
1116 return;
1117 }
1118
1119 ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
1120
1121 /*
1122 * The orphan list entry will now protect us from any crash which
1123 * occurs before the truncate completes, so it is now safe to propagate
1124 * the new, shorter inode size (held for now in i_size) into the
1125 * on-disk inode. We do this via i_disksize, which is the value which
1126 * ext4 *really* writes onto the disk inode.
1127 */
1128 ei->i_disksize = inode->i_size;
1129
1130 if (last_block == max_block) {
1131 /*
1132 * It is unnecessary to free any data blocks if last_block is
1133 * equal to the indirect block limit.
1134 */
1135 return;
1136 } else if (n == 1) { /* direct blocks */
1137 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
1138 i_data + EXT4_NDIR_BLOCKS);
1139 goto do_indirects;
1140 }
1141
1142 partial = ext4_find_shared(inode, n, offsets, chain, &nr);
1143 /* Kill the top of shared branch (not detached) */
1144 if (nr) {
1145 if (partial == chain) {
1146 /* Shared branch grows from the inode */
1147 ext4_free_branches(handle, inode, NULL,
1148 &nr, &nr+1, (chain+n-1) - partial);
1149 *partial->p = 0;
1150 /*
1151 * We mark the inode dirty prior to restart,
1152 * and prior to stop. No need for it here.
1153 */
1154 } else {
1155 /* Shared branch grows from an indirect block */
1156 BUFFER_TRACE(partial->bh, "get_write_access");
1157 ext4_free_branches(handle, inode, partial->bh,
1158 partial->p,
1159 partial->p+1, (chain+n-1) - partial);
1160 }
1161 }
1162 /* Clear the ends of indirect blocks on the shared branch */
1163 while (partial > chain) {
1164 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
1165 (__le32*)partial->bh->b_data+addr_per_block,
1166 (chain+n-1) - partial);
1167 BUFFER_TRACE(partial->bh, "call brelse");
1168 brelse(partial->bh);
1169 partial--;
1170 }
1171 do_indirects:
1172 /* Kill the remaining (whole) subtrees */
1173 switch (offsets[0]) {
1174 default:
1175 nr = i_data[EXT4_IND_BLOCK];
1176 if (nr) {
1177 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
1178 i_data[EXT4_IND_BLOCK] = 0;
1179 }
1180 /* fall through */
1181 case EXT4_IND_BLOCK:
1182 nr = i_data[EXT4_DIND_BLOCK];
1183 if (nr) {
1184 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
1185 i_data[EXT4_DIND_BLOCK] = 0;
1186 }
1187 /* fall through */
1188 case EXT4_DIND_BLOCK:
1189 nr = i_data[EXT4_TIND_BLOCK];
1190 if (nr) {
1191 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
1192 i_data[EXT4_TIND_BLOCK] = 0;
1193 }
1194 /* fall through */
1195 case EXT4_TIND_BLOCK:
1196 ;
1197 }
1198 }
1199
1200 /**
1201 * ext4_ind_remove_space - remove space from the range
1202 * @handle: JBD handle for this transaction
1203 * @inode: inode we are dealing with
1204 * @start: First block to remove
1205 * @end: One block after the last block to remove (exclusive)
1206 *
1207 * Free the blocks in the defined range (end is exclusive endpoint of
1208 * range). This is used by ext4_punch_hole().
1209 */
1210 int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
1211 ext4_lblk_t start, ext4_lblk_t end)
1212 {
1213 struct ext4_inode_info *ei = EXT4_I(inode);
1214 __le32 *i_data = ei->i_data;
1215 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1216 ext4_lblk_t offsets[4], offsets2[4];
1217 Indirect chain[4], chain2[4];
1218 Indirect *partial, *partial2;
1219 Indirect *p = NULL, *p2 = NULL;
1220 ext4_lblk_t max_block;
1221 __le32 nr = 0, nr2 = 0;
1222 int n = 0, n2 = 0;
1223 unsigned blocksize = inode->i_sb->s_blocksize;
1224
1225 max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
1226 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
1227 if (end >= max_block)
1228 end = max_block;
1229 if ((start >= end) || (start > max_block))
1230 return 0;
1231
1232 n = ext4_block_to_path(inode, start, offsets, NULL);
1233 n2 = ext4_block_to_path(inode, end, offsets2, NULL);
1234
1235 BUG_ON(n > n2);
1236
1237 if ((n == 1) && (n == n2)) {
1238 /* We're punching only within direct block range */
1239 ext4_free_data(handle, inode, NULL, i_data + offsets[0],
1240 i_data + offsets2[0]);
1241 return 0;
1242 } else if (n2 > n) {
1243 /*
1244 * Start and end are at different levels, so we're going to
1245 * free partial block at start, and partial block at end of
1246 * the range. If there are some levels in between then
1247 * do_indirects label will take care of that.
1248 */
1249
1250 if (n == 1) {
1251 /*
1252 * Start is at the direct block level, free
1253 * everything to the end of the level.
1254 */
1255 ext4_free_data(handle, inode, NULL, i_data + offsets[0],
1256 i_data + EXT4_NDIR_BLOCKS);
1257 goto end_range;
1258 }
1259
1260
1261 partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
1262 if (nr) {
1263 if (partial == chain) {
1264 /* Shared branch grows from the inode */
1265 ext4_free_branches(handle, inode, NULL,
1266 &nr, &nr+1, (chain+n-1) - partial);
1267 *partial->p = 0;
1268 } else {
1269 /* Shared branch grows from an indirect block */
1270 BUFFER_TRACE(partial->bh, "get_write_access");
1271 ext4_free_branches(handle, inode, partial->bh,
1272 partial->p,
1273 partial->p+1, (chain+n-1) - partial);
1274 }
1275 }
1276
1277 /*
1278 * Clear the ends of indirect blocks on the shared branch
1279 * at the start of the range
1280 */
1281 while (partial > chain) {
1282 ext4_free_branches(handle, inode, partial->bh,
1283 partial->p + 1,
1284 (__le32 *)partial->bh->b_data+addr_per_block,
1285 (chain+n-1) - partial);
1286 partial--;
1287 }
1288
1289 end_range:
1290 partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
1291 if (nr2) {
1292 if (partial2 == chain2) {
1293 /*
1294 * Remember, end is exclusive so here we're at
1295 * the start of the next level we're not going
1296 * to free. Everything was covered by the start
1297 * of the range.
1298 */
1299 goto do_indirects;
1300 }
1301 } else {
1302 /*
1303 * ext4_find_shared returns an Indirect structure which
1304 * points to the last element that should not be
1305 * removed by truncate. But this is the end of the range
1306 * in punch_hole, so we need to point to the next element
1307 */
1308 partial2->p++;
1309 }
1310
1311 /*
1312 * Clear the ends of indirect blocks on the shared branch
1313 * at the end of the range
1314 */
1315 while (partial2 > chain2) {
1316 ext4_free_branches(handle, inode, partial2->bh,
1317 (__le32 *)partial2->bh->b_data,
1318 partial2->p,
1319 (chain2+n2-1) - partial2);
1320 partial2--;
1321 }
1322 goto do_indirects;
1323 }
1324
1325 /* Punch happened within the same level (n == n2) */
1326 partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
1327 partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
1328
1329 /* Free top, but only if partial2 isn't its subtree. */
1330 if (nr) {
1331 int level = min(partial - chain, partial2 - chain2);
1332 int i;
1333 int subtree = 1;
1334
1335 for (i = 0; i <= level; i++) {
1336 if (offsets[i] != offsets2[i]) {
1337 subtree = 0;
1338 break;
1339 }
1340 }
1341
1342 if (!subtree) {
1343 if (partial == chain) {
1344 /* Shared branch grows from the inode */
1345 ext4_free_branches(handle, inode, NULL,
1346 &nr, &nr+1,
1347 (chain+n-1) - partial);
1348 *partial->p = 0;
1349 } else {
1350 /* Shared branch grows from an indirect block */
1351 BUFFER_TRACE(partial->bh, "get_write_access");
1352 ext4_free_branches(handle, inode, partial->bh,
1353 partial->p,
1354 partial->p+1,
1355 (chain+n-1) - partial);
1356 }
1357 }
1358 }
1359
1360 if (!nr2) {
1361 /*
1362 * ext4_find_shared returns an Indirect structure which
1363 * points to the last element that should not be
1364 * removed by truncate. But this is the end of the range
1365 * in punch_hole, so we need to point to the next element
1366 */
1367 partial2->p++;
1368 }
1369
1370 while (partial > chain || partial2 > chain2) {
1371 int depth = (chain+n-1) - partial;
1372 int depth2 = (chain2+n2-1) - partial2;
1373
1374 if (partial > chain && partial2 > chain2 &&
1375 partial->bh->b_blocknr == partial2->bh->b_blocknr) {
1376 /*
1377 * We've converged on the same block. Clear the range,
1378 * then we're done.
1379 */
1380 ext4_free_branches(handle, inode, partial->bh,
1381 partial->p + 1,
1382 partial2->p,
1383 (chain+n-1) - partial);
1384 goto cleanup;
1385 }
1386
1387 /*
1388 * The start and end partial branches may not be at the same
1389 * level even though the punch happened within one level. So, we
1390 * give them a chance to arrive at the same level, then walk
1391 * them in step with each other until we converge on the same
1392 * block.
1393 */
1394 if (partial > chain && depth <= depth2) {
1395 ext4_free_branches(handle, inode, partial->bh,
1396 partial->p + 1,
1397 (__le32 *)partial->bh->b_data+addr_per_block,
1398 (chain+n-1) - partial);
1399 partial--;
1400 }
1401 if (partial2 > chain2 && depth2 <= depth) {
1402 ext4_free_branches(handle, inode, partial2->bh,
1403 (__le32 *)partial2->bh->b_data,
1404 partial2->p,
1405 (chain2+n2-1) - partial2);
1406 partial2--;
1407 }
1408 }
1409
1410 cleanup:
1411 while (p && p > chain) {
1412 BUFFER_TRACE(p->bh, "call brelse");
1413 brelse(p->bh);
1414 p--;
1415 }
1416 while (p2 && p2 > chain2) {
1417 BUFFER_TRACE(p2->bh, "call brelse");
1418 brelse(p2->bh);
1419 p2--;
1420 }
1421 return 0;
1422
1423 do_indirects:
1424 /* Kill the remaining (whole) subtrees */
1425 switch (offsets[0]) {
1426 default:
1427 if (++n >= n2)
1428 break;
1429 nr = i_data[EXT4_IND_BLOCK];
1430 if (nr) {
1431 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
1432 i_data[EXT4_IND_BLOCK] = 0;
1433 }
1434 /* fall through */
1435 case EXT4_IND_BLOCK:
1436 if (++n >= n2)
1437 break;
1438 nr = i_data[EXT4_DIND_BLOCK];
1439 if (nr) {
1440 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
1441 i_data[EXT4_DIND_BLOCK] = 0;
1442 }
1443 /* fall through */
1444 case EXT4_DIND_BLOCK:
1445 if (++n >= n2)
1446 break;
1447 nr = i_data[EXT4_TIND_BLOCK];
1448 if (nr) {
1449 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
1450 i_data[EXT4_TIND_BLOCK] = 0;
1451 }
1452 /* fall through */
1453 case EXT4_TIND_BLOCK:
1454 ;
1455 }
1456 goto cleanup;
1457 }
1458