// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/indirect.c
 *
 *  from
 *
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 */

#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/dax.h>
#include <linux/uio.h>

#include <trace/events/ext4.h>

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into an array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of a file's data, ext4 uses a data structure
 *	common to UNIX filesystems - a tree of pointers anchored in the inode,
 *	with data blocks at the leaves and indirect blocks in intermediate
 *	nodes. This function translates the block number into a path in that
 *	tree - the return value is the path length and @offsets[n] is the
 *	offset of the pointer to the (n+1)th node in the nth one. If @i_block
 *	is out of range (negative or too large), a warning is printed and zero
 *	is returned.
 *
 *	Note: the function doesn't find node addresses, so no IO is needed.
 *	All we need to know is the capacity of indirect blocks (taken from
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
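
/*
 * A worked example of the mapping above, assuming 4KiB blocks (so
 * ptrs == 1024) and the usual EXT4_NDIR_BLOCKS == 12:
 *
 *	i_block = 70000
 *	70000 - 12 = 69988	(past the direct blocks)
 *	69988 - 1024 = 68964	(past the single-indirect range)
 *	68964 < 1024 * 1024	(fits in the double-indirect range)
 *	offsets = { EXT4_DIND_BLOCK, 68964 >> 10 = 67, 68964 & 1023 = 356 }
 *	depth (return value) = 3
 *	*boundary = 1024 - 1 - 356 = 667 blocks remain before the end of
 *	the leaf indirect block.
 */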

/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, the addresses they were taken from (where we can
 *	verify that the chain did not change) and the buffer_heads hosting
 *	these numbers.
 *
 *	Function stops when it stumbles upon a zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 *	Needs to be called with
 *	down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t  *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	unsigned int key;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		key = le32_to_cpu(p->key);
		if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) {
			/* the block was out of range */
			ret = -EFSCORRUPTED;
			goto failure;
		}
		bh = sb_getblk(sb, key);
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (ext4_read_bh(bh, 0, NULL) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
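
/*
 * An illustrative picture of the result (block numbers hypothetical):
 * for depth == 3 and offsets == {EXT4_DIND_BLOCK, 67, 356}, a successful
 * walk returns NULL with *err == 0 and leaves:
 *
 *	chain[0].p   -> &EXT4_I(inode)->i_data[EXT4_DIND_BLOCK]
 *	chain[0].key =  number of the dindirect block, chain[0].bh == NULL
 *	chain[1].bh  =  buffer of the dindirect block
 *	chain[1].p   -> slot 67 inside it, chain[1].key = indirect block nr
 *	chain[2].bh  =  buffer of the indirect block
 *	chain[2].p   -> slot 356 inside it, chain[2].key = data block nr
 */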

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if the pointer will live in an indirect block - allocate near
 *	    that block.
 *	  + if the pointer will live in the inode - allocate in the same
 *	    cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close together on disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Is it going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}
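
/*
 * Example (hypothetical contents): if ind->p points at slot 200 of an
 * indirect block and slot 199 holds block 51000, the scan above returns
 * 51000. If slots 0..199 are all zero, we fall back to the indirect
 * block's own location, and only a pointer living in the inode itself
 * reaches ext4_inode_to_goal_block().
 */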

/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block:  block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation
 *	and returns it.
 *	Because this is only used for non-extent files, we limit the block nr
 *	to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 *	ext4_blks_to_allocate - Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary:  the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly the blocks on that path have not been allocated
	 * either.
	 */
	if (k > 0) {
		/* right now we don't handle cross-boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
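
/*
 * A worked example with hypothetical numbers: for blks == 8 and
 * blocks_to_boundary == 5, a branch that is still missing indirect
 * blocks (k > 0) yields count == blocks_to_boundary + 1 == 6, since we
 * never allocate across an indirect-block boundary. With k == 0 the
 * leaf indirect block already exists, so we start at 1 and extend the
 * run while the following slots are still zero, again capped at
 * min(blks, blocks_to_boundary + 1).
 */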

/**
 * ext4_alloc_branch() - allocate and set up a chain of blocks
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @indirect_blks: number of allocated indirect blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into a chain and (if we are synchronous) writes them to
 *	disk. In other words, it prepares a branch that can be spliced onto
 *	the inode. It stores the information about that chain in branch[],
 *	in the same format as ext4_get_branch() would do. We are calling it
 *	after we had read the existing part of the chain and partial points
 *	to the last triple of that (the one with zero ->key). Upon exit we
 *	have the same picture as after a successful ext4_get_block(), except
 *	that in one place the chain is disconnected - *branch->p is still
 *	zero (we did not set the last link), but branch->key contains the
 *	number that should be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *		bh;
	ext4_fsblk_t			b, new_blocks[4];
	__le32				*p;
	int				i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else {
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
			/* Simplify error cleanup... */
			branch[i+1].bh = NULL;
		}
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	if (i == indirect_blks) {
		/* Free data blocks */
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 ar->len, 0);
		i--;
	}
	for (; i >= 0; i--) {
		/*
		 * We want to ext4_forget() only freshly allocated indirect
		 * blocks. Buffer for new_blocks[i] is at branch[i+1].bh
		 * (buffer at branch[0].bh is indirect block / inode already
		 * existing before ext4_alloc_branch() was called). Also
		 * because blocks are freshly allocated, we don't need to
		 * revoke them which is why we don't set
		 * EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, branch[i+1].bh,
				 new_blocks[i], 1,
				 branch[i+1].bh ? EXT4_FREE_BLOCKS_FORGET : 0);
	}
	return err;
}
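
/*
 * An illustrative layout (block numbers hypothetical): for
 * indirect_blks == 2 and ar->len == 3, the loop allocates new_blocks[0]
 * (the missing dindirect block), new_blocks[1] (the missing indirect
 * block) and then three data blocks starting at new_blocks[2].
 * branch[1].bh hosts new_blocks[0] and stores a pointer to new_blocks[1]
 * at offsets[1]; branch[2].bh hosts new_blocks[1] and is seeded with the
 * three contiguous data block numbers at offsets[2]. branch[0].key holds
 * new_blocks[0], but *branch[0].p stays zero until ext4_splice_branch().
 */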

/**
 * ext4_splice_branch() - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to the new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto an indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		err = ext4_mark_inode_dirty(handle, ar->inode);
		if (unlikely(err))
			goto err_out;
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}
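
/*
 * Before the splice, *where->p == 0 and where->key holds the first newly
 * allocated block number; afterwards *where->p == where->key. With
 * hypothetical numbers: for num == 0, ar->len == 4 and
 * where->key == cpu_to_le32(5000), slots where->p[1..3] end up holding
 * 5001..5003, since a multi-block direct allocation is contiguous here.
 */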

/*
 * The ext4_ind_map_blocks() function handles non-extent inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to the leaf. So let's do it before attaching
 * anything to the tree, set linkage between the newborn blocks, write them
 * if sync is required, recheck the path, free and repeat if the check fails,
 * otherwise set the last missing link (that will protect us from any
 * truncate-generated removals - all blocks on the path are immune now) and
 * possibly force the write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from the inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/*
		 * Count the number of blocks in the subtree under 'partial'.
		 * At each level we count the number of completely empty
		 * subtrees beyond the current offset, and then descend into
		 * the subtree that is only partially beyond the current
		 * offset.
		 */
		count = 0;
		for (i = partial - chain + 1; i < depth; i++)
			count = count * epb + (epb - offsets[i] - 1);
		count++;
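		/*
		 * Worked example (assuming 4KiB blocks, epb == 1024): if the
		 * walk stopped right at the inode (partial == chain) with
		 * depth == 3 and offsets == {EXT4_DIND_BLOCK, 67, 356}, the
		 * loop yields count = (1024 - 67 - 1) * 1024 +
		 * (1024 - 356 - 1), and the increment above adds the block
		 * at the current offset itself.
		 */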
		/* Fill in size of a hole we found */
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}

	/* Failed read of indirect block */
	if (err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (ext4_has_feature_bigalloc(inode->i_sb)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		err = -EFSCORRUPTED;
		goto out;
	}

	/* Set up for the direct block allocation */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;
	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks needed to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}

/*
 * Calculate number of indirect blocks touched by mapping @nrblocks logically
 * contiguous blocks
 */
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
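
/*
 * E.g., assuming 4KiB blocks (1024 pointers per block), mapping 2048
 * contiguous blocks needs at most 2048/1024 + 1 = 3 indirect blocks,
 * 2 dindirect blocks and 1 tindirect block, which the formula above
 * accounts for as DIV_ROUND_UP(2048, 1024) + 4 == 6.
 */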

static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh, int *dropped)
{
	int err;

	if (bh) {
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			return err;
	}
	err = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(err))
		return err;
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation.  If
 * extending fails, we restart the transaction.
 */
static int ext4_ind_truncate_ensure_credits(handle_t *handle,
					    struct inode *inode,
					    struct buffer_head *bh,
					    int revoke_creds)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_blocks_for_truncate(inode), revoke_creds,
			ext4_ind_trunc_restart_fn(handle, inode, bh, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	if (ret <= 0)
		return ret;
	if (bh) {
		BUFFER_TRACE(bh, "retaking write access");
		ret = ext4_journal_get_write_access(handle, bh);
		if (unlikely(ret))
			return ret;
	}
	return 0;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to the (detached) top of branch
 *
 *	This is a helper function used by ext4_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. A block is
 *	partially truncated if some data below the new i_size is referred
 *	from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path. Since no allocation
 *	past the truncation point is possible until ext4_truncate()
 *	finishes, we may safely do the latter, but the top of the branch
 *	may require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of the branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p. Return value is the pointer to last filled element
 *	of @chain.
 *
 *	The work left to the caller is the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).
 */

static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
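
/*
 * Example: for depth == 3 and offsets == {EXT4_DIND_BLOCK, 4, 0}, the
 * trailing zero offset means the truncation point falls on an
 * indirect-block boundary: the whole subtree at slot 4 of the dindirect
 * block (and everything to its right) is doomed, so k becomes 2 and only
 * the inode and the dindirect block are walked. Whether the doomed
 * subtree is detached through *@top or merely exposed by decrementing
 * partial->p depends on whether anything to its left must survive.
 */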

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int	flags = EXT4_FREE_BLOCKS_VALIDATED;
	int	err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_inode_block_valid(inode, block_to_free, count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	err = ext4_ind_truncate_ensure_credits(handle, inode, bh,
				ext4_free_data_revoke_credits(inode, count));
	if (err < 0)
		goto out_err;

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	    /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
					       corresponding to
					       block_to_free */
	ext4_fsblk_t nr;		    /* Current block # */
	__le32 *p;			    /* Pointer into inode/ind
					       for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
						        block_to_free, count,
						        block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}

/**
 *	ext4_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_inode_block_valid(inode, nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = ext4_sb_bread(inode->i_sb, nr, 0);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (IS_ERR(bh)) {
				ext4_error_inode_block(inode, nr, -PTR_ERR(bh),
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (ext4_ind_truncate_ensure_credits(handle, inode,
					NULL,
					ext4_free_metadata_revoke_credits(
							inode->i_sb, 1)) < 0)
				return;

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block.  This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
}

/**
 *	ext4_ind_remove_space - remove space from the range
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@start:	First block to remove
 *	@end:	One block after the last block to remove (exclusive)
 *
 *	Free the blocks in the defined range (end is exclusive endpoint of
 *	range). This is used by ext4_punch_hole().
 */
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	Indirect *p = NULL, *p2 = NULL;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We're punching only within direct block range */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are at different levels, so we're going to
		 * free a partial block at the start and a partial block at
		 * the end of the range. If there are levels in between, the
		 * do_indirects label will take care of them.
		 */

		if (n == 1) {
			/*
			 * Start is at the direct block level, free
			 * everything to the end of the level.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}


		partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			partial--;
		}

end_range:
		partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive so here we're at
				 * the start of the next level we're not going
				 * to free. Everything was covered by the start
				 * of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns an Indirect structure
			 * which points to the last element that should not
			 * be removed by truncate. But this is the end of
			 * the range in punch_hole, so we need to point to
			 * the next element.
			 */
			partial2->p++;
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the end of the range
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
		goto do_indirects;
	}

	/* Punch happened within the same level (n == n2) */
	partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free top, but only if partial2 isn't its subtree. */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns an Indirect structure which
		 * points to the last element that should not be removed
		 * by truncate. But this is the end of the range in
		 * punch_hole, so we need to point to the next element.
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block. Clear the range,
			 * then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			goto cleanup;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level. So, we
		 * give them a chance to arrive at the same level, then walk
		 * them in step with each other until we converge on the same
		 * block.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   (__le32 *)partial->bh->b_data+addr_per_block,
					   (chain+n-1) - partial);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
	}

cleanup:
	while (p && p > chain) {
		BUFFER_TRACE(p->bh, "call brelse");
		brelse(p->bh);
		p--;
	}
	while (p2 && p2 > chain2) {
		BUFFER_TRACE(p2->bh, "call brelse");
		brelse(p2->bh);
		p2--;
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
	goto cleanup;
}