1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4  * Written by Alex Tomas <alex@clusterfs.com>
5  */
6 
7 
8 /*
9  * mballoc.c contains the multiblocks allocation routines
10  */
11 
12 #include "ext4_jbd2.h"
13 #include "mballoc.h"
14 #include <linux/log2.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/nospec.h>
18 #include <linux/backing-dev.h>
19 #include <trace/events/ext4.h>
20 
21 /*
22  * MUSTDO:
23  *   - test ext4_ext_search_left() and ext4_ext_search_right()
24  *   - search for metadata in few groups
25  *
26  * TODO v4:
27  *   - normalization should take into account whether file is still open
28  *   - discard preallocations if no free space left (policy?)
29  *   - don't normalize tails
30  *   - quota
31  *   - reservation for superuser
32  *
33  * TODO v3:
34  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
35  *   - track min/max extents in each group for better group selection
36  *   - mb_mark_used() may allocate chunk right after splitting buddy
37  *   - tree of groups sorted by number of free blocks
38  *   - error handling
39  */
40 
41 /*
42  * An allocation request involves a request for multiple blocks
43  * near the specified goal (block) value.
44  *
45  * During initialization phase of the allocator we decide to use the
46  * group preallocation or inode preallocation depending on the size of
47  * the file. The size of the file could be the resulting file size we
48  * would have after allocation, or the current file size, whichever
49  * is larger. If the size is less than sbi->s_mb_stream_request we
50  * select to use the group preallocation. The default value of
51  * s_mb_stream_request is 16 blocks. This can also be tuned via
52  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
53  * terms of number of blocks.
54  *
55  * The main motivation for having small files use group preallocation is to
56  * ensure that small files are kept closer together on the disk.
57  *
58  * In the first stage, the allocator looks at the inode prealloc list,
59  * ext4_inode_info->i_prealloc_list, which contains list of prealloc
60  * spaces for this particular inode. The inode prealloc space is
61  * represented as:
62  *
63  * pa_lstart -> the logical start block for this prealloc space
64  * pa_pstart -> the physical start block for this prealloc space
65  * pa_len    -> length for this prealloc space (in clusters)
66  * pa_free   ->  free space available in this prealloc space (in clusters)
67  *
68  * The inode preallocation space is used by looking at the _logical_ start
69  * block. Only if the logical file block falls within the range of a prealloc
70  * space do we consume that particular prealloc space. This makes sure that
71  * we have contiguous physical blocks representing the file blocks.
72  *
73  * The important thing to be noted in case of inode prealloc space is that
74  * we don't modify the values associated with the inode prealloc space except
75  * pa_free.
76  *
77  * If we are not able to find blocks in the inode prealloc space and if we
78  * have the group allocation flag set then we look at the locality group
79  * prealloc space. This is a per-CPU prealloc list, represented as
80  *
81  * ext4_sb_info.s_locality_groups[smp_processor_id()]
82  *
83  * The reason for having a per cpu locality group is to reduce the contention
84  * between CPUs. It is possible to get scheduled at this point.
85  *
86  * The locality group prealloc space is used by looking at whether we have
87  * enough free space (pa_free) within the prealloc space.
88  *
89  * If we can't allocate blocks via inode prealloc or/and locality group
90  * prealloc then we look at the buddy cache. The buddy cache is represented
91  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
92  * mapped to the buddy and bitmap information regarding different
93  * groups. The buddy information is attached to buddy cache inode so that
94  * we can access them through the page cache. The information regarding
95  * each group is loaded via ext4_mb_load_buddy.  The information involves the
96  * block bitmap and the buddy information, which are stored in the
97  * inode as:
98  *
99  *  {                        page                        }
100  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
101  *
102  *
103  * one block each for bitmap and buddy information.  So for each group we
104  * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
105  * blocksize) blocks.  So it can have information regarding groups_per_page
106  * which is blocks_per_page/2
107  *
108  * The buddy cache inode is not stored on disk. The inode is thrown
109  * away when the filesystem is unmounted.
110  *
111  * We look for count number of blocks in the buddy cache. If we were able
112  * to locate that many free blocks we return with additional information
113  * regarding the rest of the contiguous physical blocks available.
114  *
115  * Before allocating blocks via buddy cache we normalize the request
116  * blocks. This ensures we ask for more blocks than we needed. The extra
117  * blocks that we get after allocation are added to the respective prealloc
118  * list. In case of inode preallocation we follow a list of heuristics
119  * based on file size. This can be found in ext4_mb_normalize_request. If
120  * we are doing a group prealloc we try to normalize the request to
121  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
122  * dependent on the cluster size; for non-bigalloc file systems, it is
123  * 512 blocks. This can be tuned via
124  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
125  * terms of number of blocks. If we have mounted the file system with the -o
126  * stripe=<value> option the group prealloc request is normalized to the
127  * smallest multiple of the stripe value (sbi->s_stripe) which is
128  * greater than the default mb_group_prealloc.
129  *
130  * The regular allocator (using the buddy cache) supports a few tunables.
131  *
132  * /sys/fs/ext4/<partition>/mb_min_to_scan
133  * /sys/fs/ext4/<partition>/mb_max_to_scan
134  * /sys/fs/ext4/<partition>/mb_order2_req
135  *
136  * The regular allocator uses buddy scan only if the request len is a power of
137  * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
138  * value of s_mb_order2_reqs can be tuned via
139  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
140  * stripe size (sbi->s_stripe), we try to search for contiguous blocks in
141  * stripe size. This should result in better allocation on RAID setups. If
142  * not, we search in the specific group using bitmap for best extents. The
143  * tunables min_to_scan and max_to_scan control the behaviour here.
144  * min_to_scan indicates how long mballoc __must__ look for a best
145  * extent and max_to_scan indicates how long the mballoc __can__ look for a
146  * best extent in the found extents. Searching for the blocks starts with
147  * the group specified as the goal value in allocation context via
148  * ac_g_ex. Each group is first checked based on the criteria whether it
149  * can be used for allocation. ext4_mb_good_group explains how the groups are
150  * checked.
151  *
152  * Both prealloc spaces are populated as above. So for the first
153  * request we will hit the buddy cache which will result in this prealloc
154  * space getting filled. The prealloc space is then later used for the
155  * subsequent request.
156  */
157 
158 /*
159  * mballoc operates on the following data:
160  *  - on-disk bitmap
161  *  - in-core buddy (actually includes buddy and bitmap)
162  *  - preallocation descriptors (PAs)
163  *
164  * there are two types of preallocations:
165  *  - inode
166  *    assigned to a specific inode and can be used for this inode only.
167  *    it describes part of inode's space preallocated to specific
168  *    physical blocks. any block from that preallocation can be used
169  *    independently. the descriptor just tracks number of blocks left
170  *    unused. so, before taking some block from descriptor, one must
171  *    make sure the corresponding logical block isn't allocated yet. this
172  *    also means that freeing any block within descriptor's range
173  *    must discard all preallocated blocks.
174  *  - locality group
175  *    assigned to specific locality group which does not translate to
176  *    permanent set of inodes: inode can join and leave group. space
177  *    from this type of preallocation can be used for any inode. thus
178  *    it's consumed from the beginning to the end.
179  *
180  * relation between them can be expressed as:
181  *    in-core buddy = on-disk bitmap + preallocation descriptors
182  *
183  * this means blocks mballoc considers used are:
184  *  - allocated blocks (persistent)
185  *  - preallocated blocks (non-persistent)
186  *
187  * consistency in mballoc world means that at any time a block is either
188  * free or used in ALL structures. notice: "any time" should not be read
189  * literally -- time is discrete and delimited by locks.
190  *
191  *  to keep it simple, we don't use block numbers, instead we count number of
192  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
193  *
194  * all operations can be expressed as:
195  *  - init buddy:			buddy = on-disk + PAs
196  *  - new PA:				buddy += N; PA = N
197  *  - use inode PA:			on-disk += N; PA -= N
198  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
199  *  - use locality group PA		on-disk += N; PA -= N
200  *  - discard locality group PA		buddy -= PA; PA = 0
201  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
202  *        is used in real operation because we can't know actual used
203  *        bits from PA, only from on-disk bitmap
204  *
205  * if we follow this strict logic, then all operations above should be atomic.
206  * given some of them can block, we'd have to use something like semaphores
207  * killing performance on high-end SMP hardware. let's try to relax it using
208  * the following knowledge:
209  *  1) if buddy is referenced, it's already initialized
210  *  2) while block is used in buddy and the buddy is referenced,
211  *     nobody can re-allocate that block
212  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
213  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
214  *     on-disk bitmap if buddy has same bit set and/or PA covers the corresponding
215  *     block
216  *
217  * so, now we're building a concurrency table:
218  *  - init buddy vs.
219  *    - new PA
220  *      blocks for PA are allocated in the buddy, buddy must be referenced
221  *      until PA is linked to allocation group to avoid concurrent buddy init
222  *    - use inode PA
223  *      we need to make sure that either on-disk bitmap or PA has uptodate data
224  *      given (3) we care that PA-=N operation doesn't interfere with init
225  *    - discard inode PA
226  *      the simplest way would be to have buddy initialized by the discard
227  *    - use locality group PA
228  *      again PA-=N must be serialized with init
229  *    - discard locality group PA
230  *      the simplest way would be to have buddy initialized by the discard
231  *  - new PA vs.
232  *    - use inode PA
233  *      i_data_sem serializes them
234  *    - discard inode PA
235  *      discard process must wait until PA isn't used by another process
236  *    - use locality group PA
237  *      some mutex should serialize them
238  *    - discard locality group PA
239  *      discard process must wait until PA isn't used by another process
240  *  - use inode PA
241  *    - use inode PA
242  *      i_data_sem or another mutex should serializes them
243  *    - discard inode PA
244  *      discard process must wait until PA isn't used by another process
245  *    - use locality group PA
246  *      nothing wrong here -- they're different PAs covering different blocks
247  *    - discard locality group PA
248  *      discard process must wait until PA isn't used by another process
249  *
250  * now we're ready to draw a few conclusions:
251  *  - while a PA is referenced, no discard of it is possible
252  *  - PA is referenced until block isn't marked in on-disk bitmap
253  *  - PA changes only after on-disk bitmap
254  *  - discard must not compete with init. either init is done before
255  *    any discard or they're serialized somehow
256  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
257  *
258  * a special case is when we've used a PA down to emptiness. no need to modify buddy
259  * in this case, but we should care about concurrent init
260  *
261  */
262 
263  /*
264  * Logic in few words:
265  *
266  *  - allocation:
267  *    load group
268  *    find blocks
269  *    mark bits in on-disk bitmap
270  *    release group
271  *
272  *  - use preallocation:
273  *    find proper PA (per-inode or group)
274  *    load group
275  *    mark bits in on-disk bitmap
276  *    release group
277  *    release PA
278  *
279  *  - free:
280  *    load group
281  *    mark bits in on-disk bitmap
282  *    release group
283  *
284  *  - discard preallocations in group:
285  *    mark PAs deleted
286  *    move them onto local list
287  *    load on-disk bitmap
288  *    load group
289  *    remove PA from object (inode or locality group)
290  *    mark free blocks in-core
291  *
292  *  - discard inode's preallocations:
293  */
294 
295 /*
296  * Locking rules
297  *
298  * Locks:
299  *  - bitlock on a group	(group)
300  *  - object (inode/locality)	(object)
301  *  - per-pa lock		(pa)
302  *
303  * Paths:
304  *  - new pa
305  *    object
306  *    group
307  *
308  *  - find and use pa:
309  *    pa
310  *
311  *  - release consumed pa:
312  *    pa
313  *    group
314  *    object
315  *
316  *  - generate in-core bitmap:
317  *    group
318  *        pa
319  *
320  *  - discard all for given object (inode, locality group):
321  *    object
322  *        pa
323  *    group
324  *
325  *  - discard all for given group:
326  *    group
327  *        pa
328  *    group
329  *        object
330  *
331  */
332 static struct kmem_cache *ext4_pspace_cachep;
333 static struct kmem_cache *ext4_ac_cachep;
334 static struct kmem_cache *ext4_free_data_cachep;
335 
336 /* We create slab caches for groupinfo data structures based on the
337  * superblock block size.  There will be one per mounted filesystem for
338  * each unique s_blocksize_bits */
339 #define NR_GRPINFO_CACHES 8
340 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
341 
342 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
343 	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
344 	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
345 	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
346 };
347 
348 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
349 					ext4_group_t group);
350 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
351 						ext4_group_t group);
352 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
353 
354 /*
355  * The algorithm using this percpu seq counter goes below:
356  * 1. We sample the percpu discard_pa_seq counter before trying for block
357  *    allocation in ext4_mb_new_blocks().
358  * 2. We increment this percpu discard_pa_seq counter when we either allocate
359  *    or free these blocks i.e. while marking those blocks as used/free in
360  *    mb_mark_used()/mb_free_blocks().
361  * 3. We also increment this percpu seq counter when we successfully identify
362  *    that the bb_prealloc_list is not empty and hence proceed for discarding
363  *    of those PAs inside ext4_mb_discard_group_preallocations().
364  *
365  * Now to make sure that the regular fast path of block allocation is not
366  * affected, as a small optimization we only sample the percpu seq counter
367  * on that cpu. Only when the block allocation fails and when freed blocks
368  * found were 0, that is when we sample percpu seq counter for all cpus using
369  * below function ext4_get_discard_pa_seq_sum(). This happens after making
370  * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
371  */
372 static DEFINE_PER_CPU(u64, discard_pa_seq);
373 static inline u64 ext4_get_discard_pa_seq_sum(void)
374 {
375 	int __cpu;
376 	u64 __seq = 0;
377 
378 	for_each_possible_cpu(__cpu)
379 		__seq += per_cpu(discard_pa_seq, __cpu);
380 	return __seq;
381 }
382 
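/*
 * ext4_*_bit() on some architectures (e.g. powerpc) requires an unsigned
 * long aligned address; round @addr down to that boundary and fold the
 * dropped low-order bytes into *bit.
 */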
383 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
384 {
385 #if BITS_PER_LONG == 64
386 	*bit += ((unsigned long) addr & 7UL) << 3;
387 	addr = (void *) ((unsigned long) addr & ~7UL);
388 #elif BITS_PER_LONG == 32
389 	*bit += ((unsigned long) addr & 3UL) << 3;
390 	addr = (void *) ((unsigned long) addr & ~3UL);
391 #else
392 #error "how many bits you are?!"
393 #endif
394 	return addr;
395 }
396 
397 static inline int mb_test_bit(int bit, void *addr)
398 {
399 	/*
400 	 * ext4_test_bit on architecture like powerpc
401 	 * needs unsigned long aligned address
402 	 */
403 	addr = mb_correct_addr_and_bit(&bit, addr);
404 	return ext4_test_bit(bit, addr);
405 }
406 
407 static inline void mb_set_bit(int bit, void *addr)
408 {
409 	addr = mb_correct_addr_and_bit(&bit, addr);
410 	ext4_set_bit(bit, addr);
411 }
412 
413 static inline void mb_clear_bit(int bit, void *addr)
414 {
415 	addr = mb_correct_addr_and_bit(&bit, addr);
416 	ext4_clear_bit(bit, addr);
417 }
418 
419 static inline int mb_test_and_clear_bit(int bit, void *addr)
420 {
421 	addr = mb_correct_addr_and_bit(&bit, addr);
422 	return ext4_test_and_clear_bit(bit, addr);
423 }
424 
425 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
426 {
427 	int fix = 0, ret, tmpmax;
428 	addr = mb_correct_addr_and_bit(&fix, addr);
429 	tmpmax = max + fix;
430 	start += fix;
431 
432 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
433 	if (ret > max)
434 		return max;
435 	return ret;
436 }
437 
438 static inline int mb_find_next_bit(void *addr, int max, int start)
439 {
440 	int fix = 0, ret, tmpmax;
441 	addr = mb_correct_addr_and_bit(&fix, addr);
442 	tmpmax = max + fix;
443 	start += fix;
444 
445 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
446 	if (ret > max)
447 		return max;
448 	return ret;
449 }
450 
451 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
452 {
453 	char *bb;
454 
455 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
456 	BUG_ON(max == NULL);
457 
458 	if (order > e4b->bd_blkbits + 1) {
459 		*max = 0;
460 		return NULL;
461 	}
462 
463 	/* at order 0 we see each particular block */
464 	if (order == 0) {
465 		*max = 1 << (e4b->bd_blkbits + 3);
466 		return e4b->bd_bitmap;
467 	}
468 
469 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
470 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
471 
472 	return bb;
473 }
474 
475 #ifdef DOUBLE_CHECK
476 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
477 			   int first, int count)
478 {
479 	int i;
480 	struct super_block *sb = e4b->bd_sb;
481 
482 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
483 		return;
484 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
485 	for (i = 0; i < count; i++) {
486 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
487 			ext4_fsblk_t blocknr;
488 
489 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
490 			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
491 			ext4_grp_locked_error(sb, e4b->bd_group,
492 					      inode ? inode->i_ino : 0,
493 					      blocknr,
494 					      "freeing block already freed "
495 					      "(bit %u)",
496 					      first + i);
497 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
498 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
499 		}
500 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
501 	}
502 }
503 
504 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
505 {
506 	int i;
507 
508 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
509 		return;
510 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
511 	for (i = 0; i < count; i++) {
512 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
513 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
514 	}
515 }
516 
517 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
518 {
519 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
520 		return;
521 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
522 		unsigned char *b1, *b2;
523 		int i;
524 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
525 		b2 = (unsigned char *) bitmap;
526 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
527 			if (b1[i] != b2[i]) {
528 				ext4_msg(e4b->bd_sb, KERN_ERR,
529 					 "corruption in group %u "
530 					 "at byte %u(%u): %x in copy != %x "
531 					 "on disk/prealloc",
532 					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
533 				BUG();
534 			}
535 		}
536 	}
537 }
538 
539 static void mb_group_bb_bitmap_alloc(struct super_block *sb,
540 			struct ext4_group_info *grp, ext4_group_t group)
541 {
542 	struct buffer_head *bh;
543 
544 	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
545 	if (!grp->bb_bitmap)
546 		return;
547 
548 	bh = ext4_read_block_bitmap(sb, group);
549 	if (IS_ERR_OR_NULL(bh)) {
550 		kfree(grp->bb_bitmap);
551 		grp->bb_bitmap = NULL;
552 		return;
553 	}
554 
555 	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
556 	put_bh(bh);
557 }
558 
559 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
560 {
561 	kfree(grp->bb_bitmap);
562 }
563 
564 #else
565 static inline void mb_free_blocks_double(struct inode *inode,
566 				struct ext4_buddy *e4b, int first, int count)
567 {
568 	return;
569 }
570 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
571 						int first, int count)
572 {
573 	return;
574 }
575 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
576 {
577 	return;
578 }
579 
580 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
581 			struct ext4_group_info *grp, ext4_group_t group)
582 {
583 	return;
584 }
585 
586 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
587 {
588 	return;
589 }
590 #endif
591 
592 #ifdef AGGRESSIVE_CHECK
593 
594 #define MB_CHECK_ASSERT(assert)						\
595 do {									\
596 	if (!(assert)) {						\
597 		printk(KERN_EMERG					\
598 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
599 			function, file, line, # assert);		\
600 		BUG();							\
601 	}								\
602 } while (0)
603 
604 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
605 				const char *function, int line)
606 {
607 	struct super_block *sb = e4b->bd_sb;
608 	int order = e4b->bd_blkbits + 1;
609 	int max;
610 	int max2;
611 	int i;
612 	int j;
613 	int k;
614 	int count;
615 	struct ext4_group_info *grp;
616 	int fragments = 0;
617 	int fstart;
618 	struct list_head *cur;
619 	void *buddy;
620 	void *buddy2;
621 
622 	if (e4b->bd_info->bb_check_counter++ % 10)
623 		return 0;
624 
625 	while (order > 1) {
626 		buddy = mb_find_buddy(e4b, order, &max);
627 		MB_CHECK_ASSERT(buddy);
628 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
629 		MB_CHECK_ASSERT(buddy2);
630 		MB_CHECK_ASSERT(buddy != buddy2);
631 		MB_CHECK_ASSERT(max * 2 == max2);
632 
633 		count = 0;
634 		for (i = 0; i < max; i++) {
635 
636 			if (mb_test_bit(i, buddy)) {
637 				/* only single bit in buddy2 may be 1 */
638 				if (!mb_test_bit(i << 1, buddy2)) {
639 					MB_CHECK_ASSERT(
640 						mb_test_bit((i<<1)+1, buddy2));
641 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
642 					MB_CHECK_ASSERT(
643 						mb_test_bit(i << 1, buddy2));
644 				}
645 				continue;
646 			}
647 
648 			/* both bits in buddy2 must be 1 */
649 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
650 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
651 
652 			for (j = 0; j < (1 << order); j++) {
653 				k = (i * (1 << order)) + j;
654 				MB_CHECK_ASSERT(
655 					!mb_test_bit(k, e4b->bd_bitmap));
656 			}
657 			count++;
658 		}
659 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
660 		order--;
661 	}
662 
663 	fstart = -1;
664 	buddy = mb_find_buddy(e4b, 0, &max);
665 	for (i = 0; i < max; i++) {
666 		if (!mb_test_bit(i, buddy)) {
667 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
668 			if (fstart == -1) {
669 				fragments++;
670 				fstart = i;
671 			}
672 			continue;
673 		}
674 		fstart = -1;
675 		/* check used bits only */
676 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
677 			buddy2 = mb_find_buddy(e4b, j, &max2);
678 			k = i >> j;
679 			MB_CHECK_ASSERT(k < max2);
680 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
681 		}
682 	}
683 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
684 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
685 
686 	grp = ext4_get_group_info(sb, e4b->bd_group);
687 	if (!grp)
688 		return 0;
689 	list_for_each(cur, &grp->bb_prealloc_list) {
690 		ext4_group_t groupnr;
691 		struct ext4_prealloc_space *pa;
692 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
693 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
694 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
695 		for (i = 0; i < pa->pa_len; i++)
696 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
697 	}
698 	return 0;
699 }
700 #undef MB_CHECK_ASSERT
701 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
702 					__FILE__, __func__, __LINE__)
703 #else
704 #define mb_check_buddy(e4b)
705 #endif
706 
707 /*
708  * Divide blocks started from @first with length @len into
709  * smaller chunks with power of 2 blocks.
710  * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
711  * then increase bb_counters[] for the corresponding chunk size.
712  */
713 static void ext4_mb_mark_free_simple(struct super_block *sb,
714 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
715 					struct ext4_group_info *grp)
716 {
717 	struct ext4_sb_info *sbi = EXT4_SB(sb);
718 	ext4_grpblk_t min;
719 	ext4_grpblk_t max;
720 	ext4_grpblk_t chunk;
721 	unsigned int border;
722 
723 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
724 
725 	border = 2 << sb->s_blocksize_bits;
726 
727 	while (len > 0) {
728 		/* find how many blocks can be covered since this position */
729 		max = ffs(first | border) - 1;
730 
731 		/* find how many blocks of power 2 we need to mark */
732 		min = fls(len) - 1;
733 
734 		if (max < min)
735 			min = max;
736 		chunk = 1 << min;
737 
738 		/* mark multiblock chunks only */
739 		grp->bb_counters[min]++;
740 		if (min > 0)
741 			mb_clear_bit(first >> min,
742 				     buddy + sbi->s_mb_offsets[min]);
743 
744 		len -= chunk;
745 		first += chunk;
746 	}
747 }
748 
749 /*
750  * Cache the order of the largest free extent we have available in this block
751  * group.
752  */
753 static void
754 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
755 {
756 	int i;
757 	int bits;
758 
759 	grp->bb_largest_free_order = -1; /* uninit */
760 
761 	bits = sb->s_blocksize_bits + 1;
762 	for (i = bits; i >= 0; i--) {
763 		if (grp->bb_counters[i] > 0) {
764 			grp->bb_largest_free_order = i;
765 			break;
766 		}
767 	}
768 }
769 
770 static noinline_for_stack
771 void ext4_mb_generate_buddy(struct super_block *sb,
772 			    void *buddy, void *bitmap, ext4_group_t group,
773 			    struct ext4_group_info *grp)
774 {
775 	struct ext4_sb_info *sbi = EXT4_SB(sb);
776 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
777 	ext4_grpblk_t i = 0;
778 	ext4_grpblk_t first;
779 	ext4_grpblk_t len;
780 	unsigned free = 0;
781 	unsigned fragments = 0;
782 	unsigned long long period = get_cycles();
783 
784 	/* initialize buddy from bitmap which is aggregation
785 	 * of on-disk bitmap and preallocations */
786 	i = mb_find_next_zero_bit(bitmap, max, 0);
787 	grp->bb_first_free = i;
788 	while (i < max) {
789 		fragments++;
790 		first = i;
791 		i = mb_find_next_bit(bitmap, max, i);
792 		len = i - first;
793 		free += len;
794 		if (len > 1)
795 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
796 		else
797 			grp->bb_counters[0]++;
798 		if (i < max)
799 			i = mb_find_next_zero_bit(bitmap, max, i);
800 	}
801 	grp->bb_fragments = fragments;
802 
803 	if (free != grp->bb_free) {
804 		ext4_grp_locked_error(sb, group, 0, 0,
805 				      "block bitmap and bg descriptor "
806 				      "inconsistent: %u vs %u free clusters",
807 				      free, grp->bb_free);
808 		/*
809 		 * If we intend to continue, we consider group descriptor
810 		 * corrupt and update bb_free using bitmap value
811 		 */
812 		grp->bb_free = free;
813 		ext4_mark_group_bitmap_corrupted(sb, group,
814 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
815 	}
816 	mb_set_largest_free_order(sb, grp);
817 
818 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
819 
820 	period = get_cycles() - period;
821 	atomic_inc(&sbi->s_mb_buddies_generated);
822 	atomic64_add(period, &sbi->s_mb_generation_time);
823 }
824 
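/*
 * Mark the whole buddy as in use, reset the fragment and per-order free
 * counters, and rebuild them from the group's current block bitmap.
 * Used when an inconsistency is found while freeing during fast-commit
 * replay (see mb_free_blocks()).
 */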
825 static void mb_regenerate_buddy(struct ext4_buddy *e4b)
826 {
827 	int count;
828 	int order = 1;
829 	void *buddy;
830 
831 	while ((buddy = mb_find_buddy(e4b, order++, &count)))
832 		ext4_set_bits(buddy, 0, count);
833 
834 	e4b->bd_info->bb_fragments = 0;
835 	memset(e4b->bd_info->bb_counters, 0,
836 		sizeof(*e4b->bd_info->bb_counters) *
837 		(e4b->bd_sb->s_blocksize_bits + 2));
838 
839 	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
840 		e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
841 }
842 
843 /* The buddy information is attached to the buddy cache inode
844  * for convenience. The information regarding each group
845  * is loaded via ext4_mb_load_buddy. The information involves the
846  * block bitmap and the buddy information, which are
847  * stored in the inode as
848  *
849  * {                        page                        }
850  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
851  *
852  *
853  * one block each for bitmap and buddy information.
854  * So for each group we take up 2 blocks. A page can
855  * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
856  * So it can have information regarding groups_per_page which
857  * is blocks_per_page/2
858  *
859  * Locking note:  This routine takes the block group lock of all groups
860  * for this page; do not hold this lock when calling this routine!
861  */
862 
863 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
864 {
865 	ext4_group_t ngroups;
866 	int blocksize;
867 	int blocks_per_page;
868 	int groups_per_page;
869 	int err = 0;
870 	int i;
871 	ext4_group_t first_group, group;
872 	int first_block;
873 	struct super_block *sb;
874 	struct buffer_head *bhs;
875 	struct buffer_head **bh = NULL;
876 	struct inode *inode;
877 	char *data;
878 	char *bitmap;
879 	struct ext4_group_info *grinfo;
880 
881 	inode = page->mapping->host;
882 	sb = inode->i_sb;
883 	ngroups = ext4_get_groups_count(sb);
884 	blocksize = i_blocksize(inode);
885 	blocks_per_page = PAGE_SIZE / blocksize;
886 
887 	mb_debug(sb, "init page %lu\n", page->index);
888 
889 	groups_per_page = blocks_per_page >> 1;
890 	if (groups_per_page == 0)
891 		groups_per_page = 1;
892 
893 	/* allocate buffer_heads to read bitmaps */
894 	if (groups_per_page > 1) {
895 		i = sizeof(struct buffer_head *) * groups_per_page;
896 		bh = kzalloc(i, gfp);
897 		if (bh == NULL) {
898 			err = -ENOMEM;
899 			goto out;
900 		}
901 	} else
902 		bh = &bhs;
903 
904 	first_group = page->index * blocks_per_page / 2;
905 
906 	/* read all groups the page covers into the cache */
907 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
908 		if (group >= ngroups)
909 			break;
910 
911 		grinfo = ext4_get_group_info(sb, group);
912 		if (!grinfo)
913 			continue;
914 		/*
915 		 * If page is uptodate then we came here after online resize
916 		 * which added some new uninitialized group info structs, so
917 		 * we must skip all initialized uptodate buddies on the page,
918 		 * which may be currently in use by an allocating task.
919 		 */
920 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
921 			bh[i] = NULL;
922 			continue;
923 		}
924 		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
925 		if (IS_ERR(bh[i])) {
926 			err = PTR_ERR(bh[i]);
927 			bh[i] = NULL;
928 			goto out;
929 		}
930 		mb_debug(sb, "read bitmap for group %u\n", group);
931 	}
932 
933 	/* wait for I/O completion */
934 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
935 		int err2;
936 
937 		if (!bh[i])
938 			continue;
939 		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
940 		if (!err)
941 			err = err2;
942 	}
943 
944 	first_block = page->index * blocks_per_page;
945 	for (i = 0; i < blocks_per_page; i++) {
946 		group = (first_block + i) >> 1;
947 		if (group >= ngroups)
948 			break;
949 
950 		if (!bh[group - first_group])
951 			/* skip initialized uptodate buddy */
952 			continue;
953 
954 		if (!buffer_verified(bh[group - first_group]))
955 			/* Skip faulty bitmaps */
956 			continue;
957 		err = 0;
958 
959 		/*
960 		 * data carries information regarding this
961 		 * particular group in the format specified
962 		 * above
963 		 *
964 		 */
965 		data = page_address(page) + (i * blocksize);
966 		bitmap = bh[group - first_group]->b_data;
967 
968 		/*
969 		 * We place the buddy block and bitmap block
970 		 * close together
971 		 */
972 		if ((first_block + i) & 1) {
973 			/* this is block of buddy */
974 			BUG_ON(incore == NULL);
975 			mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
976 				group, page->index, i * blocksize);
977 			trace_ext4_mb_buddy_bitmap_load(sb, group);
978 			grinfo = ext4_get_group_info(sb, group);
979 			if (!grinfo) {
980 				err = -EFSCORRUPTED;
981 				goto out;
982 			}
983 			grinfo->bb_fragments = 0;
984 			memset(grinfo->bb_counters, 0,
985 			       sizeof(*grinfo->bb_counters) *
986 				(sb->s_blocksize_bits+2));
987 			/*
988 			 * incore got set to the group block bitmap below
989 			 */
990 			ext4_lock_group(sb, group);
991 			/* init the buddy */
992 			memset(data, 0xff, blocksize);
993 			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
994 			ext4_unlock_group(sb, group);
995 			incore = NULL;
996 		} else {
997 			/* this is block of bitmap */
998 			BUG_ON(incore != NULL);
999 			mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1000 				group, page->index, i * blocksize);
1001 			trace_ext4_mb_bitmap_load(sb, group);
1002 
1003 			/* see comments in ext4_mb_put_pa() */
1004 			ext4_lock_group(sb, group);
1005 			memcpy(data, bitmap, blocksize);
1006 
1007 			/* mark all preallocated blks used in in-core bitmap */
1008 			ext4_mb_generate_from_pa(sb, data, group);
1009 			ext4_mb_generate_from_freelist(sb, data, group);
1010 			ext4_unlock_group(sb, group);
1011 
1012 			/* set incore so that the buddy information can be
1013 			 * generated using this
1014 			 */
1015 			incore = data;
1016 		}
1017 	}
1018 	SetPageUptodate(page);
1019 
1020 out:
1021 	if (bh) {
1022 		for (i = 0; i < groups_per_page; i++)
1023 			brelse(bh[i]);
1024 		if (bh != &bhs)
1025 			kfree(bh);
1026 	}
1027 	return err;
1028 }
1029 
1030 /*
1031  * Lock the buddy and bitmap pages. This makes sure other parallel init_group
1032  * on the same buddy page doesn't happen while holding the buddy page lock.
1033  * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
1034  * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
1035  */
1036 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1037 		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1038 {
1039 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1040 	int block, pnum, poff;
1041 	int blocks_per_page;
1042 	struct page *page;
1043 
1044 	e4b->bd_buddy_page = NULL;
1045 	e4b->bd_bitmap_page = NULL;
1046 
1047 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1048 	/*
1049 	 * the buddy cache inode stores the block bitmap
1050 	 * and buddy information in consecutive blocks.
1051 	 * So for each group we need two blocks.
1052 	 */
1053 	block = group * 2;
1054 	pnum = block / blocks_per_page;
1055 	poff = block % blocks_per_page;
1056 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1057 	if (!page)
1058 		return -ENOMEM;
1059 	BUG_ON(page->mapping != inode->i_mapping);
1060 	e4b->bd_bitmap_page = page;
1061 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1062 
1063 	if (blocks_per_page >= 2) {
1064 		/* buddy and bitmap are on the same page */
1065 		return 0;
1066 	}
1067 
1068 	block++;
1069 	pnum = block / blocks_per_page;
1070 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1071 	if (!page)
1072 		return -ENOMEM;
1073 	BUG_ON(page->mapping != inode->i_mapping);
1074 	e4b->bd_buddy_page = page;
1075 	return 0;
1076 }
1077 
1078 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1079 {
1080 	if (e4b->bd_bitmap_page) {
1081 		unlock_page(e4b->bd_bitmap_page);
1082 		put_page(e4b->bd_bitmap_page);
1083 	}
1084 	if (e4b->bd_buddy_page) {
1085 		unlock_page(e4b->bd_buddy_page);
1086 		put_page(e4b->bd_buddy_page);
1087 	}
1088 }
1089 
1090 /*
1091  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1092  * block group lock of all groups for this page; do not hold the BG lock when
1093  * calling this routine!
1094  */
1095 static noinline_for_stack
1096 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1097 {
1098 
1099 	struct ext4_group_info *this_grp;
1100 	struct ext4_buddy e4b;
1101 	struct page *page;
1102 	int ret = 0;
1103 
1104 	might_sleep();
1105 	mb_debug(sb, "init group %u\n", group);
1106 	this_grp = ext4_get_group_info(sb, group);
1107 	if (!this_grp)
1108 		return -EFSCORRUPTED;
1109 
1110 	/*
1111 	 * This ensures that we don't reinit the buddy cache
1112 	 * page which map to the group from which we are already
1113 	 * allocating. If we are looking at the buddy cache we would
1114 	 * have taken a reference using ext4_mb_load_buddy and that
1115 	 * would have pinned buddy page to page cache.
1116 	 * The call to ext4_mb_get_buddy_page_lock will mark the
1117 	 * page accessed.
1118 	 */
1119 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1120 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1121 		/*
1122 		 * somebody initialized the group
1123 		 * return without doing anything
1124 		 */
1125 		goto err;
1126 	}
1127 
1128 	page = e4b.bd_bitmap_page;
1129 	ret = ext4_mb_init_cache(page, NULL, gfp);
1130 	if (ret)
1131 		goto err;
1132 	if (!PageUptodate(page)) {
1133 		ret = -EIO;
1134 		goto err;
1135 	}
1136 
1137 	if (e4b.bd_buddy_page == NULL) {
1138 		/*
1139 		 * If both the bitmap and buddy are in
1140 		 * the same page we don't need to force
1141 		 * init the buddy
1142 		 */
1143 		ret = 0;
1144 		goto err;
1145 	}
1146 	/* init buddy cache */
1147 	page = e4b.bd_buddy_page;
1148 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1149 	if (ret)
1150 		goto err;
1151 	if (!PageUptodate(page)) {
1152 		ret = -EIO;
1153 		goto err;
1154 	}
1155 err:
1156 	ext4_mb_put_buddy_page_lock(&e4b);
1157 	return ret;
1158 }
1159 
1160 /*
1161  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1162  * block group lock of all groups for this page; do not hold the BG lock when
1163  * calling this routine!
1164  */
1165 static noinline_for_stack int
1166 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1167 		       struct ext4_buddy *e4b, gfp_t gfp)
1168 {
1169 	int blocks_per_page;
1170 	int block;
1171 	int pnum;
1172 	int poff;
1173 	struct page *page;
1174 	int ret;
1175 	struct ext4_group_info *grp;
1176 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1177 	struct inode *inode = sbi->s_buddy_cache;
1178 
1179 	might_sleep();
1180 	mb_debug(sb, "load group %u\n", group);
1181 
1182 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1183 	grp = ext4_get_group_info(sb, group);
1184 	if (!grp)
1185 		return -EFSCORRUPTED;
1186 
1187 	e4b->bd_blkbits = sb->s_blocksize_bits;
1188 	e4b->bd_info = grp;
1189 	e4b->bd_sb = sb;
1190 	e4b->bd_group = group;
1191 	e4b->bd_buddy_page = NULL;
1192 	e4b->bd_bitmap_page = NULL;
1193 
1194 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1195 		/*
1196 		 * we need full data about the group
1197 		 * to make a good selection
1198 		 */
1199 		ret = ext4_mb_init_group(sb, group, gfp);
1200 		if (ret)
1201 			return ret;
1202 	}
1203 
1204 	/*
1205 	 * the buddy cache inode stores the block bitmap
1206 	 * and buddy information in consecutive blocks.
1207 	 * So for each group we need two blocks.
1208 	 */
1209 	block = group * 2;
1210 	pnum = block / blocks_per_page;
1211 	poff = block % blocks_per_page;
1212 
1213 	/* we could use find_or_create_page(), but it locks page
1214 	 * what we'd like to avoid in fast path ... */
1215 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1216 	if (page == NULL || !PageUptodate(page)) {
1217 		if (page)
1218 			/*
1219 			 * drop the page reference and try
1220 			 * to get the page with lock. If we
1221 			 * are not uptodate that implies
1222 			 * somebody just created the page but
1223 			 * is yet to initialize the same. So
1224 			 * wait for it to initialize.
1225 			 */
1226 			put_page(page);
1227 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1228 		if (page) {
1229 			BUG_ON(page->mapping != inode->i_mapping);
1230 			if (!PageUptodate(page)) {
1231 				ret = ext4_mb_init_cache(page, NULL, gfp);
1232 				if (ret) {
1233 					unlock_page(page);
1234 					goto err;
1235 				}
1236 				mb_cmp_bitmaps(e4b, page_address(page) +
1237 					       (poff * sb->s_blocksize));
1238 			}
1239 			unlock_page(page);
1240 		}
1241 	}
1242 	if (page == NULL) {
1243 		ret = -ENOMEM;
1244 		goto err;
1245 	}
1246 	if (!PageUptodate(page)) {
1247 		ret = -EIO;
1248 		goto err;
1249 	}
1250 
1251 	/* Pages marked accessed already */
1252 	e4b->bd_bitmap_page = page;
1253 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1254 
1255 	block++;
1256 	pnum = block / blocks_per_page;
1257 	poff = block % blocks_per_page;
1258 
1259 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1260 	if (page == NULL || !PageUptodate(page)) {
1261 		if (page)
1262 			put_page(page);
1263 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1264 		if (page) {
1265 			BUG_ON(page->mapping != inode->i_mapping);
1266 			if (!PageUptodate(page)) {
1267 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1268 							 gfp);
1269 				if (ret) {
1270 					unlock_page(page);
1271 					goto err;
1272 				}
1273 			}
1274 			unlock_page(page);
1275 		}
1276 	}
1277 	if (page == NULL) {
1278 		ret = -ENOMEM;
1279 		goto err;
1280 	}
1281 	if (!PageUptodate(page)) {
1282 		ret = -EIO;
1283 		goto err;
1284 	}
1285 
1286 	/* Pages marked accessed already */
1287 	e4b->bd_buddy_page = page;
1288 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1289 
1290 	return 0;
1291 
1292 err:
1293 	if (page)
1294 		put_page(page);
1295 	if (e4b->bd_bitmap_page)
1296 		put_page(e4b->bd_bitmap_page);
1297 	if (e4b->bd_buddy_page)
1298 		put_page(e4b->bd_buddy_page);
1299 	e4b->bd_buddy = NULL;
1300 	e4b->bd_bitmap = NULL;
1301 	return ret;
1302 }
1303 
1304 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1305 			      struct ext4_buddy *e4b)
1306 {
1307 	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1308 }
1309 
1310 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1311 {
1312 	if (e4b->bd_bitmap_page)
1313 		put_page(e4b->bd_bitmap_page);
1314 	if (e4b->bd_buddy_page)
1315 		put_page(e4b->bd_buddy_page);
1316 }
1317 
1318 
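/*
 * Return the order of the free buddy chunk containing @block by scanning
 * the per-order buddy bitmaps from the lowest order upwards; returns 0 if
 * no containing free chunk is found at any higher order.
 */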
1319 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1320 {
1321 	int order = 1;
1322 	int bb_incr = 1 << (e4b->bd_blkbits - 1);
1323 	void *bb;
1324 
1325 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1326 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1327 
1328 	bb = e4b->bd_buddy;
1329 	while (order <= e4b->bd_blkbits + 1) {
1330 		block = block >> 1;
1331 		if (!mb_test_bit(block, bb)) {
1332 			/* this block is part of buddy of order 'order' */
1333 			return order;
1334 		}
1335 		bb += bb_incr;
1336 		bb_incr >>= 1;
1337 		order++;
1338 	}
1339 	return 0;
1340 }
1341 
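/*
 * Clear @len bits in bitmap @bm starting at bit @cur, clearing whole
 * 32-bit words at once where the remaining range allows it.
 */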
1342 static void mb_clear_bits(void *bm, int cur, int len)
1343 {
1344 	__u32 *addr;
1345 
1346 	len = cur + len;
1347 	while (cur < len) {
1348 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1349 			/* fast path: clear whole word at once */
1350 			addr = bm + (cur >> 3);
1351 			*addr = 0;
1352 			cur += 32;
1353 			continue;
1354 		}
1355 		mb_clear_bit(cur, bm);
1356 		cur++;
1357 	}
1358 }
1359 
1360 /* clear bits in given range
1361  * will return first found zero bit if any, -1 otherwise
1362  */
1363 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1364 {
1365 	__u32 *addr;
1366 	int zero_bit = -1;
1367 
1368 	len = cur + len;
1369 	while (cur < len) {
1370 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1371 			/* fast path: clear whole word at once */
1372 			addr = bm + (cur >> 3);
1373 			if (*addr != (__u32)(-1) && zero_bit == -1)
1374 				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1375 			*addr = 0;
1376 			cur += 32;
1377 			continue;
1378 		}
1379 		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1380 			zero_bit = cur;
1381 		cur++;
1382 	}
1383 
1384 	return zero_bit;
1385 }
1386 
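/*
 * Set @len bits in bitmap @bm starting at bit @cur; the counterpart of
 * mb_clear_bits() with the same whole-word fast path.
 */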
1387 void ext4_set_bits(void *bm, int cur, int len)
1388 {
1389 	__u32 *addr;
1390 
1391 	len = cur + len;
1392 	while (cur < len) {
1393 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1394 			/* fast path: set whole word at once */
1395 			addr = bm + (cur >> 3);
1396 			*addr = 0xffffffff;
1397 			cur += 32;
1398 			continue;
1399 		}
1400 		mb_set_bit(cur, bm);
1401 		cur++;
1402 	}
1403 }
1404 
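/*
 * Fix up one unaligned border of a range being freed at the current buddy
 * order: if the neighbouring chunk is still in use, the border chunk is
 * marked free at this order and the range shrinks; if the neighbour is
 * free, it is absorbed into the range so the pair can be freed one order
 * higher. Returns the +1/-1 adjustment for bb_counters[] at this order.
 */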
1405 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1406 {
1407 	if (mb_test_bit(*bit + side, bitmap)) {
1408 		mb_clear_bit(*bit, bitmap);
1409 		(*bit) -= side;
1410 		return 1;
1411 	}
1412 	else {
1413 		(*bit) += side;
1414 		mb_set_bit(*bit, bitmap);
1415 		return -1;
1416 	}
1417 }
1418 
1419 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1420 {
1421 	int max;
1422 	int order = 1;
1423 	void *buddy = mb_find_buddy(e4b, order, &max);
1424 
1425 	while (buddy) {
1426 		void *buddy2;
1427 
1428 		/* Bits in range [first; last] are known to be set since
1429 		 * corresponding blocks were allocated. Bits in range
1430 		 * (first; last) will stay set because they form buddies on
1431 		 * upper layer. We just deal with borders if they don't
1432 		 * align with upper layer and then go up.
1433 		 * Releasing entire group is all about clearing
1434 		 * single bit of highest order buddy.
1435 		 */
1436 
1437 		/* Example:
1438 		 * ---------------------------------
1439 		 * |   1   |   1   |   1   |   1   |
1440 		 * ---------------------------------
1441 		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1442 		 * ---------------------------------
1443 		 *   0   1   2   3   4   5   6   7
1444 		 *      \_____________________/
1445 		 *
1446 		 * Neither [1] nor [6] is aligned to above layer.
1447 		 * Left neighbour [0] is free, so mark it busy,
1448 		 * decrease bb_counters and extend range to
1449 		 * [0; 6]
1450 		 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1451 		 * mark [6] free, increase bb_counters and shrink range to
1452 		 * [0; 5].
1453 		 * Then shift range to [0; 2], go up and do the same.
1454 		 */
1455 
1456 
1457 		if (first & 1)
1458 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1459 		if (!(last & 1))
1460 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1461 		if (first > last)
1462 			break;
1463 		order++;
1464 
1465 		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1466 			mb_clear_bits(buddy, first, last - first + 1);
1467 			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1468 			break;
1469 		}
1470 		first >>= 1;
1471 		last >>= 1;
1472 		buddy = buddy2;
1473 	}
1474 }
1475 
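/*
 * Free @count clusters starting at @first in this group's buddy: clear the
 * order-0 bits, maintain bb_free, bb_first_free and the fragment count, and
 * merge the freed range upwards through the buddy orders via
 * mb_buddy_mark_free(). Must be called with the group lock held.
 */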
1476 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1477 			   int first, int count)
1478 {
1479 	int left_is_free = 0;
1480 	int right_is_free = 0;
1481 	int block;
1482 	int last = first + count - 1;
1483 	struct super_block *sb = e4b->bd_sb;
1484 
1485 	if (WARN_ON(count == 0))
1486 		return;
1487 	BUG_ON(last >= (sb->s_blocksize << 3));
1488 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1489 	/* Don't bother if the block group is corrupt. */
1490 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1491 		return;
1492 
1493 	mb_check_buddy(e4b);
1494 	mb_free_blocks_double(inode, e4b, first, count);
1495 
1496 	this_cpu_inc(discard_pa_seq);
1497 	e4b->bd_info->bb_free += count;
1498 	if (first < e4b->bd_info->bb_first_free)
1499 		e4b->bd_info->bb_first_free = first;
1500 
1501 	/* access memory sequentially: check left neighbour,
1502 	 * clear range and then check right neighbour
1503 	 */
1504 	if (first != 0)
1505 		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1506 	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1507 	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1508 		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1509 
1510 	if (unlikely(block != -1)) {
1511 		struct ext4_sb_info *sbi = EXT4_SB(sb);
1512 		ext4_fsblk_t blocknr;
1513 
1514 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1515 		blocknr += EXT4_C2B(sbi, block);
1516 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1517 			ext4_grp_locked_error(sb, e4b->bd_group,
1518 					      inode ? inode->i_ino : 0,
1519 					      blocknr,
1520 					      "freeing already freed block (bit %u); block bitmap corrupt.",
1521 					      block);
1522 			ext4_mark_group_bitmap_corrupted(
1523 				sb, e4b->bd_group,
1524 				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1525 		} else {
1526 			mb_regenerate_buddy(e4b);
1527 		}
1528 		goto done;
1529 	}
1530 
1531 	/* let's maintain fragments counter */
1532 	if (left_is_free && right_is_free)
1533 		e4b->bd_info->bb_fragments--;
1534 	else if (!left_is_free && !right_is_free)
1535 		e4b->bd_info->bb_fragments++;
1536 
1537 	/* buddy[0] == bd_bitmap is a special case, so handle
1538 	 * it right away and let mb_buddy_mark_free stay free of
1539 	 * zero order checks.
1540 	 * Check if neighbours are to be coalesced,
1541 	 * adjust bitmap bb_counters and borders appropriately.
1542 	 */
1543 	if (first & 1) {
1544 		first += !left_is_free;
1545 		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1546 	}
1547 	if (!(last & 1)) {
1548 		last -= !right_is_free;
1549 		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1550 	}
1551 
1552 	if (first <= last)
1553 		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1554 
1555 done:
1556 	mb_set_largest_free_order(sb, e4b->bd_info);
1557 	mb_check_buddy(e4b);
1558 }
1559 
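/*
 * Build into @ex the free extent containing @block, extending it forward
 * up to @needed clusters, and return its length. Returns 0 (an empty
 * extent) if @block is already in use. Called under the group lock.
 */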
1560 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1561 				int needed, struct ext4_free_extent *ex)
1562 {
1563 	int next = block;
1564 	int max, order;
1565 	void *buddy;
1566 
1567 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1568 	BUG_ON(ex == NULL);
1569 
1570 	buddy = mb_find_buddy(e4b, 0, &max);
1571 	BUG_ON(buddy == NULL);
1572 	BUG_ON(block >= max);
1573 	if (mb_test_bit(block, buddy)) {
1574 		ex->fe_len = 0;
1575 		ex->fe_start = 0;
1576 		ex->fe_group = 0;
1577 		return 0;
1578 	}
1579 
1580 	/* find actual order */
1581 	order = mb_find_order_for_block(e4b, block);
1582 	block = block >> order;
1583 
1584 	ex->fe_len = 1 << order;
1585 	ex->fe_start = block << order;
1586 	ex->fe_group = e4b->bd_group;
1587 
1588 	/* calc difference from given start */
1589 	next = next - ex->fe_start;
1590 	ex->fe_len -= next;
1591 	ex->fe_start += next;
1592 
1593 	while (needed > ex->fe_len &&
1594 	       mb_find_buddy(e4b, order, &max)) {
1595 
1596 		if (block + 1 >= max)
1597 			break;
1598 
1599 		next = (block + 1) * (1 << order);
1600 		if (mb_test_bit(next, e4b->bd_bitmap))
1601 			break;
1602 
1603 		order = mb_find_order_for_block(e4b, next);
1604 
1605 		block = next >> order;
1606 		ex->fe_len += 1 << order;
1607 	}
1608 
1609 	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1610 		/* Should never happen! (but apparently sometimes does?!?) */
1611 		WARN_ON(1);
1612 		ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1613 			"corruption or bug in mb_find_extent "
1614 			"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1615 			block, order, needed, ex->fe_group, ex->fe_start,
1616 			ex->fe_len, ex->fe_logical);
1617 		ex->fe_len = 0;
1618 		ex->fe_start = 0;
1619 		ex->fe_group = 0;
1620 	}
1621 	return ex->fe_len;
1622 }
1623 
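/*
 * Mark the extent @ex as in use in the buddy: update the free and fragment
 * counters, split higher-order buddies as needed and set the order-0 bits.
 * The return value packs the remaining length (low 16 bits) and the buddy
 * order (high 16 bits) at the first split, recorded by the caller as
 * ac_tail/ac_buddy for allocation history.
 */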
1624 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1625 {
1626 	int ord;
1627 	int mlen = 0;
1628 	int max = 0;
1629 	int cur;
1630 	int start = ex->fe_start;
1631 	int len = ex->fe_len;
1632 	unsigned ret = 0;
1633 	int len0 = len;
1634 	void *buddy;
1635 
1636 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1637 	BUG_ON(e4b->bd_group != ex->fe_group);
1638 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1639 	mb_check_buddy(e4b);
1640 	mb_mark_used_double(e4b, start, len);
1641 
1642 	this_cpu_inc(discard_pa_seq);
1643 	e4b->bd_info->bb_free -= len;
1644 	if (e4b->bd_info->bb_first_free == start)
1645 		e4b->bd_info->bb_first_free += len;
1646 
1647 	/* let's maintain fragments counter */
1648 	if (start != 0)
1649 		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1650 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1651 		max = !mb_test_bit(start + len, e4b->bd_bitmap);
1652 	if (mlen && max)
1653 		e4b->bd_info->bb_fragments++;
1654 	else if (!mlen && !max)
1655 		e4b->bd_info->bb_fragments--;
1656 
1657 	/* let's maintain buddy itself */
1658 	while (len) {
1659 		ord = mb_find_order_for_block(e4b, start);
1660 
1661 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1662 			/* the whole chunk may be allocated at once! */
1663 			mlen = 1 << ord;
1664 			buddy = mb_find_buddy(e4b, ord, &max);
1665 			BUG_ON((start >> ord) >= max);
1666 			mb_set_bit(start >> ord, buddy);
1667 			e4b->bd_info->bb_counters[ord]--;
1668 			start += mlen;
1669 			len -= mlen;
1670 			BUG_ON(len < 0);
1671 			continue;
1672 		}
1673 
1674 		/* store for history */
1675 		if (ret == 0)
1676 			ret = len | (ord << 16);
1677 
1678 		/* we have to split large buddy */
1679 		BUG_ON(ord <= 0);
1680 		buddy = mb_find_buddy(e4b, ord, &max);
1681 		mb_set_bit(start >> ord, buddy);
1682 		e4b->bd_info->bb_counters[ord]--;
1683 
1684 		ord--;
1685 		cur = (start >> ord) & ~1U;
1686 		buddy = mb_find_buddy(e4b, ord, &max);
1687 		mb_clear_bit(cur, buddy);
1688 		mb_clear_bit(cur + 1, buddy);
1689 		e4b->bd_info->bb_counters[ord]++;
1690 		e4b->bd_info->bb_counters[ord]++;
1691 	}
1692 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1693 
1694 	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1695 	mb_check_buddy(e4b);
1696 
1697 	return ret;
1698 }
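/*
 * The return value above packs two pieces of history into one int:
 * ret = len | (ord << 16) at the moment of the first buddy split.  A sketch
 * of how the caller decodes it (hypothetical numbers):
 *
 *   unsigned ret = 300 | (9 << 16);   // first split at order 9, 300 clusters left
 *   int tail  = ret & 0xffff;         // 300 -> ac->ac_tail
 *   int order = ret >> 16;            //   9 -> ac->ac_buddy
 */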
1699 
1700 /*
1701  * Must be called under group lock!
1702  */
1703 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1704 					struct ext4_buddy *e4b)
1705 {
1706 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1707 	int ret;
1708 
1709 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1710 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1711 
1712 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1713 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1714 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1715 
1716 	/* preallocation can change ac_b_ex, thus we store actually
1717 	 * allocated blocks for history */
1718 	ac->ac_f_ex = ac->ac_b_ex;
1719 
1720 	ac->ac_status = AC_STATUS_FOUND;
1721 	ac->ac_tail = ret & 0xffff;
1722 	ac->ac_buddy = ret >> 16;
1723 
1724 	/*
1725 	 * take the page reference. We want the page to be pinned
1726 	 * so that we don't get an ext4_mb_init_cache() call for this
1727 	 * group until we update the bitmap. That could make us
1728 	 * double allocate blocks. The reference is dropped
1729 	 * in ext4_mb_release_context()
1730 	 */
1731 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1732 	get_page(ac->ac_bitmap_page);
1733 	ac->ac_buddy_page = e4b->bd_buddy_page;
1734 	get_page(ac->ac_buddy_page);
1735 	/* store last allocated for subsequent stream allocation */
1736 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1737 		spin_lock(&sbi->s_md_lock);
1738 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1739 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1740 		spin_unlock(&sbi->s_md_lock);
1741 	}
1742 	/*
1743 	 * As we've just preallocated more space than
1744 	 * user requested originally, we store allocated
1745 	 * space in a special descriptor.
1746 	 */
1747 	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
1748 		ext4_mb_new_preallocation(ac);
1749 
1750 }
1751 
1752 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1753 					struct ext4_buddy *e4b,
1754 					int finish_group)
1755 {
1756 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1757 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1758 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1759 	struct ext4_free_extent ex;
1760 	int max;
1761 
1762 	if (ac->ac_status == AC_STATUS_FOUND)
1763 		return;
1764 	/*
1765 	 * We don't want to scan for a whole year
1766 	 */
1767 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1768 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1769 		ac->ac_status = AC_STATUS_BREAK;
1770 		return;
1771 	}
1772 
1773 	/*
1774 	 * Haven't found good chunk so far, let's continue
1775 	 */
1776 	if (bex->fe_len < gex->fe_len)
1777 		return;
1778 
1779 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1780 			&& bex->fe_group == e4b->bd_group) {
1781 		/* recheck chunk's availability - we don't know
1782 		 * when it was found (within this lock-unlock
1783 		 * period or not) */
1784 		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
1785 		if (max >= gex->fe_len) {
1786 			ext4_mb_use_best_found(ac, e4b);
1787 			return;
1788 		}
1789 	}
1790 }
1791 
1792 /*
1793  * The routine checks whether the found extent is good enough. If it is,
1794  * the extent gets marked used and a flag is set in the context
1795  * to stop scanning. Otherwise, the extent is compared with the
1796  * previously found extent and, if the new one is better, it is stored
1797  * in the context. Later, the best found extent will be used if
1798  * mballoc can't find a good enough extent.
1799  *
1800  * FIXME: real allocation policy is to be designed yet!
1801  */
1802 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1803 					struct ext4_free_extent *ex,
1804 					struct ext4_buddy *e4b)
1805 {
1806 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1807 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1808 
1809 	BUG_ON(ex->fe_len <= 0);
1810 	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1811 	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1812 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1813 
1814 	ac->ac_found++;
1815 
1816 	/*
1817 	 * The special case - take what you catch first
1818 	 */
1819 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1820 		*bex = *ex;
1821 		ext4_mb_use_best_found(ac, e4b);
1822 		return;
1823 	}
1824 
1825 	/*
1826 	 * Let's check whether the chunk is good enough
1827 	 */
1828 	if (ex->fe_len == gex->fe_len) {
1829 		*bex = *ex;
1830 		ext4_mb_use_best_found(ac, e4b);
1831 		return;
1832 	}
1833 
1834 	/*
1835 	 * If this is the first found extent, just store it in the context
1836 	 */
1837 	if (bex->fe_len == 0) {
1838 		*bex = *ex;
1839 		return;
1840 	}
1841 
1842 	/*
1843 	 * If the newly found extent is better, store it in the context
1844 	 */
1845 	if (bex->fe_len < gex->fe_len) {
1846 		/* if the request isn't satisfied, any found extent
1847 		 * larger than the previous best one is better */
1848 		if (ex->fe_len > bex->fe_len)
1849 			*bex = *ex;
1850 	} else if (ex->fe_len > gex->fe_len) {
1851 		/* if the request is satisfied, then we try to find
1852 		 * an extent that still satisfies the request, but is
1853 		 * smaller than the previous one */
1854 		if (ex->fe_len < bex->fe_len)
1855 			*bex = *ex;
1856 	}
1857 
1858 	ext4_mb_check_limits(ac, e4b, 0);
1859 }
1860 
1861 static noinline_for_stack
1862 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1863 					struct ext4_buddy *e4b)
1864 {
1865 	struct ext4_free_extent ex = ac->ac_b_ex;
1866 	ext4_group_t group = ex.fe_group;
1867 	int max;
1868 	int err;
1869 
1870 	BUG_ON(ex.fe_len <= 0);
1871 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1872 	if (err)
1873 		return err;
1874 
1875 	ext4_lock_group(ac->ac_sb, group);
1876 	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
1877 
1878 	if (max > 0) {
1879 		ac->ac_b_ex = ex;
1880 		ext4_mb_use_best_found(ac, e4b);
1881 	}
1882 
1883 	ext4_unlock_group(ac->ac_sb, group);
1884 	ext4_mb_unload_buddy(e4b);
1885 
1886 	return 0;
1887 }
1888 
1889 static noinline_for_stack
1890 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1891 				struct ext4_buddy *e4b)
1892 {
1893 	ext4_group_t group = ac->ac_g_ex.fe_group;
1894 	int max;
1895 	int err;
1896 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1897 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1898 	struct ext4_free_extent ex;
1899 
1900 	if (!grp)
1901 		return -EFSCORRUPTED;
1902 	if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
1903 		return 0;
1904 	if (grp->bb_free == 0)
1905 		return 0;
1906 
1907 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1908 	if (err)
1909 		return err;
1910 
1911 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
1912 		ext4_mb_unload_buddy(e4b);
1913 		return 0;
1914 	}
1915 
1916 	ext4_lock_group(ac->ac_sb, group);
1917 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
1918 			     ac->ac_g_ex.fe_len, &ex);
1919 	ex.fe_logical = 0xDEADFA11; /* debug value */
1920 
1921 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1922 		ext4_fsblk_t start;
1923 
1924 		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1925 			ex.fe_start;
1926 		/* use do_div to get remainder (would be 64-bit modulo) */
1927 		if (do_div(start, sbi->s_stripe) == 0) {
1928 			ac->ac_found++;
1929 			ac->ac_b_ex = ex;
1930 			ext4_mb_use_best_found(ac, e4b);
1931 		}
1932 	} else if (max >= ac->ac_g_ex.fe_len) {
1933 		BUG_ON(ex.fe_len <= 0);
1934 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1935 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1936 		ac->ac_found++;
1937 		ac->ac_b_ex = ex;
1938 		ext4_mb_use_best_found(ac, e4b);
1939 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1940 		/* Sometimes, the caller may want to merge even a small
1941 		 * number of blocks into an existing extent */
1942 		BUG_ON(ex.fe_len <= 0);
1943 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1944 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1945 		ac->ac_found++;
1946 		ac->ac_b_ex = ex;
1947 		ext4_mb_use_best_found(ac, e4b);
1948 	}
1949 	ext4_unlock_group(ac->ac_sb, group);
1950 	ext4_mb_unload_buddy(e4b);
1951 
1952 	return 0;
1953 }
1954 
1955 /*
1956  * The routine scans buddy structures (not the bitmap!) from the given order
1957  * up to the max order and tries to find a big enough chunk to satisfy the request
1958  */
1959 static noinline_for_stack
1960 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1961 					struct ext4_buddy *e4b)
1962 {
1963 	struct super_block *sb = ac->ac_sb;
1964 	struct ext4_group_info *grp = e4b->bd_info;
1965 	void *buddy;
1966 	int i;
1967 	int k;
1968 	int max;
1969 
1970 	BUG_ON(ac->ac_2order <= 0);
1971 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1972 		if (grp->bb_counters[i] == 0)
1973 			continue;
1974 
1975 		buddy = mb_find_buddy(e4b, i, &max);
1976 		BUG_ON(buddy == NULL);
1977 
1978 		k = mb_find_next_zero_bit(buddy, max, 0);
1979 		if (k >= max) {
1980 			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
1981 				"%d free clusters of order %d. But found 0",
1982 				grp->bb_counters[i], i);
1983 			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
1984 					 e4b->bd_group,
1985 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1986 			break;
1987 		}
1988 		ac->ac_found++;
1989 
1990 		ac->ac_b_ex.fe_len = 1 << i;
1991 		ac->ac_b_ex.fe_start = k << i;
1992 		ac->ac_b_ex.fe_group = e4b->bd_group;
1993 
1994 		ext4_mb_use_best_found(ac, e4b);
1995 
1996 		BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
1997 
1998 		if (EXT4_SB(sb)->s_mb_stats)
1999 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2000 
2001 		break;
2002 	}
2003 }
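/*
 * Example of the order-based scan above (hypothetical group state): for a
 * 64-cluster request ac_2order is 6, so the loop starts at order 6.  If
 * bb_counters[6] != 0 and the first zero bit in that order's buddy bitmap
 * is k = 3, the resulting extent is fe_start = 3 << 6 = 192 and
 * fe_len = 1 << 6 = 64, i.e. clusters 192..255 of the group.
 */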
2004 
2005 /*
2006  * The routine scans the group and measures all found extents.
2007  * In order to optimize scanning, the caller must pass the number of
2008  * free blocks in the group, so the routine knows the upper limit.
2009  */
2010 static noinline_for_stack
2011 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2012 					struct ext4_buddy *e4b)
2013 {
2014 	struct super_block *sb = ac->ac_sb;
2015 	void *bitmap = e4b->bd_bitmap;
2016 	struct ext4_free_extent ex;
2017 	int i;
2018 	int free;
2019 
2020 	free = e4b->bd_info->bb_free;
2021 	if (WARN_ON(free <= 0))
2022 		return;
2023 
2024 	i = e4b->bd_info->bb_first_free;
2025 
2026 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2027 		i = mb_find_next_zero_bit(bitmap,
2028 						EXT4_CLUSTERS_PER_GROUP(sb), i);
2029 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2030 			/*
2031 			 * If we have a corrupt bitmap, we won't find any
2032 			 * free blocks even though the group info says we
2033 			 * have free blocks
2034 			 */
2035 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2036 					"%d free clusters as per "
2037 					"group info. But bitmap says 0",
2038 					free);
2039 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2040 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2041 			break;
2042 		}
2043 
2044 		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2045 		if (WARN_ON(ex.fe_len <= 0))
2046 			break;
2047 		if (free < ex.fe_len) {
2048 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2049 					"%d free clusters as per "
2050 					"group info. But got %d blocks",
2051 					free, ex.fe_len);
2052 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2053 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2054 			/*
2055 			 * The number of free blocks differs. This mostly
2056 			 * indicates that the bitmap is corrupt. So exit
2057 			 * without claiming the space.
2058 			 */
2059 			break;
2060 		}
2061 		ex.fe_logical = 0xDEADC0DE; /* debug value */
2062 		ext4_mb_measure_extent(ac, &ex, e4b);
2063 
2064 		i += ex.fe_len;
2065 		free -= ex.fe_len;
2066 	}
2067 
2068 	ext4_mb_check_limits(ac, e4b, 1);
2069 }
2070 
2071 /*
2072  * This is a special case for storage like RAID5:
2073  * we try to find stripe-aligned chunks for requests that are a multiple of the stripe size
2074  */
2075 static noinline_for_stack
2076 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2077 				 struct ext4_buddy *e4b)
2078 {
2079 	struct super_block *sb = ac->ac_sb;
2080 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2081 	void *bitmap = e4b->bd_bitmap;
2082 	struct ext4_free_extent ex;
2083 	ext4_fsblk_t first_group_block;
2084 	ext4_fsblk_t a;
2085 	ext4_grpblk_t i;
2086 	int max;
2087 
2088 	BUG_ON(sbi->s_stripe == 0);
2089 
2090 	/* find first stripe-aligned block in group */
2091 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2092 
2093 	a = first_group_block + sbi->s_stripe - 1;
2094 	do_div(a, sbi->s_stripe);
2095 	i = (a * sbi->s_stripe) - first_group_block;
2096 
2097 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2098 		if (!mb_test_bit(i, bitmap)) {
2099 			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2100 			if (max >= sbi->s_stripe) {
2101 				ac->ac_found++;
2102 				ex.fe_logical = 0xDEADF00D; /* debug value */
2103 				ac->ac_b_ex = ex;
2104 				ext4_mb_use_best_found(ac, e4b);
2105 				break;
2106 			}
2107 		}
2108 		i += sbi->s_stripe;
2109 	}
2110 }
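/*
 * Worked example of the stripe alignment above (hypothetical numbers): with
 * s_stripe = 8 and first_group_block = 8193,
 *
 *   a = 8193 + 8 - 1 = 8200;  do_div(a, 8)  ->  a = 1025;
 *   i = 1025 * 8 - 8193 = 7;
 *
 * so the scan starts at group offset 7, the first block in this group whose
 * absolute block number (8200) is a multiple of the stripe size.
 */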
2111 
2112 /*
2113  * This is also called BEFORE we load the buddy bitmap.
2114  * Returns true or false indicating whether the group is suitable
2115  * for the allocation or not.
2116  */
2117 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2118 				ext4_group_t group, int cr)
2119 {
2120 	ext4_grpblk_t free, fragments;
2121 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2122 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2123 
2124 	BUG_ON(cr < 0 || cr >= 4);
2125 
2126 	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2127 		return false;
2128 
2129 	free = grp->bb_free;
2130 	if (free == 0)
2131 		return false;
2132 
2133 	fragments = grp->bb_fragments;
2134 	if (fragments == 0)
2135 		return false;
2136 
2137 	switch (cr) {
2138 	case 0:
2139 		BUG_ON(ac->ac_2order == 0);
2140 
2141 		/* Avoid using the first bg of a flexgroup for data files */
2142 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2143 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2144 		    ((group % flex_size) == 0))
2145 			return false;
2146 
2147 		if (free < ac->ac_g_ex.fe_len)
2148 			return false;
2149 
2150 		if (ac->ac_2order > ac->ac_sb->s_blocksize_bits+1)
2151 			return true;
2152 
2153 		if (grp->bb_largest_free_order < ac->ac_2order)
2154 			return false;
2155 
2156 		return true;
2157 	case 1:
2158 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
2159 			return true;
2160 		break;
2161 	case 2:
2162 		if (free >= ac->ac_g_ex.fe_len)
2163 			return true;
2164 		break;
2165 	case 3:
2166 		return true;
2167 	default:
2168 		BUG();
2169 	}
2170 
2171 	return false;
2172 }
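/*
 * Example of the cr == 1 heuristic above (hypothetical counters): a group
 * with bb_free = 1000 and bb_fragments = 50 has an average free extent of
 * 20 clusters, so it is accepted for a 16-cluster request
 * (1000 / 50 >= 16) but skipped for a 32-cluster one.
 */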
2173 
2174 /*
2175  * This could return negative error code if something goes wrong
2176  * during ext4_mb_init_group(). This should not be called with
2177  * ext4_lock_group() held.
2178  */
2179 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2180 				     ext4_group_t group, int cr)
2181 {
2182 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2183 	struct super_block *sb = ac->ac_sb;
2184 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2185 	bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2186 	ext4_grpblk_t free;
2187 	int ret = 0;
2188 
2189 	if (!grp)
2190 		return -EFSCORRUPTED;
2191 	if (sbi->s_mb_stats)
2192 		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2193 	if (should_lock)
2194 		ext4_lock_group(sb, group);
2195 	free = grp->bb_free;
2196 	if (free == 0)
2197 		goto out;
2198 	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2199 		goto out;
2200 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2201 		goto out;
2202 	if (should_lock)
2203 		ext4_unlock_group(sb, group);
2204 
2205 	/* We only do this if the grp has never been initialized */
2206 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2207 		struct ext4_group_desc *gdp =
2208 			ext4_get_group_desc(sb, group, NULL);
2209 		int ret;
2210 
2211 		/* cr=0/1 is a very optimistic search to find large
2212 		 * good chunks almost for free.  If buddy data is not
2213 		 * ready, then this optimization makes no sense.  But
2214 		 * we never skip the first block group in a flex_bg,
2215 		 * since this gets used for metadata block allocation,
2216 		 * and we want to make sure we locate metadata blocks
2217 		 * in the first block group in the flex_bg if possible.
2218 		 */
2219 		if (cr < 2 &&
2220 		    (!sbi->s_log_groups_per_flex ||
2221 		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2222 		    !(ext4_has_group_desc_csum(sb) &&
2223 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2224 			return 0;
2225 		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2226 		if (ret)
2227 			return ret;
2228 	}
2229 
2230 	if (should_lock)
2231 		ext4_lock_group(sb, group);
2232 	ret = ext4_mb_good_group(ac, group, cr);
2233 out:
2234 	if (should_lock)
2235 		ext4_unlock_group(sb, group);
2236 	return ret;
2237 }
2238 
2239 /*
2240  * Start prefetching @nr block bitmaps starting at @group.
2241  * Return the next group which needs to be prefetched.
2242  */
2243 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2244 			      unsigned int nr, int *cnt)
2245 {
2246 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2247 	struct buffer_head *bh;
2248 	struct blk_plug plug;
2249 
2250 	blk_start_plug(&plug);
2251 	while (nr-- > 0) {
2252 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2253 								  NULL);
2254 		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2255 
2256 		/*
2257 		 * Prefetch block groups with free blocks; but don't
2258 		 * bother if it is marked uninitialized on disk, since
2259 		 * it won't require I/O to read.  Also only try to
2260 		 * prefetch once, so we avoid a getblk() call, which can
2261 		 * be expensive.
2262 		 */
2263 		if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2264 		    EXT4_MB_GRP_NEED_INIT(grp) &&
2265 		    ext4_free_group_clusters(sb, gdp) > 0 &&
2266 		    !(ext4_has_group_desc_csum(sb) &&
2267 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2268 			bh = ext4_read_block_bitmap_nowait(sb, group, true);
2269 			if (bh && !IS_ERR(bh)) {
2270 				if (!buffer_uptodate(bh) && cnt)
2271 					(*cnt)++;
2272 				brelse(bh);
2273 			}
2274 		}
2275 		if (++group >= ngroups)
2276 			group = 0;
2277 	}
2278 	blk_finish_plug(&plug);
2279 	return group;
2280 }
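/*
 * Example of how a caller batches prefetch requests (this mirrors the
 * computation in ext4_mb_regular_allocator() below; the numbers are
 * hypothetical): with 2^4 = 16 groups per flex_bg and group = 37,
 *
 *   nr = 16 - (37 & 15) = 11
 *
 * so groups 37..47 are prefetched and the batch stops at the next flex_bg
 * boundary (group 48), subject to the s_mb_prefetch cap.
 */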
2281 
2282 /*
2283  * Prefetching reads the block bitmap into the buffer cache; but we
2284  * need to make sure that the buddy bitmap in the page cache has been
2285  * initialized.  Note that ext4_mb_init_group() will block if the I/O
2286  * is not yet completed, or indeed if the I/O was never initiated
2287  * because ext4_mb_prefetch() did not start it.
2288  *
2289  * TODO: We should actually kick off the buddy bitmap setup in a work
2290  * queue when the buffer I/O is completed, so that we don't block
2291  * waiting for the block allocation bitmap read to finish when
2292  * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2293  */
2294 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2295 			   unsigned int nr)
2296 {
2297 	while (nr-- > 0) {
2298 		struct ext4_group_desc *gdp;
2299 		struct ext4_group_info *grp;
2300 
2301 		if (!group)
2302 			group = ext4_get_groups_count(sb);
2303 		group--;
2304 		gdp = ext4_get_group_desc(sb, group, NULL);
2305 		grp = ext4_get_group_info(sb, group);
2306 
2307 		if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2308 		    ext4_free_group_clusters(sb, gdp) > 0 &&
2309 		    !(ext4_has_group_desc_csum(sb) &&
2310 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2311 			if (ext4_mb_init_group(sb, group, GFP_NOFS))
2312 				break;
2313 		}
2314 	}
2315 }
2316 
2317 static noinline_for_stack int
2318 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2319 {
2320 	ext4_group_t prefetch_grp = 0, ngroups, group, i;
2321 	int cr = -1;
2322 	int err = 0, first_err = 0;
2323 	unsigned int nr = 0, prefetch_ios = 0;
2324 	struct ext4_sb_info *sbi;
2325 	struct super_block *sb;
2326 	struct ext4_buddy e4b;
2327 	int lost;
2328 
2329 	sb = ac->ac_sb;
2330 	sbi = EXT4_SB(sb);
2331 	ngroups = ext4_get_groups_count(sb);
2332 	/* non-extent files are limited to low blocks/groups */
2333 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2334 		ngroups = sbi->s_blockfile_groups;
2335 
2336 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2337 
2338 	/* first, try the goal */
2339 	err = ext4_mb_find_by_goal(ac, &e4b);
2340 	if (err || ac->ac_status == AC_STATUS_FOUND)
2341 		goto out;
2342 
2343 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2344 		goto out;
2345 
2346 	/*
2347 	 * ac->ac_2order is set only if the fe_len is a power of 2.
2348 	 * If ac->ac_2order is set, we also set the criteria to 0 so that we
2349 	 * try exact allocation using the buddy.
2350 	 */
2351 	i = fls(ac->ac_g_ex.fe_len);
2352 	ac->ac_2order = 0;
2353 	/*
2354 	 * We search using buddy data only if the order of the request
2355 	 * is greater than or equal to sbi->s_mb_order2_reqs.
2356 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2357 	 * We also support searching for power-of-two requests only for
2358 	 * requests up to the maximum buddy size we have constructed.
2359 	 */
2360 	if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
2361 		/*
2362 		 * This should tell if fe_len is exactly a power of 2
2363 		 */
2364 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2365 			ac->ac_2order = array_index_nospec(i - 1,
2366 							   sb->s_blocksize_bits + 2);
2367 	}
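	/*
	 * Example (hypothetical lengths): fe_len = 64 gives fls() = 7 and
	 * 64 & ~(1 << 6) == 0, so ac_2order = 6; fe_len = 48 gives fls() = 6
	 * but 48 & ~(1 << 5) = 16 != 0, so ac_2order stays 0 and the cr=0
	 * buddy-exact pass is skipped.  array_index_nospec() only sanitizes
	 * the in-range index against speculation here.
	 */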
2368 
2369 	/* if stream allocation is enabled, use global goal */
2370 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2371 		/* TBD: may be hot point */
2372 		spin_lock(&sbi->s_md_lock);
2373 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2374 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2375 		spin_unlock(&sbi->s_md_lock);
2376 	}
2377 
2378 	/* Let's just scan groups to find more or less suitable blocks */
2379 	cr = ac->ac_2order ? 0 : 1;
2380 	/*
2381 	 * cr == 0 try to get exact allocation,
2382 	 * cr == 3  try to get anything
2383 	 */
2384 repeat:
2385 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2386 		ac->ac_criteria = cr;
2387 		/*
2388 		 * searching for the right group start
2389 		 * from the goal value specified
2390 		 */
2391 		group = ac->ac_g_ex.fe_group;
2392 		prefetch_grp = group;
2393 
2394 		for (i = 0; i < ngroups; group++, i++) {
2395 			int ret = 0;
2396 			cond_resched();
2397 			/*
2398 			 * Artificially restricted ngroups for non-extent
2399 			 * files makes group > ngroups possible on first loop.
2400 			 */
2401 			if (group >= ngroups)
2402 				group = 0;
2403 
2404 			/*
2405 			 * Batch reads of the block allocation bitmaps
2406 			 * to get multiple READs in flight; limit
2407 			 * prefetching at cr=0/1, otherwise mballoc can
2408 			 * spend a lot of time loading imperfect groups
2409 			 */
2410 			if ((prefetch_grp == group) &&
2411 			    (cr > 1 ||
2412 			     prefetch_ios < sbi->s_mb_prefetch_limit)) {
2413 				unsigned int curr_ios = prefetch_ios;
2414 
2415 				nr = sbi->s_mb_prefetch;
2416 				if (ext4_has_feature_flex_bg(sb)) {
2417 					nr = 1 << sbi->s_log_groups_per_flex;
2418 					nr -= group & (nr - 1);
2419 					nr = min(nr, sbi->s_mb_prefetch);
2420 				}
2421 				prefetch_grp = ext4_mb_prefetch(sb, group,
2422 							nr, &prefetch_ios);
2423 				if (prefetch_ios == curr_ios)
2424 					nr = 0;
2425 			}
2426 
2427 			/* This now checks without needing the buddy page */
2428 			ret = ext4_mb_good_group_nolock(ac, group, cr);
2429 			if (ret <= 0) {
2430 				if (!first_err)
2431 					first_err = ret;
2432 				continue;
2433 			}
2434 
2435 			err = ext4_mb_load_buddy(sb, group, &e4b);
2436 			if (err)
2437 				goto out;
2438 
2439 			ext4_lock_group(sb, group);
2440 
2441 			/*
2442 			 * We need to check again after locking the
2443 			 * block group
2444 			 */
2445 			ret = ext4_mb_good_group(ac, group, cr);
2446 			if (ret == 0) {
2447 				ext4_unlock_group(sb, group);
2448 				ext4_mb_unload_buddy(&e4b);
2449 				continue;
2450 			}
2451 
2452 			ac->ac_groups_scanned++;
2453 			if (cr == 0)
2454 				ext4_mb_simple_scan_group(ac, &e4b);
2455 			else if (cr == 1 && sbi->s_stripe &&
2456 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2457 				ext4_mb_scan_aligned(ac, &e4b);
2458 			else
2459 				ext4_mb_complex_scan_group(ac, &e4b);
2460 
2461 			ext4_unlock_group(sb, group);
2462 			ext4_mb_unload_buddy(&e4b);
2463 
2464 			if (ac->ac_status != AC_STATUS_CONTINUE)
2465 				break;
2466 		}
2467 		/* Processed all groups and haven't found blocks */
2468 		if (sbi->s_mb_stats && i == ngroups)
2469 			atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2470 	}
2471 
2472 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2473 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2474 		/*
2475 		 * We've been searching too long. Let's try to allocate
2476 		 * the best chunk we've found so far
2477 		 */
2478 		ext4_mb_try_best_found(ac, &e4b);
2479 		if (ac->ac_status != AC_STATUS_FOUND) {
2480 			/*
2481 			 * Someone luckier has already allocated it.
2482 			 * The only thing we can do is just take the first
2483 			 * found block(s)
2484 			 */
2485 			lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2486 			mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2487 				 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2488 				 ac->ac_b_ex.fe_len, lost);
2489 
2490 			ac->ac_b_ex.fe_group = 0;
2491 			ac->ac_b_ex.fe_start = 0;
2492 			ac->ac_b_ex.fe_len = 0;
2493 			ac->ac_status = AC_STATUS_CONTINUE;
2494 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2495 			cr = 3;
2496 			goto repeat;
2497 		}
2498 	}
2499 
2500 	if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2501 		atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2502 out:
2503 	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2504 		err = first_err;
2505 
2506 	mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2507 		 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2508 		 ac->ac_flags, cr, err);
2509 
2510 	if (nr)
2511 		ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2512 
2513 	return err;
2514 }
2515 
2516 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2517 {
2518 	struct super_block *sb = PDE_DATA(file_inode(seq->file));
2519 	ext4_group_t group;
2520 
2521 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2522 		return NULL;
2523 	group = *pos + 1;
2524 	return (void *) ((unsigned long) group);
2525 }
2526 
2527 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2528 {
2529 	struct super_block *sb = PDE_DATA(file_inode(seq->file));
2530 	ext4_group_t group;
2531 
2532 	++*pos;
2533 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2534 		return NULL;
2535 	group = *pos + 1;
2536 	return (void *) ((unsigned long) group);
2537 }
2538 
2539 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2540 {
2541 	struct super_block *sb = PDE_DATA(file_inode(seq->file));
2542 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2543 	int i;
2544 	int err, buddy_loaded = 0;
2545 	struct ext4_buddy e4b;
2546 	struct ext4_group_info *grinfo;
2547 	unsigned char blocksize_bits = min_t(unsigned char,
2548 					     sb->s_blocksize_bits,
2549 					     EXT4_MAX_BLOCK_LOG_SIZE);
2550 	struct sg {
2551 		struct ext4_group_info info;
2552 		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2553 	} sg;
2554 
2555 	group--;
2556 	if (group == 0)
2557 		seq_puts(seq, "#group: free  frags first ["
2558 			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2559 			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
2560 
2561 	i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2562 		sizeof(struct ext4_group_info);
2563 
2564 	grinfo = ext4_get_group_info(sb, group);
2565 	if (!grinfo)
2566 		return 0;
2567 	/* Load the group info in memory only if not already loaded. */
2568 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2569 		err = ext4_mb_load_buddy(sb, group, &e4b);
2570 		if (err) {
2571 			seq_printf(seq, "#%-5u: I/O error\n", group);
2572 			return 0;
2573 		}
2574 		buddy_loaded = 1;
2575 	}
2576 
2577 	memcpy(&sg, grinfo, i);
2578 
2579 	if (buddy_loaded)
2580 		ext4_mb_unload_buddy(&e4b);
2581 
2582 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2583 			sg.info.bb_fragments, sg.info.bb_first_free);
2584 	for (i = 0; i <= 13; i++)
2585 		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2586 				sg.info.bb_counters[i] : 0);
2587 	seq_puts(seq, " ]\n");
2588 
2589 	return 0;
2590 }
2591 
2592 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2593 {
2594 }
2595 
2596 const struct seq_operations ext4_mb_seq_groups_ops = {
2597 	.start  = ext4_mb_seq_groups_start,
2598 	.next   = ext4_mb_seq_groups_next,
2599 	.stop   = ext4_mb_seq_groups_stop,
2600 	.show   = ext4_mb_seq_groups_show,
2601 };
2602 
2603 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
2604 {
2605 	struct super_block *sb = (struct super_block *)seq->private;
2606 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2607 
2608 	seq_puts(seq, "mballoc:\n");
2609 	if (!sbi->s_mb_stats) {
2610 		seq_puts(seq, "\tmb stats collection turned off.\n");
2611 		seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
2612 		return 0;
2613 	}
2614 	seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
2615 	seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
2616 
2617 	seq_printf(seq, "\tgroups_scanned: %u\n",  atomic_read(&sbi->s_bal_groups_scanned));
2618 
2619 	seq_puts(seq, "\tcr0_stats:\n");
2620 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
2621 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2622 		   atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
2623 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2624 		   atomic64_read(&sbi->s_bal_cX_failed[0]));
2625 
2626 	seq_puts(seq, "\tcr1_stats:\n");
2627 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
2628 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2629 		   atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
2630 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2631 		   atomic64_read(&sbi->s_bal_cX_failed[1]));
2632 
2633 	seq_puts(seq, "\tcr2_stats:\n");
2634 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
2635 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2636 		   atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
2637 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2638 		   atomic64_read(&sbi->s_bal_cX_failed[2]));
2639 
2640 	seq_puts(seq, "\tcr3_stats:\n");
2641 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
2642 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2643 		   atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
2644 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2645 		   atomic64_read(&sbi->s_bal_cX_failed[3]));
2646 	seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
2647 	seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
2648 	seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
2649 	seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
2650 	seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
2651 
2652 	seq_printf(seq, "\tbuddies_generated: %u/%u\n",
2653 		   atomic_read(&sbi->s_mb_buddies_generated),
2654 		   ext4_get_groups_count(sb));
2655 	seq_printf(seq, "\tbuddies_time_used: %llu\n",
2656 		   atomic64_read(&sbi->s_mb_generation_time));
2657 	seq_printf(seq, "\tpreallocated: %u\n",
2658 		   atomic_read(&sbi->s_mb_preallocated));
2659 	seq_printf(seq, "\tdiscarded: %u\n",
2660 		   atomic_read(&sbi->s_mb_discarded));
2661 	return 0;
2662 }
2663 
2664 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2665 {
2666 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2667 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2668 
2669 	BUG_ON(!cachep);
2670 	return cachep;
2671 }
2672 
2673 /*
2674  * Allocate the top-level s_group_info array for the specified number
2675  * of groups
2676  */
2677 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2678 {
2679 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2680 	unsigned size;
2681 	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
2682 
2683 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2684 		EXT4_DESC_PER_BLOCK_BITS(sb);
2685 	if (size <= sbi->s_group_info_size)
2686 		return 0;
2687 
2688 	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2689 	new_groupinfo = kvzalloc(size, GFP_KERNEL);
2690 	if (!new_groupinfo) {
2691 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2692 		return -ENOMEM;
2693 	}
2694 	rcu_read_lock();
2695 	old_groupinfo = rcu_dereference(sbi->s_group_info);
2696 	if (old_groupinfo)
2697 		memcpy(new_groupinfo, old_groupinfo,
2698 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
2699 	rcu_read_unlock();
2700 	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
2701 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2702 	if (old_groupinfo)
2703 		ext4_kvfree_array_rcu(old_groupinfo);
2704 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
2705 		   sbi->s_group_info_size);
2706 	return 0;
2707 }
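/*
 * Sizing example for the array above (assuming 4 KiB blocks and 32-byte
 * group descriptors, i.e. EXT4_DESC_PER_BLOCK = 128): a filesystem with
 * 100,000 groups needs ceil(100000 / 128) = 782 second-level pointers;
 * roundup_pow_of_two(782 * sizeof(void *)) is 8192 bytes on 64-bit, so
 * s_group_info_size ends up as 1024 slots.
 */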
2708 
2709 /* Create and initialize ext4_group_info data for the given group. */
2710 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2711 			  struct ext4_group_desc *desc)
2712 {
2713 	int i;
2714 	int metalen = 0;
2715 	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
2716 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2717 	struct ext4_group_info **meta_group_info;
2718 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2719 
2720 	/*
2721 	 * First check if this group is the first of a group-descriptor block.
2722 	 * If so, we have to allocate a new table of pointers
2723 	 * to ext4_group_info structures
2724 	 */
2725 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2726 		metalen = sizeof(*meta_group_info) <<
2727 			EXT4_DESC_PER_BLOCK_BITS(sb);
2728 		meta_group_info = kmalloc(metalen, GFP_NOFS);
2729 		if (meta_group_info == NULL) {
2730 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
2731 				 "for a buddy group");
2732 			goto exit_meta_group_info;
2733 		}
2734 		rcu_read_lock();
2735 		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
2736 		rcu_read_unlock();
2737 	}
2738 
2739 	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
2740 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2741 
2742 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
2743 	if (meta_group_info[i] == NULL) {
2744 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2745 		goto exit_group_info;
2746 	}
2747 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2748 		&(meta_group_info[i]->bb_state));
2749 
2750 	/*
2751 	 * initialize bb_free to be able to skip
2752 	 * empty groups without initialization
2753 	 */
2754 	if (ext4_has_group_desc_csum(sb) &&
2755 	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
2756 		meta_group_info[i]->bb_free =
2757 			ext4_free_clusters_after_init(sb, group, desc);
2758 	} else {
2759 		meta_group_info[i]->bb_free =
2760 			ext4_free_group_clusters(sb, desc);
2761 	}
2762 
2763 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2764 	init_rwsem(&meta_group_info[i]->alloc_sem);
2765 	meta_group_info[i]->bb_free_root = RB_ROOT;
2766 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2767 
2768 	mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
2769 	return 0;
2770 
2771 exit_group_info:
2772 	/* If a meta_group_info table has been allocated, release it now */
2773 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2774 		struct ext4_group_info ***group_info;
2775 
2776 		rcu_read_lock();
2777 		group_info = rcu_dereference(sbi->s_group_info);
2778 		kfree(group_info[idx]);
2779 		group_info[idx] = NULL;
2780 		rcu_read_unlock();
2781 	}
2782 exit_meta_group_info:
2783 	return -ENOMEM;
2784 } /* ext4_mb_add_groupinfo */
2785 
2786 static int ext4_mb_init_backend(struct super_block *sb)
2787 {
2788 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2789 	ext4_group_t i;
2790 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2791 	int err;
2792 	struct ext4_group_desc *desc;
2793 	struct ext4_group_info ***group_info;
2794 	struct kmem_cache *cachep;
2795 
2796 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
2797 	if (err)
2798 		return err;
2799 
2800 	sbi->s_buddy_cache = new_inode(sb);
2801 	if (sbi->s_buddy_cache == NULL) {
2802 		ext4_msg(sb, KERN_ERR, "can't get new inode");
2803 		goto err_freesgi;
2804 	}
2805 	/* To avoid potentially colliding with a valid on-disk inode number,
2806 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2807 	 * not in the inode hash, so it should never be found by iget(), but
2808 	 * this will avoid confusion if it ever shows up during debugging. */
2809 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2810 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2811 	for (i = 0; i < ngroups; i++) {
2812 		cond_resched();
2813 		desc = ext4_get_group_desc(sb, i, NULL);
2814 		if (desc == NULL) {
2815 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2816 			goto err_freebuddy;
2817 		}
2818 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2819 			goto err_freebuddy;
2820 	}
2821 
2822 	if (ext4_has_feature_flex_bg(sb)) {
2823 		/* A single flex group is supposed to be read by a single IO.
2824 		 * s_mb_prefetch is an unsigned integer, so 2 ^ s_log_groups_per_flex
2825 		 * must fit in 32 bits; shifts of 32 or more are rejected below.
2826 		 */
2827 		if (sbi->s_es->s_log_groups_per_flex >= 32) {
2828 			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
2829 			goto err_freebuddy;
2830 		}
2831 		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
2832 			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
2833 		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
2834 	} else {
2835 		sbi->s_mb_prefetch = 32;
2836 	}
2837 	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
2838 		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
2839 	/* How many real IOs to prefetch within a single allocation at cr=0.
2840 	 * Given that cr=0 is a CPU-related optimization we shouldn't try to
2841 	 * load too many groups; at some point we should start to use what
2842 	 * we've got in memory.
2843 	 * With an average random access time of 5 ms, it'd take a second to get
2844 	 * 200 groups (* N with flex_bg), so let's make this limit 4
2845 	 */
2846 	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
2847 	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
2848 		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
2849 
2850 	return 0;
2851 
2852 err_freebuddy:
2853 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2854 	while (i-- > 0) {
2855 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
2856 
2857 		if (grp)
2858 			kmem_cache_free(cachep, grp);
2859 	}
2860 	i = sbi->s_group_info_size;
2861 	rcu_read_lock();
2862 	group_info = rcu_dereference(sbi->s_group_info);
2863 	while (i-- > 0)
2864 		kfree(group_info[i]);
2865 	rcu_read_unlock();
2866 	iput(sbi->s_buddy_cache);
2867 err_freesgi:
2868 	rcu_read_lock();
2869 	kvfree(rcu_dereference(sbi->s_group_info));
2870 	rcu_read_unlock();
2871 	return -ENOMEM;
2872 }
2873 
2874 static void ext4_groupinfo_destroy_slabs(void)
2875 {
2876 	int i;
2877 
2878 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2879 		kmem_cache_destroy(ext4_groupinfo_caches[i]);
2880 		ext4_groupinfo_caches[i] = NULL;
2881 	}
2882 }
2883 
2884 static int ext4_groupinfo_create_slab(size_t size)
2885 {
2886 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2887 	int slab_size;
2888 	int blocksize_bits = order_base_2(size);
2889 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2890 	struct kmem_cache *cachep;
2891 
2892 	if (cache_index >= NR_GRPINFO_CACHES)
2893 		return -EINVAL;
2894 
2895 	if (unlikely(cache_index < 0))
2896 		cache_index = 0;
2897 
2898 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
2899 	if (ext4_groupinfo_caches[cache_index]) {
2900 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2901 		return 0;	/* Already created */
2902 	}
2903 
2904 	slab_size = offsetof(struct ext4_group_info,
2905 				bb_counters[blocksize_bits + 2]);
2906 
2907 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2908 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2909 					NULL);
2910 
2911 	ext4_groupinfo_caches[cache_index] = cachep;
2912 
2913 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2914 	if (!cachep) {
2915 		printk(KERN_EMERG
2916 		       "EXT4-fs: no memory for groupinfo slab cache\n");
2917 		return -ENOMEM;
2918 	}
2919 
2920 	return 0;
2921 }
2922 
2923 int ext4_mb_init(struct super_block *sb)
2924 {
2925 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2926 	unsigned i, j;
2927 	unsigned offset, offset_incr;
2928 	unsigned max;
2929 	int ret;
2930 
2931 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2932 
2933 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2934 	if (sbi->s_mb_offsets == NULL) {
2935 		ret = -ENOMEM;
2936 		goto out;
2937 	}
2938 
2939 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2940 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2941 	if (sbi->s_mb_maxs == NULL) {
2942 		ret = -ENOMEM;
2943 		goto out;
2944 	}
2945 
2946 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2947 	if (ret < 0)
2948 		goto out;
2949 
2950 	/* order 0 is regular bitmap */
2951 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2952 	sbi->s_mb_offsets[0] = 0;
2953 
2954 	i = 1;
2955 	offset = 0;
2956 	offset_incr = 1 << (sb->s_blocksize_bits - 1);
2957 	max = sb->s_blocksize << 2;
2958 	do {
2959 		sbi->s_mb_offsets[i] = offset;
2960 		sbi->s_mb_maxs[i] = max;
2961 		offset += offset_incr;
2962 		offset_incr = offset_incr >> 1;
2963 		max = max >> 1;
2964 		i++;
2965 	} while (i <= sb->s_blocksize_bits + 1);
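	/*
	 * Resulting layout for 4 KiB blocks (blocksize_bits = 12), as an
	 * illustration: order 0 uses the on-disk bitmap itself
	 * (s_mb_maxs[0] = 32768 bits); within the buddy block, order 1
	 * starts at byte offset 0 with 16384 bits, order 2 at offset 2048
	 * with 8192 bits, order 3 at offset 3072 with 4096 bits, and so on,
	 * halving each time up to order 13.
	 */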
2966 
2967 	spin_lock_init(&sbi->s_md_lock);
2968 	sbi->s_mb_free_pending = 0;
2969 	INIT_LIST_HEAD(&sbi->s_freed_data_list);
2970 
2971 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2972 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2973 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2974 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2975 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2976 	sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
2977 	/*
2978 	 * The default group preallocation is 512, which for 4k block
2979 	 * sizes translates to 2 megabytes.  However for bigalloc file
2980 	 * systems, this is probably too big (i.e., if the cluster size
2981 	 * is 1 megabyte, then group preallocation size becomes half a
2982 	 * gigabyte!).  As a default, we will keep a two megabyte
2983 	 * group prealloc size for cluster sizes up to 64k, and after
2984 	 * that, we will force a minimum group preallocation size of
2985 	 * 32 clusters.  This translates to 8 megs when the cluster
2986 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
2987 	 * which seems reasonable as a default.
2988 	 */
2989 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2990 				       sbi->s_cluster_bits, 32);
2991 	/*
2992 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2993 	 * to the lowest multiple of s_stripe which is bigger than
2994 	 * the s_mb_group_prealloc as determined above. We want
2995 	 * the preallocation size to be an exact multiple of the
2996 	 * RAID stripe size so that preallocations don't fragment
2997 	 * the stripes.
2998 	 */
2999 	if (sbi->s_stripe > 1) {
3000 		sbi->s_mb_group_prealloc = roundup(
3001 			sbi->s_mb_group_prealloc, sbi->s_stripe);
3002 	}
3003 
3004 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3005 	if (sbi->s_locality_groups == NULL) {
3006 		ret = -ENOMEM;
3007 		goto out;
3008 	}
3009 	for_each_possible_cpu(i) {
3010 		struct ext4_locality_group *lg;
3011 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
3012 		mutex_init(&lg->lg_mutex);
3013 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
3014 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3015 		spin_lock_init(&lg->lg_prealloc_lock);
3016 	}
3017 
3018 	/* init file for buddy data */
3019 	ret = ext4_mb_init_backend(sb);
3020 	if (ret != 0)
3021 		goto out_free_locality_groups;
3022 
3023 	return 0;
3024 
3025 out_free_locality_groups:
3026 	free_percpu(sbi->s_locality_groups);
3027 	sbi->s_locality_groups = NULL;
3028 out:
3029 	kfree(sbi->s_mb_offsets);
3030 	sbi->s_mb_offsets = NULL;
3031 	kfree(sbi->s_mb_maxs);
3032 	sbi->s_mb_maxs = NULL;
3033 	return ret;
3034 }
3035 
3036 /* needs to be called with the ext4 group lock held */
3037 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3038 {
3039 	struct ext4_prealloc_space *pa;
3040 	struct list_head *cur, *tmp;
3041 	int count = 0;
3042 
3043 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3044 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3045 		list_del(&pa->pa_group_list);
3046 		count++;
3047 		kmem_cache_free(ext4_pspace_cachep, pa);
3048 	}
3049 	return count;
3050 }
3051 
3052 int ext4_mb_release(struct super_block *sb)
3053 {
3054 	ext4_group_t ngroups = ext4_get_groups_count(sb);
3055 	ext4_group_t i;
3056 	int num_meta_group_infos;
3057 	struct ext4_group_info *grinfo, ***group_info;
3058 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3059 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3060 	int count;
3061 
3062 	if (sbi->s_group_info) {
3063 		for (i = 0; i < ngroups; i++) {
3064 			cond_resched();
3065 			grinfo = ext4_get_group_info(sb, i);
3066 			if (!grinfo)
3067 				continue;
3068 			mb_group_bb_bitmap_free(grinfo);
3069 			ext4_lock_group(sb, i);
3070 			count = ext4_mb_cleanup_pa(grinfo);
3071 			if (count)
3072 				mb_debug(sb, "mballoc: %d PAs left\n",
3073 					 count);
3074 			ext4_unlock_group(sb, i);
3075 			kmem_cache_free(cachep, grinfo);
3076 		}
3077 		num_meta_group_infos = (ngroups +
3078 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
3079 			EXT4_DESC_PER_BLOCK_BITS(sb);
3080 		rcu_read_lock();
3081 		group_info = rcu_dereference(sbi->s_group_info);
3082 		for (i = 0; i < num_meta_group_infos; i++)
3083 			kfree(group_info[i]);
3084 		kvfree(group_info);
3085 		rcu_read_unlock();
3086 	}
3087 	kfree(sbi->s_mb_offsets);
3088 	kfree(sbi->s_mb_maxs);
3089 	iput(sbi->s_buddy_cache);
3090 	if (sbi->s_mb_stats) {
3091 		ext4_msg(sb, KERN_INFO,
3092 		       "mballoc: %u blocks %u reqs (%u success)",
3093 				atomic_read(&sbi->s_bal_allocated),
3094 				atomic_read(&sbi->s_bal_reqs),
3095 				atomic_read(&sbi->s_bal_success));
3096 		ext4_msg(sb, KERN_INFO,
3097 		      "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3098 				"%u 2^N hits, %u breaks, %u lost",
3099 				atomic_read(&sbi->s_bal_ex_scanned),
3100 				atomic_read(&sbi->s_bal_groups_scanned),
3101 				atomic_read(&sbi->s_bal_goals),
3102 				atomic_read(&sbi->s_bal_2orders),
3103 				atomic_read(&sbi->s_bal_breaks),
3104 				atomic_read(&sbi->s_mb_lost_chunks));
3105 		ext4_msg(sb, KERN_INFO,
3106 		       "mballoc: %u generated and it took %llu",
3107 				atomic_read(&sbi->s_mb_buddies_generated),
3108 				atomic64_read(&sbi->s_mb_generation_time));
3109 		ext4_msg(sb, KERN_INFO,
3110 		       "mballoc: %u preallocated, %u discarded",
3111 				atomic_read(&sbi->s_mb_preallocated),
3112 				atomic_read(&sbi->s_mb_discarded));
3113 	}
3114 
3115 	free_percpu(sbi->s_locality_groups);
3116 
3117 	return 0;
3118 }
3119 
3120 static inline int ext4_issue_discard(struct super_block *sb,
3121 		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3122 		struct bio **biop)
3123 {
3124 	ext4_fsblk_t discard_block;
3125 
3126 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3127 			 ext4_group_first_block_no(sb, block_group));
3128 	count = EXT4_C2B(EXT4_SB(sb), count);
3129 	trace_ext4_discard_blocks(sb,
3130 			(unsigned long long) discard_block, count);
3131 	if (biop) {
3132 		return __blkdev_issue_discard(sb->s_bdev,
3133 			(sector_t)discard_block << (sb->s_blocksize_bits - 9),
3134 			(sector_t)count << (sb->s_blocksize_bits - 9),
3135 			GFP_NOFS, 0, biop);
3136 	} else
3137 		return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3138 }
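/*
 * The shift by (s_blocksize_bits - 9) above converts filesystem blocks to
 * 512-byte sectors: with 4 KiB blocks, for example, block N becomes sector
 * N << 3 (eight sectors per block), and the cluster count is first widened
 * to blocks via EXT4_C2B().
 */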
3139 
3140 static void ext4_free_data_in_buddy(struct super_block *sb,
3141 				    struct ext4_free_data *entry)
3142 {
3143 	struct ext4_buddy e4b;
3144 	struct ext4_group_info *db;
3145 	int err, count = 0, count2 = 0;
3146 
3147 	mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3148 		 entry->efd_count, entry->efd_group, entry);
3149 
3150 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3151 	/* we expect to find an existing buddy because it's pinned */
3152 	BUG_ON(err != 0);
3153 
3154 	spin_lock(&EXT4_SB(sb)->s_md_lock);
3155 	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3156 	spin_unlock(&EXT4_SB(sb)->s_md_lock);
3157 
3158 	db = e4b.bd_info;
3159 	/* there are blocks to put in buddy to make them really free */
3160 	count += entry->efd_count;
3161 	count2++;
3162 	ext4_lock_group(sb, entry->efd_group);
3163 	/* Take it out of per group rb tree */
3164 	rb_erase(&entry->efd_node, &(db->bb_free_root));
3165 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3166 
3167 	/*
3168 	 * Clear the trimmed flag for the group so that the next
3169 	 * ext4_trim_fs can trim it.
3170 	 * If the volume is mounted with -o discard, online discard
3171 	 * is supported and the free blocks will be trimmed online.
3172 	 */
3173 	if (!test_opt(sb, DISCARD))
3174 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
3175 
3176 	if (!db->bb_free_root.rb_node) {
3177 		/* No more items in the per group rb tree
3178 		 * balance refcounts from ext4_mb_free_metadata()
3179 		 */
3180 		put_page(e4b.bd_buddy_page);
3181 		put_page(e4b.bd_bitmap_page);
3182 	}
3183 	ext4_unlock_group(sb, entry->efd_group);
3184 	kmem_cache_free(ext4_free_data_cachep, entry);
3185 	ext4_mb_unload_buddy(&e4b);
3186 
3187 	mb_debug(sb, "freed %d blocks in %d structures\n", count,
3188 		 count2);
3189 }
3190 
3191 /*
3192  * This function is called by the jbd2 layer once the commit has finished,
3193  * so we know we can free the blocks that were released with that commit.
3194  */
3195 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3196 {
3197 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3198 	struct ext4_free_data *entry, *tmp;
3199 	struct bio *discard_bio = NULL;
3200 	struct list_head freed_data_list;
3201 	struct list_head *cut_pos = NULL;
3202 	int err;
3203 
3204 	INIT_LIST_HEAD(&freed_data_list);
3205 
3206 	spin_lock(&sbi->s_md_lock);
3207 	list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3208 		if (entry->efd_tid != commit_tid)
3209 			break;
3210 		cut_pos = &entry->efd_list;
3211 	}
3212 	if (cut_pos)
3213 		list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3214 				  cut_pos);
3215 	spin_unlock(&sbi->s_md_lock);
3216 
3217 	if (test_opt(sb, DISCARD)) {
3218 		list_for_each_entry(entry, &freed_data_list, efd_list) {
3219 			err = ext4_issue_discard(sb, entry->efd_group,
3220 						 entry->efd_start_cluster,
3221 						 entry->efd_count,
3222 						 &discard_bio);
3223 			if (err && err != -EOPNOTSUPP) {
3224 				ext4_msg(sb, KERN_WARNING, "discard request in"
3225 					 " group:%d block:%d count:%d failed"
3226 					 " with %d", entry->efd_group,
3227 					 entry->efd_start_cluster,
3228 					 entry->efd_count, err);
3229 			} else if (err == -EOPNOTSUPP)
3230 				break;
3231 		}
3232 
3233 		if (discard_bio) {
3234 			submit_bio_wait(discard_bio);
3235 			bio_put(discard_bio);
3236 		}
3237 	}
3238 
3239 	list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3240 		ext4_free_data_in_buddy(sb, entry);
3241 }
3242 
3243 int __init ext4_init_mballoc(void)
3244 {
3245 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3246 					SLAB_RECLAIM_ACCOUNT);
3247 	if (ext4_pspace_cachep == NULL)
3248 		goto out;
3249 
3250 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3251 				    SLAB_RECLAIM_ACCOUNT);
3252 	if (ext4_ac_cachep == NULL)
3253 		goto out_pa_free;
3254 
3255 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3256 					   SLAB_RECLAIM_ACCOUNT);
3257 	if (ext4_free_data_cachep == NULL)
3258 		goto out_ac_free;
3259 
3260 	return 0;
3261 
3262 out_ac_free:
3263 	kmem_cache_destroy(ext4_ac_cachep);
3264 out_pa_free:
3265 	kmem_cache_destroy(ext4_pspace_cachep);
3266 out:
3267 	return -ENOMEM;
3268 }
3269 
3270 void ext4_exit_mballoc(void)
3271 {
3272 	/*
3273 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3274 	 * before destroying the slab cache.
3275 	 */
3276 	rcu_barrier();
3277 	kmem_cache_destroy(ext4_pspace_cachep);
3278 	kmem_cache_destroy(ext4_ac_cachep);
3279 	kmem_cache_destroy(ext4_free_data_cachep);
3280 	ext4_groupinfo_destroy_slabs();
3281 }
3282 
3283 
3284 /*
3285  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3286  * Returns 0 if success or error code
3287  */
3288 static noinline_for_stack int
3289 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3290 				handle_t *handle, unsigned int reserv_clstrs)
3291 {
3292 	struct buffer_head *bitmap_bh = NULL;
3293 	struct ext4_group_desc *gdp;
3294 	struct buffer_head *gdp_bh;
3295 	struct ext4_sb_info *sbi;
3296 	struct super_block *sb;
3297 	ext4_fsblk_t block;
3298 	int err, len;
3299 
3300 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3301 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
3302 
3303 	sb = ac->ac_sb;
3304 	sbi = EXT4_SB(sb);
3305 
3306 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3307 	if (IS_ERR(bitmap_bh)) {
3308 		err = PTR_ERR(bitmap_bh);
3309 		bitmap_bh = NULL;
3310 		goto out_err;
3311 	}
3312 
3313 	BUFFER_TRACE(bitmap_bh, "getting write access");
3314 	err = ext4_journal_get_write_access(handle, bitmap_bh);
3315 	if (err)
3316 		goto out_err;
3317 
3318 	err = -EIO;
3319 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3320 	if (!gdp)
3321 		goto out_err;
3322 
3323 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3324 			ext4_free_group_clusters(sb, gdp));
3325 
3326 	BUFFER_TRACE(gdp_bh, "get_write_access");
3327 	err = ext4_journal_get_write_access(handle, gdp_bh);
3328 	if (err)
3329 		goto out_err;
3330 
3331 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3332 
3333 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3334 	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3335 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3336 			   "fs metadata", block, block+len);
3337 		/* The file system is mounted not to panic on error;
3338 		 * fix the bitmap and return EFSCORRUPTED.
3339 		 * We leak some of the blocks here.
3340 		 */
3341 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3342 		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3343 			      ac->ac_b_ex.fe_len);
3344 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3345 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3346 		if (!err)
3347 			err = -EFSCORRUPTED;
3348 		goto out_err;
3349 	}
3350 
3351 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3352 #ifdef AGGRESSIVE_CHECK
3353 	{
3354 		int i;
3355 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3356 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3357 						bitmap_bh->b_data));
3358 		}
3359 	}
3360 #endif
3361 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3362 		      ac->ac_b_ex.fe_len);
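	/*
	 * If the group still has BLOCK_UNINIT set, its free cluster count
	 * was never initialized on disk; clear the flag and compute the
	 * count from scratch before charging this allocation to it.
	 */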
3363 	if (ext4_has_group_desc_csum(sb) &&
3364 	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3365 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3366 		ext4_free_group_clusters_set(sb, gdp,
3367 					     ext4_free_clusters_after_init(sb,
3368 						ac->ac_b_ex.fe_group, gdp));
3369 	}
3370 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3371 	ext4_free_group_clusters_set(sb, gdp, len);
3372 	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3373 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3374 
3375 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3376 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3377 	/*
3378 	 * Now reduce the dirty block count also. Should not go negative
3379 	 */
3380 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3381 		/* release all the reserved blocks if non delalloc */
3382 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3383 				   reserv_clstrs);
3384 
3385 	if (sbi->s_log_groups_per_flex) {
3386 		ext4_group_t flex_group = ext4_flex_group(sbi,
3387 							  ac->ac_b_ex.fe_group);
3388 		atomic64_sub(ac->ac_b_ex.fe_len,
3389 			     &sbi_array_rcu_deref(sbi, s_flex_groups,
3390 						  flex_group)->free_clusters);
3391 	}
3392 
3393 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3394 	if (err)
3395 		goto out_err;
3396 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3397 
3398 out_err:
3399 	brelse(bitmap_bh);
3400 	return err;
3401 }
3402 
3403 /*
3404  * Idempotent helper for Ext4 fast commit replay path to set the state of
3405  * blocks in bitmaps and update counters.
3406  */
3407 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3408 			int len, int state)
3409 {
3410 	struct buffer_head *bitmap_bh = NULL;
3411 	struct ext4_group_desc *gdp;
3412 	struct buffer_head *gdp_bh;
3413 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3414 	ext4_group_t group;
3415 	ext4_grpblk_t blkoff;
3416 	int i, err;
3417 	int already;
3418 	unsigned int clen, clen_changed, thisgrp_len;
3419 
3420 	while (len > 0) {
3421 		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3422 
3423 		/*
3424 		 * Check to see if we are freeing blocks across a group
3425 		 * boundary.
3426 		 * With flex_bg it can happen that (block, len) spans more
3427 		 * than one group. In that case we need to get the
3428 		 * corresponding group metadata to work with, which is what
3429 		 * this loop does, one group per iteration.
3430 		 */
3431 		thisgrp_len = min_t(unsigned int, (unsigned int)len,
3432 			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3433 		clen = EXT4_NUM_B2C(sbi, thisgrp_len);
3434 
3435 		bitmap_bh = ext4_read_block_bitmap(sb, group);
3436 		if (IS_ERR(bitmap_bh)) {
3437 			err = PTR_ERR(bitmap_bh);
3438 			bitmap_bh = NULL;
3439 			break;
3440 		}
3441 
3442 		err = -EIO;
3443 		gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3444 		if (!gdp)
3445 			break;
3446 
3447 		ext4_lock_group(sb, group);
3448 		already = 0;
3449 		for (i = 0; i < clen; i++)
3450 			if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3451 					 !state)
3452 				already++;
3453 
3454 		clen_changed = clen - already;
3455 		if (state)
3456 			ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
3457 		else
3458 			mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
3459 		if (ext4_has_group_desc_csum(sb) &&
3460 		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3461 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3462 			ext4_free_group_clusters_set(sb, gdp,
3463 			     ext4_free_clusters_after_init(sb, group, gdp));
3464 		}
3465 		if (state)
3466 			clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
3467 		else
3468 			clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
3469 
3470 		ext4_free_group_clusters_set(sb, gdp, clen);
3471 		ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
3472 		ext4_group_desc_csum_set(sb, group, gdp);
3473 
3474 		ext4_unlock_group(sb, group);
3475 
3476 		if (sbi->s_log_groups_per_flex) {
3477 			ext4_group_t flex_group = ext4_flex_group(sbi, group);
3478 			struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3479 						   s_flex_groups, flex_group);
3480 
3481 			if (state)
3482 				atomic64_sub(clen_changed, &fg->free_clusters);
3483 			else
3484 				atomic64_add(clen_changed, &fg->free_clusters);
3485 
3486 		}
3487 
3488 		err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
3489 		if (err)
3490 			break;
3491 		sync_dirty_buffer(bitmap_bh);
3492 		err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
3493 		sync_dirty_buffer(gdp_bh);
3494 		if (err)
3495 			break;
3496 
3497 		block += thisgrp_len;
3498 		len -= thisgrp_len;
3499 		brelse(bitmap_bh);
3500 		BUG_ON(len < 0);
3501 	}
3502 
3503 	if (err)
3504 		brelse(bitmap_bh);
3505 }
3506 
3507 /*
3508  * here we normalize request for locality group
3509  * Group requests are normalized to s_mb_group_prealloc, which is set to
3510  * the stripe size (s_stripe) if one is given via the mount option.
3511  * s_mb_group_prealloc can be configured via
3512  * /sys/fs/ext4/<partition>/mb_group_prealloc
3513  *
3514  * XXX: should we try to preallocate more than the group has now?
3515  */
3516 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3517 {
3518 	struct super_block *sb = ac->ac_sb;
3519 	struct ext4_locality_group *lg = ac->ac_lg;
3520 
3521 	BUG_ON(lg == NULL);
3522 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3523 	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
3524 }
3525 
3526 /*
3527  * Normalization means making the request better in terms of
3528  * size and alignment
3529  */
3530 static noinline_for_stack void
3531 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3532 				struct ext4_allocation_request *ar)
3533 {
3534 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3535 	struct ext4_super_block *es = sbi->s_es;
3536 	int bsbits, max;
3537 	ext4_lblk_t end;
3538 	loff_t size, start_off;
3539 	loff_t orig_size __maybe_unused;
3540 	ext4_lblk_t start;
3541 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3542 	struct ext4_prealloc_space *pa;
3543 
3544 	/* only normalize data requests; metadata requests
3545 	   do not need preallocation */
3546 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3547 		return;
3548 
3549 	/* sometimes the caller may want exact blocks */
3550 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3551 		return;
3552 
3553 	/* caller may indicate that preallocation isn't
3554 	 * required (it's a tail, for example) */
3555 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3556 		return;
3557 
3558 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3559 		ext4_mb_normalize_group_request(ac);
3560 		return ;
3561 	}
3562 
3563 	bsbits = ac->ac_sb->s_blocksize_bits;
3564 
3565 	/* first, let's learn the actual file size
3566 	 * assuming the current request is allocated */
3567 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3568 	size = size << bsbits;
3569 	if (size < i_size_read(ac->ac_inode))
3570 		size = i_size_read(ac->ac_inode);
3571 	orig_size = size;
3572 
3573 	/* max size of free chunks */
3574 	max = 2 << bsbits;
3575 
3576 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
3577 		(req <= (size) || max <= (chunk_size))
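/*
 * NRL_CHECK_SIZE() is true when the request already fits within @size, or
 * when the largest free chunk we can hope for (@max) is no bigger than
 * @chunk_size anyway, so there is no point in trying a larger bucket.
 */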
3578 
3579 	/* first, try to predict filesize */
3580 	/* XXX: should this table be tunable? */
3581 	start_off = 0;
3582 	if (size <= 16 * 1024) {
3583 		size = 16 * 1024;
3584 	} else if (size <= 32 * 1024) {
3585 		size = 32 * 1024;
3586 	} else if (size <= 64 * 1024) {
3587 		size = 64 * 1024;
3588 	} else if (size <= 128 * 1024) {
3589 		size = 128 * 1024;
3590 	} else if (size <= 256 * 1024) {
3591 		size = 256 * 1024;
3592 	} else if (size <= 512 * 1024) {
3593 		size = 512 * 1024;
3594 	} else if (size <= 1024 * 1024) {
3595 		size = 1024 * 1024;
3596 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3597 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3598 						(21 - bsbits)) << 21;
3599 		size = 2 * 1024 * 1024;
3600 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3601 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3602 							(22 - bsbits)) << 22;
3603 		size = 4 * 1024 * 1024;
3604 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3605 					(8<<20)>>bsbits, max, 8 * 1024)) {
3606 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3607 							(23 - bsbits)) << 23;
3608 		size = 8 * 1024 * 1024;
3609 	} else {
3610 		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
3611 		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
3612 					      ac->ac_o_ex.fe_len) << bsbits;
3613 	}
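	/*
	 * Roughly: predicted sizes up to 1MB are rounded up to the next
	 * power-of-two bucket (16k..1M); larger ones get a 2MB, 4MB or 8MB
	 * goal whose logical start is aligned to that same boundary; and
	 * anything bigger keeps its original size and offset.
	 */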
3614 	size = size >> bsbits;
3615 	start = start_off >> bsbits;
3616 
3617 	/*
3618 	 * For tiny groups (smaller than 8MB) the chosen allocation
3619 	 * alignment may be larger than group size. Make sure the
3620 	 * alignment does not move allocation to a different group which
3621 	 * makes mballoc fail assertions later.
3622 	 */
3623 	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
3624 			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
3625 
3626 	/* don't cover already allocated blocks in selected range */
3627 	if (ar->pleft && start <= ar->lleft) {
3628 		size -= ar->lleft + 1 - start;
3629 		start = ar->lleft + 1;
3630 	}
3631 	if (ar->pright && start + size - 1 >= ar->lright)
3632 		size -= start + size - ar->lright;
3633 
3634 	/*
3635 	 * Trim allocation request for filesystems with artificially small
3636 	 * groups.
3637 	 */
3638 	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
3639 		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
3640 
3641 	end = start + size;
3642 
3643 	/* check we don't cross already preallocated blocks */
3644 	rcu_read_lock();
3645 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3646 		ext4_lblk_t pa_end;
3647 
3648 		if (pa->pa_deleted)
3649 			continue;
3650 		spin_lock(&pa->pa_lock);
3651 		if (pa->pa_deleted) {
3652 			spin_unlock(&pa->pa_lock);
3653 			continue;
3654 		}
3655 
3656 		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3657 						  pa->pa_len);
3658 
3659 		/* PA must not overlap original request */
3660 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3661 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
3662 
3663 		/* skip PAs this normalized request doesn't overlap with */
3664 		if (pa->pa_lstart >= end || pa_end <= start) {
3665 			spin_unlock(&pa->pa_lock);
3666 			continue;
3667 		}
3668 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3669 
3670 		/* adjust start or end to be adjacent to this pa */
3671 		if (pa_end <= ac->ac_o_ex.fe_logical) {
3672 			BUG_ON(pa_end < start);
3673 			start = pa_end;
3674 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3675 			BUG_ON(pa->pa_lstart > end);
3676 			end = pa->pa_lstart;
3677 		}
3678 		spin_unlock(&pa->pa_lock);
3679 	}
3680 	rcu_read_unlock();
3681 	size = end - start;
3682 
3683 	/* XXX: extra loop to check we really don't overlap preallocations */
3684 	rcu_read_lock();
3685 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3686 		ext4_lblk_t pa_end;
3687 
3688 		spin_lock(&pa->pa_lock);
3689 		if (pa->pa_deleted == 0) {
3690 			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3691 							  pa->pa_len);
3692 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3693 		}
3694 		spin_unlock(&pa->pa_lock);
3695 	}
3696 	rcu_read_unlock();
3697 
3698 	if (start + size <= ac->ac_o_ex.fe_logical &&
3699 			start > ac->ac_o_ex.fe_logical) {
3700 		ext4_msg(ac->ac_sb, KERN_ERR,
3701 			 "start %lu, size %lu, fe_logical %lu",
3702 			 (unsigned long) start, (unsigned long) size,
3703 			 (unsigned long) ac->ac_o_ex.fe_logical);
3704 		BUG();
3705 	}
3706 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3707 
3708 	/* now prepare goal request */
3709 
3710 	/* XXX: is it better to align blocks WRT to logical
3711 	 * placement or satisfy big request as is */
3712 	ac->ac_g_ex.fe_logical = start;
3713 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3714 
3715 	/* define goal start in order to merge */
3716 	if (ar->pright && (ar->lright == (start + size)) &&
3717 	    ar->pright >= size &&
3718 	    ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
3719 		/* merge to the right */
3720 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3721 						&ac->ac_g_ex.fe_group,
3722 						&ac->ac_g_ex.fe_start);
3723 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3724 	}
3725 	if (ar->pleft && (ar->lleft + 1 == start) &&
3726 	    ar->pleft + 1 < ext4_blocks_count(es)) {
3727 		/* merge to the left */
3728 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3729 						&ac->ac_g_ex.fe_group,
3730 						&ac->ac_g_ex.fe_start);
3731 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3732 	}
3733 
3734 	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
3735 		 orig_size, start);
3736 }
3737 
3738 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3739 {
3740 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3741 
3742 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
3743 		atomic_inc(&sbi->s_bal_reqs);
3744 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3745 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3746 			atomic_inc(&sbi->s_bal_success);
3747 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3748 		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
3749 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3750 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3751 			atomic_inc(&sbi->s_bal_goals);
3752 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3753 			atomic_inc(&sbi->s_bal_breaks);
3754 	}
3755 
3756 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3757 		trace_ext4_mballoc_alloc(ac);
3758 	else
3759 		trace_ext4_mballoc_prealloc(ac);
3760 }
3761 
3762 /*
3763  * Called on failure; free up any blocks from the inode PA for this
3764  * context.  We don't need this for MB_GROUP_PA because we only change
3765  * pa_free in ext4_mb_release_context(), but on failure, we've already
3766  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3767  */
3768 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3769 {
3770 	struct ext4_prealloc_space *pa = ac->ac_pa;
3771 	struct ext4_buddy e4b;
3772 	int err;
3773 
3774 	if (pa == NULL) {
3775 		if (ac->ac_f_ex.fe_len == 0)
3776 			return;
3777 		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
3778 		if (err) {
3779 			/*
3780 			 * This should never happen since we pin the
3781 			 * pages in the ext4_allocation_context so
3782 			 * ext4_mb_load_buddy() should never fail.
3783 			 */
3784 			WARN(1, "mb_load_buddy failed (%d)", err);
3785 			return;
3786 		}
3787 		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3788 		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
3789 			       ac->ac_f_ex.fe_len);
3790 		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3791 		ext4_mb_unload_buddy(&e4b);
3792 		return;
3793 	}
3794 	if (pa->pa_type == MB_INODE_PA)
3795 		pa->pa_free += ac->ac_b_ex.fe_len;
3796 }
3797 
3798 /*
3799  * use blocks preallocated to inode
3800  */
3801 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3802 				struct ext4_prealloc_space *pa)
3803 {
3804 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3805 	ext4_fsblk_t start;
3806 	ext4_fsblk_t end;
3807 	int len;
3808 
3809 	/* found preallocated blocks, use them */
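	/*
	 * Map the requested logical block to its physical block inside the
	 * pa (pa_pstart plus the logical offset into the pa), then clip the
	 * end to what the pa actually covers, so len may come out shorter
	 * than the original request.
	 */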
3810 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3811 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3812 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3813 	len = EXT4_NUM_B2C(sbi, end - start);
3814 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3815 					&ac->ac_b_ex.fe_start);
3816 	ac->ac_b_ex.fe_len = len;
3817 	ac->ac_status = AC_STATUS_FOUND;
3818 	ac->ac_pa = pa;
3819 
3820 	BUG_ON(start < pa->pa_pstart);
3821 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3822 	BUG_ON(pa->pa_free < len);
3823 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
3824 	pa->pa_free -= len;
3825 
3826 	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
3827 }
3828 
3829 /*
3830  * use blocks preallocated to locality group
3831  */
3832 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3833 				struct ext4_prealloc_space *pa)
3834 {
3835 	unsigned int len = ac->ac_o_ex.fe_len;
3836 
3837 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3838 					&ac->ac_b_ex.fe_group,
3839 					&ac->ac_b_ex.fe_start);
3840 	ac->ac_b_ex.fe_len = len;
3841 	ac->ac_status = AC_STATUS_FOUND;
3842 	ac->ac_pa = pa;
3843 
3844 	/* we don't correct pa_pstart or pa_len here to avoid a
3845 	 * possible race when the group is being loaded concurrently;
3846 	 * instead we correct the pa later, after blocks are marked
3847 	 * in the on-disk bitmap -- see ext4_mb_release_context().
3848 	 * Other CPUs are prevented from allocating from this pa by lg_mutex.
3849 	 */
3850 	mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
3851 		 pa->pa_lstart-len, len, pa);
3852 }
3853 
3854 /*
3855  * Return the prealloc space that has the minimal distance
3856  * from the goal block. @cpa is the prealloc
3857  * space with the currently known minimal distance
3858  * from the goal block.
3859  */
3860 static struct ext4_prealloc_space *
3861 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3862 			struct ext4_prealloc_space *pa,
3863 			struct ext4_prealloc_space *cpa)
3864 {
3865 	ext4_fsblk_t cur_distance, new_distance;
3866 
3867 	if (cpa == NULL) {
3868 		atomic_inc(&pa->pa_count);
3869 		return pa;
3870 	}
3871 	cur_distance = abs(goal_block - cpa->pa_pstart);
3872 	new_distance = abs(goal_block - pa->pa_pstart);
3873 
3874 	if (cur_distance <= new_distance)
3875 		return cpa;
3876 
3877 	/* drop the previous reference */
3878 	atomic_dec(&cpa->pa_count);
3879 	atomic_inc(&pa->pa_count);
3880 	return pa;
3881 }
3882 
3883 /*
3884  * search goal blocks in preallocated space
3885  */
3886 static noinline_for_stack bool
3887 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3888 {
3889 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3890 	int order, i;
3891 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3892 	struct ext4_locality_group *lg;
3893 	struct ext4_prealloc_space *pa, *cpa = NULL;
3894 	ext4_fsblk_t goal_block;
3895 
3896 	/* only data can be preallocated */
3897 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3898 		return false;
3899 
3900 	/* first, try per-file preallocation */
3901 	rcu_read_lock();
3902 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3903 
3904 		/* all fields in this condition don't change,
3905 		 * so we can skip locking for them */
3906 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3907 		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3908 					       EXT4_C2B(sbi, pa->pa_len)))
3909 			continue;
3910 
3911 		/* non-extent files can't have physical blocks past 2^32 */
3912 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3913 		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3914 		     EXT4_MAX_BLOCK_FILE_PHYS))
3915 			continue;
3916 
3917 		/* found preallocated blocks, use them */
3918 		spin_lock(&pa->pa_lock);
3919 		if (pa->pa_deleted == 0 && pa->pa_free) {
3920 			atomic_inc(&pa->pa_count);
3921 			ext4_mb_use_inode_pa(ac, pa);
3922 			spin_unlock(&pa->pa_lock);
3923 			ac->ac_criteria = 10;
3924 			rcu_read_unlock();
3925 			return true;
3926 		}
3927 		spin_unlock(&pa->pa_lock);
3928 	}
3929 	rcu_read_unlock();
3930 
3931 	/* can we use group allocation? */
3932 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3933 		return false;
3934 
3935 	/* inode may have no locality group for some reason */
3936 	lg = ac->ac_lg;
3937 	if (lg == NULL)
3938 		return false;
3939 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3940 	if (order > PREALLOC_TB_SIZE - 1)
3941 		/* The max size of hash table is PREALLOC_TB_SIZE */
3942 		order = PREALLOC_TB_SIZE - 1;
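	/*
	 * lg_prealloc_list is bucketed by power-of-two size; start scanning
	 * at the bucket matching the request size and walk up through the
	 * larger buckets, picking the pa with enough free clusters that is
	 * closest to the goal block.
	 */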
3943 
3944 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3945 	/*
3946 	 * search for the prealloc space that has the
3947 	 * minimal distance from the goal block.
3948 	 */
3949 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3950 		rcu_read_lock();
3951 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3952 					pa_inode_list) {
3953 			spin_lock(&pa->pa_lock);
3954 			if (pa->pa_deleted == 0 &&
3955 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3956 
3957 				cpa = ext4_mb_check_group_pa(goal_block,
3958 								pa, cpa);
3959 			}
3960 			spin_unlock(&pa->pa_lock);
3961 		}
3962 		rcu_read_unlock();
3963 	}
3964 	if (cpa) {
3965 		ext4_mb_use_group_pa(ac, cpa);
3966 		ac->ac_criteria = 20;
3967 		return true;
3968 	}
3969 	return false;
3970 }
3971 
3972 /*
3973  * the function goes through all blocks freed in the group
3974  * but not yet committed and marks them used in the in-core bitmap.
3975  * The buddy must be generated from this bitmap.
3976  * Needs to be called with the ext4 group lock held.
3977  */
3978 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3979 						ext4_group_t group)
3980 {
3981 	struct rb_node *n;
3982 	struct ext4_group_info *grp;
3983 	struct ext4_free_data *entry;
3984 
3985 	grp = ext4_get_group_info(sb, group);
3986 	if (!grp)
3987 		return;
3988 	n = rb_first(&(grp->bb_free_root));
3989 
3990 	while (n) {
3991 		entry = rb_entry(n, struct ext4_free_data, efd_node);
3992 		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3993 		n = rb_next(n);
3994 	}
3995 	return;
3996 }
3997 
3998 /*
3999  * the function goes through all preallocations in this group and marks them
4000  * used in the in-core bitmap. The buddy must be generated from this bitmap.
4001  * Needs to be called with the ext4 group lock held.
4002  */
4003 static noinline_for_stack
4004 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4005 					ext4_group_t group)
4006 {
4007 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4008 	struct ext4_prealloc_space *pa;
4009 	struct list_head *cur;
4010 	ext4_group_t groupnr;
4011 	ext4_grpblk_t start;
4012 	int preallocated = 0;
4013 	int len;
4014 
4015 	if (!grp)
4016 		return;
4017 
4018 	/* all forms of preallocation discard first load the group,
4019 	 * so the only competing code is preallocation use.
4020 	 * we don't need any locking here.
4021 	 * notice we do NOT skip preallocations with pa_deleted set;
4022 	 * otherwise we could leave used blocks available for
4023 	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
4024 	 * is dropping the preallocation.
4025 	 */
4026 	list_for_each(cur, &grp->bb_prealloc_list) {
4027 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4028 		spin_lock(&pa->pa_lock);
4029 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4030 					     &groupnr, &start);
4031 		len = pa->pa_len;
4032 		spin_unlock(&pa->pa_lock);
4033 		if (unlikely(len == 0))
4034 			continue;
4035 		BUG_ON(groupnr != group);
4036 		ext4_set_bits(bitmap, start, len);
4037 		preallocated += len;
4038 	}
4039 	mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4040 }
4041 
4042 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4043 				    struct ext4_prealloc_space *pa)
4044 {
4045 	struct ext4_inode_info *ei;
4046 
4047 	if (pa->pa_deleted) {
4048 		ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4049 			     pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4050 			     pa->pa_len);
4051 		return;
4052 	}
4053 
4054 	pa->pa_deleted = 1;
4055 
4056 	if (pa->pa_type == MB_INODE_PA) {
4057 		ei = EXT4_I(pa->pa_inode);
4058 		atomic_dec(&ei->i_prealloc_active);
4059 	}
4060 }
4061 
4062 static void ext4_mb_pa_callback(struct rcu_head *head)
4063 {
4064 	struct ext4_prealloc_space *pa;
4065 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4066 
4067 	BUG_ON(atomic_read(&pa->pa_count));
4068 	BUG_ON(pa->pa_deleted == 0);
4069 	kmem_cache_free(ext4_pspace_cachep, pa);
4070 }
4071 
4072 /*
4073  * drops a reference to preallocated space descriptor
4074  * if this was the last reference and the space is consumed
4075  */
4076 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4077 			struct super_block *sb, struct ext4_prealloc_space *pa)
4078 {
4079 	ext4_group_t grp;
4080 	ext4_fsblk_t grp_blk;
4081 
4082 	/* in this short window concurrent discard can set pa_deleted */
4083 	spin_lock(&pa->pa_lock);
4084 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4085 		spin_unlock(&pa->pa_lock);
4086 		return;
4087 	}
4088 
4089 	if (pa->pa_deleted == 1) {
4090 		spin_unlock(&pa->pa_lock);
4091 		return;
4092 	}
4093 
4094 	ext4_mb_mark_pa_deleted(sb, pa);
4095 	spin_unlock(&pa->pa_lock);
4096 
4097 	grp_blk = pa->pa_pstart;
4098 	/*
4099 	 * If doing group-based preallocation, pa_pstart may be in the
4100 	 * next group when pa is used up
4101 	 */
4102 	if (pa->pa_type == MB_GROUP_PA)
4103 		grp_blk--;
4104 
4105 	grp = ext4_get_group_number(sb, grp_blk);
4106 
4107 	/*
4108 	 * possible race:
4109 	 *
4110 	 *  P1 (buddy init)			P2 (regular allocation)
4111 	 *					find block B in PA
4112 	 *  copy on-disk bitmap to buddy
4113 	 *  					mark B in on-disk bitmap
4114 	 *					drop PA from group
4115 	 *  mark all PAs in buddy
4116 	 *
4117 	 * thus, P1 initializes buddy with B available. to prevent this
4118 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4119 	 * against that pair
4120 	 */
4121 	ext4_lock_group(sb, grp);
4122 	list_del(&pa->pa_group_list);
4123 	ext4_unlock_group(sb, grp);
4124 
4125 	spin_lock(pa->pa_obj_lock);
4126 	list_del_rcu(&pa->pa_inode_list);
4127 	spin_unlock(pa->pa_obj_lock);
4128 
4129 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4130 }
4131 
4132 /*
4133  * creates new preallocated space for given inode
4134  */
4135 static noinline_for_stack void
4136 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4137 {
4138 	struct super_block *sb = ac->ac_sb;
4139 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4140 	struct ext4_prealloc_space *pa;
4141 	struct ext4_group_info *grp;
4142 	struct ext4_inode_info *ei;
4143 
4144 	/* preallocate only when the found space is larger than requested */
4145 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4146 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4147 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4148 	BUG_ON(ac->ac_pa == NULL);
4149 
4150 	pa = ac->ac_pa;
4151 
4152 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4153 		int new_bex_start;
4154 		int new_bex_end;
4155 
4156 		/* we can't allocate as much as normalizer wants.
4157 		 * so, found space must get proper lstart
4158 		 * to cover original request */
4159 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4160 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4161 
4162 		/*
4163 		 * Use the below logic for adjusting best extent as it keeps
4164 		 * fragmentation in check while ensuring logical range of best
4165 		 * extent doesn't overflow out of goal extent:
4166 		 *
4167 		 * 1. Check if best ex can be kept at end of goal and still
4168 		 *    cover original start
4169 		 * 2. Else, check if best ex can be kept at start of goal and
4170 		 *    still cover original start
4171 		 * 3. Else, keep the best ex at start of original request.
4172 		 */
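		/*
		 * For example (with one block per cluster): if the goal
		 * covers logical blocks 0-63, the best extent found is 16
		 * blocks and the original request starts at block 50, case 1
		 * applies and the best extent is placed at blocks 48-63,
		 * which still covers block 50.
		 */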
4173 		new_bex_end = ac->ac_g_ex.fe_logical +
4174 			EXT4_C2B(sbi, ac->ac_g_ex.fe_len);
4175 		new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4176 		if (ac->ac_o_ex.fe_logical >= new_bex_start)
4177 			goto adjust_bex;
4178 
4179 		new_bex_start = ac->ac_g_ex.fe_logical;
4180 		new_bex_end =
4181 			new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4182 		if (ac->ac_o_ex.fe_logical < new_bex_end)
4183 			goto adjust_bex;
4184 
4185 		new_bex_start = ac->ac_o_ex.fe_logical;
4186 		new_bex_end =
4187 			new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4188 
4189 adjust_bex:
4190 		ac->ac_b_ex.fe_logical = new_bex_start;
4191 
4192 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4193 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4194 		BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
4195 				      EXT4_C2B(sbi, ac->ac_g_ex.fe_len)));
4196 	}
4197 
4198 	/* preallocation can change ac_b_ex, thus we store actually
4199 	 * allocated blocks for history */
4200 	ac->ac_f_ex = ac->ac_b_ex;
4201 
4202 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
4203 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4204 	pa->pa_len = ac->ac_b_ex.fe_len;
4205 	pa->pa_free = pa->pa_len;
4206 	spin_lock_init(&pa->pa_lock);
4207 	INIT_LIST_HEAD(&pa->pa_inode_list);
4208 	INIT_LIST_HEAD(&pa->pa_group_list);
4209 	pa->pa_deleted = 0;
4210 	pa->pa_type = MB_INODE_PA;
4211 
4212 	mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4213 		 pa->pa_len, pa->pa_lstart);
4214 	trace_ext4_mb_new_inode_pa(ac, pa);
4215 
4216 	ext4_mb_use_inode_pa(ac, pa);
4217 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4218 
4219 	ei = EXT4_I(ac->ac_inode);
4220 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4221 	if (!grp)
4222 		return;
4223 
4224 	pa->pa_obj_lock = &ei->i_prealloc_lock;
4225 	pa->pa_inode = ac->ac_inode;
4226 
4227 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4228 
4229 	spin_lock(pa->pa_obj_lock);
4230 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4231 	spin_unlock(pa->pa_obj_lock);
4232 	atomic_inc(&ei->i_prealloc_active);
4233 }
4234 
4235 /*
4236  * creates new preallocated space for the locality group this inode belongs to
4237  */
4238 static noinline_for_stack void
4239 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4240 {
4241 	struct super_block *sb = ac->ac_sb;
4242 	struct ext4_locality_group *lg;
4243 	struct ext4_prealloc_space *pa;
4244 	struct ext4_group_info *grp;
4245 
4246 	/* preallocate only when the found space is larger than requested */
4247 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4248 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4249 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4250 	BUG_ON(ac->ac_pa == NULL);
4251 
4252 	pa = ac->ac_pa;
4253 
4254 	/* preallocation can change ac_b_ex, thus we store actually
4255 	 * allocated blocks for history */
4256 	ac->ac_f_ex = ac->ac_b_ex;
4257 
4258 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4259 	pa->pa_lstart = pa->pa_pstart;
4260 	pa->pa_len = ac->ac_b_ex.fe_len;
4261 	pa->pa_free = pa->pa_len;
4262 	spin_lock_init(&pa->pa_lock);
4263 	INIT_LIST_HEAD(&pa->pa_inode_list);
4264 	INIT_LIST_HEAD(&pa->pa_group_list);
4265 	pa->pa_deleted = 0;
4266 	pa->pa_type = MB_GROUP_PA;
4267 
4268 	mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4269 		 pa->pa_len, pa->pa_lstart);
4270 	trace_ext4_mb_new_group_pa(ac, pa);
4271 
4272 	ext4_mb_use_group_pa(ac, pa);
4273 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4274 
4275 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4276 	if (!grp)
4277 		return;
4278 	lg = ac->ac_lg;
4279 	BUG_ON(lg == NULL);
4280 
4281 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
4282 	pa->pa_inode = NULL;
4283 
4284 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4285 
4286 	/*
4287 	 * We will later add the new pa to the right bucket
4288 	 * after updating the pa_free in ext4_mb_release_context
4289 	 */
4290 }
4291 
4292 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4293 {
4294 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4295 		ext4_mb_new_group_pa(ac);
4296 	else
4297 		ext4_mb_new_inode_pa(ac);
4298 }
4299 
4300 /*
4301  * finds all unused blocks in on-disk bitmap, frees them in
4302  * in-core bitmap and buddy.
4303  * @pa must be unlinked from inode and group lists, so that
4304  * nobody else can find/use it.
4305  * the caller MUST hold group/inode locks.
4306  * TODO: optimize the case when there are no in-core structures yet
4307  */
4308 static noinline_for_stack int
4309 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4310 			struct ext4_prealloc_space *pa)
4311 {
4312 	struct super_block *sb = e4b->bd_sb;
4313 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4314 	unsigned int end;
4315 	unsigned int next;
4316 	ext4_group_t group;
4317 	ext4_grpblk_t bit;
4318 	unsigned long long grp_blk_start;
4319 	int free = 0;
4320 
4321 	BUG_ON(pa->pa_deleted == 0);
4322 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4323 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4324 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4325 	end = bit + pa->pa_len;
4326 
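	/*
	 * Walk the on-disk bitmap within the pa: each run of clear bits is
	 * preallocated space that was never used, so give it back to the
	 * buddy; runs of set bits were actually allocated and stay as is.
	 */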
4327 	while (bit < end) {
4328 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4329 		if (bit >= end)
4330 			break;
4331 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4332 		mb_debug(sb, "free preallocated %u/%u in group %u\n",
4333 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
4334 			 (unsigned) next - bit, (unsigned) group);
4335 		free += next - bit;
4336 
4337 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4338 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4339 						    EXT4_C2B(sbi, bit)),
4340 					       next - bit);
4341 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4342 		bit = next + 1;
4343 	}
4344 	if (free != pa->pa_free) {
4345 		ext4_msg(e4b->bd_sb, KERN_CRIT,
4346 			 "pa %p: logic %lu, phys. %lu, len %d",
4347 			 pa, (unsigned long) pa->pa_lstart,
4348 			 (unsigned long) pa->pa_pstart,
4349 			 pa->pa_len);
4350 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4351 					free, pa->pa_free);
4352 		/*
4353 		 * pa is already deleted so we use the value obtained
4354 		 * from the bitmap and continue.
4355 		 */
4356 	}
4357 	atomic_add(free, &sbi->s_mb_discarded);
4358 
4359 	return 0;
4360 }
4361 
4362 static noinline_for_stack int
4363 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4364 				struct ext4_prealloc_space *pa)
4365 {
4366 	struct super_block *sb = e4b->bd_sb;
4367 	ext4_group_t group;
4368 	ext4_grpblk_t bit;
4369 
4370 	trace_ext4_mb_release_group_pa(sb, pa);
4371 	BUG_ON(pa->pa_deleted == 0);
4372 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4373 	if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
4374 		ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
4375 			     e4b->bd_group, group, pa->pa_pstart);
4376 		return 0;
4377 	}
4378 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4379 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4380 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4381 
4382 	return 0;
4383 }
4384 
4385 /*
4386  * releases all preallocations in given group
4387  *
4388  * first, we need to decide discard policy:
4389  * - when do we discard
4390  *   1) ENOSPC
4391  * - how many do we discard
4392  *   1) how many requested
4393  */
4394 static noinline_for_stack int
4395 ext4_mb_discard_group_preallocations(struct super_block *sb,
4396 				     ext4_group_t group, int *busy)
4397 {
4398 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4399 	struct buffer_head *bitmap_bh = NULL;
4400 	struct ext4_prealloc_space *pa, *tmp;
4401 	struct list_head list;
4402 	struct ext4_buddy e4b;
4403 	int err;
4404 	int free = 0;
4405 
4406 	if (!grp)
4407 		return 0;
4408 	mb_debug(sb, "discard preallocation for group %u\n", group);
4409 	if (list_empty(&grp->bb_prealloc_list))
4410 		goto out_dbg;
4411 
4412 	bitmap_bh = ext4_read_block_bitmap(sb, group);
4413 	if (IS_ERR(bitmap_bh)) {
4414 		err = PTR_ERR(bitmap_bh);
4415 		ext4_error_err(sb, -err,
4416 			       "Error %d reading block bitmap for %u",
4417 			       err, group);
4418 		goto out_dbg;
4419 	}
4420 
4421 	err = ext4_mb_load_buddy(sb, group, &e4b);
4422 	if (err) {
4423 		ext4_warning(sb, "Error %d loading buddy information for %u",
4424 			     err, group);
4425 		put_bh(bitmap_bh);
4426 		goto out_dbg;
4427 	}
4428 
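	/*
	 * Two passes under the group lock: first unhook every pa that is
	 * not currently referenced (pa_count == 0) onto a private list,
	 * then release their blocks back to the buddy and leave freeing of
	 * the descriptors to RCU.
	 */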
4429 	INIT_LIST_HEAD(&list);
4430 	ext4_lock_group(sb, group);
4431 	list_for_each_entry_safe(pa, tmp,
4432 				&grp->bb_prealloc_list, pa_group_list) {
4433 		spin_lock(&pa->pa_lock);
4434 		if (atomic_read(&pa->pa_count)) {
4435 			spin_unlock(&pa->pa_lock);
4436 			*busy = 1;
4437 			continue;
4438 		}
4439 		if (pa->pa_deleted) {
4440 			spin_unlock(&pa->pa_lock);
4441 			continue;
4442 		}
4443 
4444 		/* seems this one can be freed ... */
4445 		ext4_mb_mark_pa_deleted(sb, pa);
4446 
4447 		if (!free)
4448 			this_cpu_inc(discard_pa_seq);
4449 
4450 		/* we can trust pa_free ... */
4451 		free += pa->pa_free;
4452 
4453 		spin_unlock(&pa->pa_lock);
4454 
4455 		list_del(&pa->pa_group_list);
4456 		list_add(&pa->u.pa_tmp_list, &list);
4457 	}
4458 
4459 	/* now free all selected PAs */
4460 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4461 
4462 		/* remove from object (inode or locality group) */
4463 		spin_lock(pa->pa_obj_lock);
4464 		list_del_rcu(&pa->pa_inode_list);
4465 		spin_unlock(pa->pa_obj_lock);
4466 
4467 		if (pa->pa_type == MB_GROUP_PA)
4468 			ext4_mb_release_group_pa(&e4b, pa);
4469 		else
4470 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4471 
4472 		list_del(&pa->u.pa_tmp_list);
4473 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4474 	}
4475 
4476 	ext4_unlock_group(sb, group);
4477 	ext4_mb_unload_buddy(&e4b);
4478 	put_bh(bitmap_bh);
4479 out_dbg:
4480 	mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
4481 		 free, group, grp->bb_free);
4482 	return free;
4483 }
4484 
4485 /*
4486  * releases all non-used preallocated blocks for given inode
4487  *
4488  * It's important to discard preallocations under i_data_sem
4489  * We don't want another block to be served from the prealloc
4490  * space when we are discarding the inode prealloc space.
4491  *
4492  * FIXME!! Make sure it is valid at all the call sites
4493  */
4494 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
4495 {
4496 	struct ext4_inode_info *ei = EXT4_I(inode);
4497 	struct super_block *sb = inode->i_sb;
4498 	struct buffer_head *bitmap_bh = NULL;
4499 	struct ext4_prealloc_space *pa, *tmp;
4500 	ext4_group_t group = 0;
4501 	struct list_head list;
4502 	struct ext4_buddy e4b;
4503 	int err;
4504 
4505 	if (!S_ISREG(inode->i_mode)) {
4506 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4507 		return;
4508 	}
4509 
4510 	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
4511 		return;
4512 
4513 	mb_debug(sb, "discard preallocation for inode %lu\n",
4514 		 inode->i_ino);
4515 	trace_ext4_discard_preallocations(inode,
4516 			atomic_read(&ei->i_prealloc_active), needed);
4517 
4518 	INIT_LIST_HEAD(&list);
4519 
4520 	if (needed == 0)
4521 		needed = UINT_MAX;
4522 
4523 repeat:
4524 	/* first, collect all pa's in the inode */
4525 	spin_lock(&ei->i_prealloc_lock);
4526 	while (!list_empty(&ei->i_prealloc_list) && needed) {
4527 		pa = list_entry(ei->i_prealloc_list.prev,
4528 				struct ext4_prealloc_space, pa_inode_list);
4529 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4530 		spin_lock(&pa->pa_lock);
4531 		if (atomic_read(&pa->pa_count)) {
4532 			/* this shouldn't happen often - nobody should
4533 			 * use preallocation while we're discarding it */
4534 			spin_unlock(&pa->pa_lock);
4535 			spin_unlock(&ei->i_prealloc_lock);
4536 			ext4_msg(sb, KERN_ERR,
4537 				 "uh-oh! used pa while discarding");
4538 			WARN_ON(1);
4539 			schedule_timeout_uninterruptible(HZ);
4540 			goto repeat;
4541 
4542 		}
4543 		if (pa->pa_deleted == 0) {
4544 			ext4_mb_mark_pa_deleted(sb, pa);
4545 			spin_unlock(&pa->pa_lock);
4546 			list_del_rcu(&pa->pa_inode_list);
4547 			list_add(&pa->u.pa_tmp_list, &list);
4548 			needed--;
4549 			continue;
4550 		}
4551 
4552 		/* someone is deleting pa right now */
4553 		spin_unlock(&pa->pa_lock);
4554 		spin_unlock(&ei->i_prealloc_lock);
4555 
4556 		/* we have to wait here because pa_deleted
4557 		 * doesn't mean the pa is already unlinked from
4558 		 * the list. As we might be called from
4559 		 * ->clear_inode(), the inode will get freed
4560 		 * and a concurrent thread which is unlinking the
4561 		 * pa from the inode's list may access already
4562 		 * freed memory -- bad-bad-bad */
4563 
4564 		/* XXX: if this happens too often, we can
4565 		 * add a flag to force wait only in case
4566 		 * of ->clear_inode(), but not in case of
4567 		 * regular truncate */
4568 		schedule_timeout_uninterruptible(HZ);
4569 		goto repeat;
4570 	}
4571 	spin_unlock(&ei->i_prealloc_lock);
4572 
4573 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4574 		BUG_ON(pa->pa_type != MB_INODE_PA);
4575 		group = ext4_get_group_number(sb, pa->pa_pstart);
4576 
4577 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4578 					     GFP_NOFS|__GFP_NOFAIL);
4579 		if (err) {
4580 			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
4581 				       err, group);
4582 			continue;
4583 		}
4584 
4585 		bitmap_bh = ext4_read_block_bitmap(sb, group);
4586 		if (IS_ERR(bitmap_bh)) {
4587 			err = PTR_ERR(bitmap_bh);
4588 			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
4589 				       err, group);
4590 			ext4_mb_unload_buddy(&e4b);
4591 			continue;
4592 		}
4593 
4594 		ext4_lock_group(sb, group);
4595 		list_del(&pa->pa_group_list);
4596 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4597 		ext4_unlock_group(sb, group);
4598 
4599 		ext4_mb_unload_buddy(&e4b);
4600 		put_bh(bitmap_bh);
4601 
4602 		list_del(&pa->u.pa_tmp_list);
4603 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4604 	}
4605 }
4606 
4607 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
4608 {
4609 	struct ext4_prealloc_space *pa;
4610 
4611 	BUG_ON(ext4_pspace_cachep == NULL);
4612 	pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
4613 	if (!pa)
4614 		return -ENOMEM;
4615 	atomic_set(&pa->pa_count, 1);
4616 	ac->ac_pa = pa;
4617 	return 0;
4618 }
4619 
4620 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
4621 {
4622 	struct ext4_prealloc_space *pa = ac->ac_pa;
4623 
4624 	BUG_ON(!pa);
4625 	ac->ac_pa = NULL;
4626 	WARN_ON(!atomic_dec_and_test(&pa->pa_count));
4627 	kmem_cache_free(ext4_pspace_cachep, pa);
4628 }
4629 
4630 #ifdef CONFIG_EXT4_DEBUG
4631 static inline void ext4_mb_show_pa(struct super_block *sb)
4632 {
4633 	ext4_group_t i, ngroups;
4634 
4635 	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
4636 		return;
4637 
4638 	ngroups = ext4_get_groups_count(sb);
4639 	mb_debug(sb, "groups: ");
4640 	for (i = 0; i < ngroups; i++) {
4641 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4642 		struct ext4_prealloc_space *pa;
4643 		ext4_grpblk_t start;
4644 		struct list_head *cur;
4645 
4646 		if (!grp)
4647 			continue;
4648 		ext4_lock_group(sb, i);
4649 		list_for_each(cur, &grp->bb_prealloc_list) {
4650 			pa = list_entry(cur, struct ext4_prealloc_space,
4651 					pa_group_list);
4652 			spin_lock(&pa->pa_lock);
4653 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4654 						     NULL, &start);
4655 			spin_unlock(&pa->pa_lock);
4656 			mb_debug(sb, "PA:%u:%d:%d\n", i, start,
4657 				 pa->pa_len);
4658 		}
4659 		ext4_unlock_group(sb, i);
4660 		mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
4661 			 grp->bb_fragments);
4662 	}
4663 }
4664 
4665 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4666 {
4667 	struct super_block *sb = ac->ac_sb;
4668 
4669 	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
4670 		return;
4671 
4672 	mb_debug(sb, "Can't allocate:"
4673 			" Allocation context details:");
4674 	mb_debug(sb, "status %u flags 0x%x",
4675 			ac->ac_status, ac->ac_flags);
4676 	mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
4677 			"goal %lu/%lu/%lu@%lu, "
4678 			"best %lu/%lu/%lu@%lu cr %d",
4679 			(unsigned long)ac->ac_o_ex.fe_group,
4680 			(unsigned long)ac->ac_o_ex.fe_start,
4681 			(unsigned long)ac->ac_o_ex.fe_len,
4682 			(unsigned long)ac->ac_o_ex.fe_logical,
4683 			(unsigned long)ac->ac_g_ex.fe_group,
4684 			(unsigned long)ac->ac_g_ex.fe_start,
4685 			(unsigned long)ac->ac_g_ex.fe_len,
4686 			(unsigned long)ac->ac_g_ex.fe_logical,
4687 			(unsigned long)ac->ac_b_ex.fe_group,
4688 			(unsigned long)ac->ac_b_ex.fe_start,
4689 			(unsigned long)ac->ac_b_ex.fe_len,
4690 			(unsigned long)ac->ac_b_ex.fe_logical,
4691 			(int)ac->ac_criteria);
4692 	mb_debug(sb, "%u found", ac->ac_found);
4693 	ext4_mb_show_pa(sb);
4694 }
4695 #else
4696 static inline void ext4_mb_show_pa(struct super_block *sb)
4697 {
4698 	return;
4699 }
4700 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4701 {
4702 	ext4_mb_show_pa(ac->ac_sb);
4703 	return;
4704 }
4705 #endif
4706 
4707 /*
4708  * We use locality group preallocation for small files. The size of the
4709  * file is determined by the current size or the resulting size after
4710  * allocation, whichever is larger.
4711  *
4712  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4713  */
4714 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4715 {
4716 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4717 	int bsbits = ac->ac_sb->s_blocksize_bits;
4718 	loff_t size, isize;
4719 
4720 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4721 		return;
4722 
4723 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4724 		return;
4725 
4726 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4727 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4728 		>> bsbits;
4729 
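	/*
	 * If the file is no longer open for write and this allocation ends
	 * exactly at its current size, it is presumably not going to grow;
	 * skip preallocation and take only what was asked for.
	 */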
4730 	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
4731 	    !inode_is_open_for_write(ac->ac_inode)) {
4732 		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4733 		return;
4734 	}
4735 
4736 	if (sbi->s_mb_group_prealloc <= 0) {
4737 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4738 		return;
4739 	}
4740 
4741 	/* don't use group allocation for large files */
4742 	size = max(size, isize);
4743 	if (size > sbi->s_mb_stream_request) {
4744 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4745 		return;
4746 	}
4747 
4748 	BUG_ON(ac->ac_lg != NULL);
4749 	/*
4750 	 * locality group prealloc space is per cpu. The reason for having
4751 	 * a per-cpu locality group is to reduce the contention between block
4752 	 * requests from multiple CPUs.
4753 	 */
4754 	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
4755 
4756 	/* we're going to use group allocation */
4757 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4758 
4759 	/* serialize all allocations in the group */
4760 	mutex_lock(&ac->ac_lg->lg_mutex);
4761 }
4762 
4763 static noinline_for_stack int
4764 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4765 				struct ext4_allocation_request *ar)
4766 {
4767 	struct super_block *sb = ar->inode->i_sb;
4768 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4769 	struct ext4_super_block *es = sbi->s_es;
4770 	ext4_group_t group;
4771 	unsigned int len;
4772 	ext4_fsblk_t goal;
4773 	ext4_grpblk_t block;
4774 
4775 	/* we can't allocate > group size */
4776 	len = ar->len;
4777 
4778 	/* just a dirty hack to filter out overly large requests */
4779 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4780 		len = EXT4_CLUSTERS_PER_GROUP(sb);
4781 
4782 	/* start searching from the goal */
4783 	goal = ar->goal;
4784 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4785 			goal >= ext4_blocks_count(es))
4786 		goal = le32_to_cpu(es->s_first_data_block);
4787 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4788 
4789 	/* set up allocation goals */
4790 	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4791 	ac->ac_status = AC_STATUS_CONTINUE;
4792 	ac->ac_sb = sb;
4793 	ac->ac_inode = ar->inode;
4794 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4795 	ac->ac_o_ex.fe_group = group;
4796 	ac->ac_o_ex.fe_start = block;
4797 	ac->ac_o_ex.fe_len = len;
4798 	ac->ac_g_ex = ac->ac_o_ex;
4799 	ac->ac_flags = ar->flags;
4800 
4801 	/* we have to define context: we'll work with a file or
4802 	 * locality group. this is a policy, actually */
4803 	ext4_mb_group_or_file(ac);
4804 
4805 	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
4806 			"left: %u/%u, right %u/%u to %swritable\n",
4807 			(unsigned) ar->len, (unsigned) ar->logical,
4808 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4809 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4810 			(unsigned) ar->lright, (unsigned) ar->pright,
4811 			inode_is_open_for_write(ar->inode) ? "" : "non-");
4812 	return 0;
4813 
4814 }
4815 
4816 static noinline_for_stack void
4817 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4818 					struct ext4_locality_group *lg,
4819 					int order, int total_entries)
4820 {
4821 	ext4_group_t group = 0;
4822 	struct ext4_buddy e4b;
4823 	struct list_head discard_list;
4824 	struct ext4_prealloc_space *pa, *tmp;
4825 
4826 	mb_debug(sb, "discard locality group preallocation\n");
4827 
4828 	INIT_LIST_HEAD(&discard_list);
4829 
4830 	spin_lock(&lg->lg_prealloc_lock);
4831 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4832 				pa_inode_list,
4833 				lockdep_is_held(&lg->lg_prealloc_lock)) {
4834 		spin_lock(&pa->pa_lock);
4835 		if (atomic_read(&pa->pa_count)) {
4836 			/*
4837 			 * This is the pa that we just used
4838 			 * for block allocation. So don't
4839 			 * free that
4840 			 */
4841 			spin_unlock(&pa->pa_lock);
4842 			continue;
4843 		}
4844 		if (pa->pa_deleted) {
4845 			spin_unlock(&pa->pa_lock);
4846 			continue;
4847 		}
4848 		/* only lg prealloc space */
4849 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4850 
4851 		/* seems this one can be freed ... */
4852 		ext4_mb_mark_pa_deleted(sb, pa);
4853 		spin_unlock(&pa->pa_lock);
4854 
4855 		list_del_rcu(&pa->pa_inode_list);
4856 		list_add(&pa->u.pa_tmp_list, &discard_list);
4857 
4858 		total_entries--;
4859 		if (total_entries <= 5) {
4860 			/*
4861 			 * we want to keep only 5 entries,
4862 			 * allowing it to grow to 8. This
4863 			 * makes sure we don't call discard
4864 			 * again soon for this list.
4865 			 */
4866 			break;
4867 		}
4868 	}
4869 	spin_unlock(&lg->lg_prealloc_lock);
4870 
4871 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4872 		int err;
4873 
4874 		group = ext4_get_group_number(sb, pa->pa_pstart);
4875 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4876 					     GFP_NOFS|__GFP_NOFAIL);
4877 		if (err) {
4878 			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
4879 				       err, group);
4880 			continue;
4881 		}
4882 		ext4_lock_group(sb, group);
4883 		list_del(&pa->pa_group_list);
4884 		ext4_mb_release_group_pa(&e4b, pa);
4885 		ext4_unlock_group(sb, group);
4886 
4887 		ext4_mb_unload_buddy(&e4b);
4888 		list_del(&pa->u.pa_tmp_list);
4889 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4890 	}
4891 }
4892 
4893 /*
4894  * We have incremented pa_count. So it cannot be freed at this
4895  * point. Also we hold lg_mutex. So no parallel allocation is
4896  * possible from this lg. That means pa_free cannot be updated.
4897  *
4898  * A parallel ext4_mb_discard_group_preallocations is possible,
4899  * which can cause the lg_prealloc_list to be updated.
4900  */
4901 
4902 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4903 {
4904 	int order, added = 0, lg_prealloc_count = 1;
4905 	struct super_block *sb = ac->ac_sb;
4906 	struct ext4_locality_group *lg = ac->ac_lg;
4907 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4908 
4909 	order = fls(pa->pa_free) - 1;
4910 	if (order > PREALLOC_TB_SIZE - 1)
4911 		/* The max size of hash table is PREALLOC_TB_SIZE */
4912 		order = PREALLOC_TB_SIZE - 1;
4913 	/* Add the prealloc space to lg */
4914 	spin_lock(&lg->lg_prealloc_lock);
4915 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4916 				pa_inode_list,
4917 				lockdep_is_held(&lg->lg_prealloc_lock)) {
4918 		spin_lock(&tmp_pa->pa_lock);
4919 		if (tmp_pa->pa_deleted) {
4920 			spin_unlock(&tmp_pa->pa_lock);
4921 			continue;
4922 		}
4923 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4924 			/* Add to the tail of the previous entry */
4925 			list_add_tail_rcu(&pa->pa_inode_list,
4926 						&tmp_pa->pa_inode_list);
4927 			added = 1;
4928 			/*
4929 			 * we want to count the total
4930 			 * number of entries in the list
4931 			 */
4932 		}
4933 		spin_unlock(&tmp_pa->pa_lock);
4934 		lg_prealloc_count++;
4935 	}
4936 	if (!added)
4937 		list_add_tail_rcu(&pa->pa_inode_list,
4938 					&lg->lg_prealloc_list[order]);
4939 	spin_unlock(&lg->lg_prealloc_lock);
4940 
4941 	/* Now trim the list to be not more than 8 elements */
4942 	if (lg_prealloc_count > 8) {
4943 		ext4_mb_discard_lg_preallocations(sb, lg,
4944 						  order, lg_prealloc_count);
4945 		return;
4946 	}
4947 	return;
4948 }
4949 
4950 /*
4951  * if per-inode prealloc list is too long, trim some PA
4952  */
4953 static void ext4_mb_trim_inode_pa(struct inode *inode)
4954 {
4955 	struct ext4_inode_info *ei = EXT4_I(inode);
4956 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4957 	int count, delta;
4958 
4959 	count = atomic_read(&ei->i_prealloc_active);
4960 	delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
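	/*
	 * Worked example, assuming the default s_mb_max_inode_prealloc of
	 * 512: delta = 129, so trimming only starts once more than 641 PAs
	 * are active, and then count - 512 of them are discarded.
	 */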
4961 	if (count > sbi->s_mb_max_inode_prealloc + delta) {
4962 		count -= sbi->s_mb_max_inode_prealloc;
4963 		ext4_discard_preallocations(inode, count);
4964 	}
4965 }
4966 
4967 /*
4968  * release all resource we used in allocation
4969  */
4970 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4971 {
4972 	struct inode *inode = ac->ac_inode;
4973 	struct ext4_inode_info *ei = EXT4_I(inode);
4974 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4975 	struct ext4_prealloc_space *pa = ac->ac_pa;
4976 	if (pa) {
4977 		if (pa->pa_type == MB_GROUP_PA) {
4978 			/* see comment in ext4_mb_use_group_pa() */
4979 			spin_lock(&pa->pa_lock);
4980 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4981 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4982 			pa->pa_free -= ac->ac_b_ex.fe_len;
4983 			pa->pa_len -= ac->ac_b_ex.fe_len;
4984 			spin_unlock(&pa->pa_lock);
4985 
4986 			/*
4987 			 * We want to add the pa to the right bucket.
4988 			 * Remove it from the list and while adding
4989 			 * make sure the list to which we are adding
4990 			 * doesn't grow big.
4991 			 */
4992 			if (likely(pa->pa_free)) {
4993 				spin_lock(pa->pa_obj_lock);
4994 				list_del_rcu(&pa->pa_inode_list);
4995 				spin_unlock(pa->pa_obj_lock);
4996 				ext4_mb_add_n_trim(ac);
4997 			}
4998 		}
4999 
5000 		if (pa->pa_type == MB_INODE_PA) {
5001 			/*
5002 			 * treat the per-inode prealloc list as an LRU list, then try
5003 			 * to trim the least recently used PA.
5004 			 */
5005 			spin_lock(pa->pa_obj_lock);
5006 			list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5007 			spin_unlock(pa->pa_obj_lock);
5008 		}
5009 
5010 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
5011 	}
5012 	if (ac->ac_bitmap_page)
5013 		put_page(ac->ac_bitmap_page);
5014 	if (ac->ac_buddy_page)
5015 		put_page(ac->ac_buddy_page);
5016 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5017 		mutex_unlock(&ac->ac_lg->lg_mutex);
5018 	ext4_mb_collect_stats(ac);
5019 	ext4_mb_trim_inode_pa(inode);
5020 	return 0;
5021 }
5022 
5023 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5024 {
5025 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5026 	int ret;
5027 	int freed = 0, busy = 0;
5028 	int retry = 0;
5029 
5030 	trace_ext4_mb_discard_preallocations(sb, needed);
5031 
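	/*
	 * needed == 0 means "discard as much as possible": use more clusters
	 * than a single group can hold, so the scan is not cut short by one
	 * group's worth of freed preallocations.
	 */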
5032 	if (needed == 0)
5033 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5034  repeat:
5035 	for (i = 0; i < ngroups && needed > 0; i++) {
5036 		ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5037 		freed += ret;
5038 		needed -= ret;
5039 		cond_resched();
5040 	}
5041 
5042 	if (needed > 0 && busy && ++retry < 3) {
5043 		busy = 0;
5044 		goto repeat;
5045 	}
5046 
5047 	return freed;
5048 }
5049 
5050 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
5051 			struct ext4_allocation_context *ac, u64 *seq)
5052 {
5053 	int freed;
5054 	u64 seq_retry = 0;
5055 	bool ret = false;
5056 
5057 	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
5058 	if (freed) {
5059 		ret = true;
5060 		goto out_dbg;
5061 	}
5062 	seq_retry = ext4_get_discard_pa_seq_sum();
5063 	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
5064 		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
5065 		*seq = seq_retry;
5066 		ret = true;
5067 	}
5068 
5069 out_dbg:
5070 	mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
5071 	return ret;
5072 }
5073 
5074 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5075 				struct ext4_allocation_request *ar, int *errp);
5076 
5077 /*
5078  * Main entry point into mballoc to allocate blocks
5079  * it tries to use preallocation first, then falls back
5080  * to usual allocation
5081  */
5082 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
5083 				struct ext4_allocation_request *ar, int *errp)
5084 {
5085 	struct ext4_allocation_context *ac = NULL;
5086 	struct ext4_sb_info *sbi;
5087 	struct super_block *sb;
5088 	ext4_fsblk_t block = 0;
5089 	unsigned int inquota = 0;
5090 	unsigned int reserv_clstrs = 0;
5091 	int retries = 0;
5092 	u64 seq;
5093 
5094 	might_sleep();
5095 	sb = ar->inode->i_sb;
5096 	sbi = EXT4_SB(sb);
5097 
5098 	trace_ext4_request_blocks(ar);
5099 	if (sbi->s_mount_state & EXT4_FC_REPLAY)
5100 		return ext4_mb_new_blocks_simple(handle, ar, errp);
5101 
5102 	/* Allow to use superuser reservation for quota file */
5103 	if (ext4_is_quota_file(ar->inode))
5104 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5105 
5106 	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5107 		/* Without delayed allocation we need to verify
5108 		 * there are enough free blocks to do block allocation
5109 		 * and verify allocation doesn't exceed the quota limits.
5110 		 */
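		/*
		 * If the full request cannot be reserved, keep halving it
		 * until it fits or reaches zero (which becomes ENOSPC below).
		 */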
5111 		while (ar->len &&
5112 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5113 
5114 			/* let others free the space */
5115 			cond_resched();
5116 			ar->len = ar->len >> 1;
5117 		}
5118 		if (!ar->len) {
5119 			ext4_mb_show_pa(sb);
5120 			*errp = -ENOSPC;
5121 			return 0;
5122 		}
5123 		reserv_clstrs = ar->len;
5124 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5125 			dquot_alloc_block_nofail(ar->inode,
5126 						 EXT4_C2B(sbi, ar->len));
5127 		} else {
5128 			while (ar->len &&
5129 				dquot_alloc_block(ar->inode,
5130 						  EXT4_C2B(sbi, ar->len))) {
5131 
5132 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5133 				ar->len--;
5134 			}
5135 		}
5136 		inquota = ar->len;
5137 		if (ar->len == 0) {
5138 			*errp = -EDQUOT;
5139 			goto out;
5140 		}
5141 	}
5142 
5143 	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5144 	if (!ac) {
5145 		ar->len = 0;
5146 		*errp = -ENOMEM;
5147 		goto out;
5148 	}
5149 
5150 	*errp = ext4_mb_initialize_context(ac, ar);
5151 	if (*errp) {
5152 		ar->len = 0;
5153 		goto out;
5154 	}
5155 
5156 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
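	/*
	 * Snapshot the discard sequence counter before trying the
	 * allocation; ext4_mb_discard_preallocations_should_retry() uses it
	 * to decide whether a concurrent discard justifies another attempt.
	 */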
5157 	seq = this_cpu_read(discard_pa_seq);
5158 	if (!ext4_mb_use_preallocated(ac)) {
5159 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5160 		ext4_mb_normalize_request(ac, ar);
5161 
5162 		*errp = ext4_mb_pa_alloc(ac);
5163 		if (*errp)
5164 			goto errout;
5165 repeat:
5166 		/* allocate space in core */
5167 		*errp = ext4_mb_regular_allocator(ac);
5168 		/*
5169 		 * pa allocated above is added to grp->bb_prealloc_list only
5170 		 * when we were able to allocate some blocks, i.e. when
5171 		 * ac->ac_status == AC_STATUS_FOUND.
5172 		 * An error from above means ac->ac_status != AC_STATUS_FOUND,
5173 		 * so we must free this pa here ourselves.
5174 		 */
5175 		if (*errp) {
5176 			ext4_mb_pa_free(ac);
5177 			ext4_discard_allocated_blocks(ac);
5178 			goto errout;
5179 		}
5180 		if (ac->ac_status == AC_STATUS_FOUND &&
5181 			ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5182 			ext4_mb_pa_free(ac);
5183 	}
5184 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5185 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5186 		if (*errp) {
5187 			ext4_discard_allocated_blocks(ac);
5188 			goto errout;
5189 		} else {
5190 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5191 			ar->len = ac->ac_b_ex.fe_len;
5192 		}
5193 	} else {
5194 		if (++retries < 3 &&
5195 		    ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5196 			goto repeat;
5197 		/*
5198 		 * If block allocation fails then the pa allocated above
5199 		 * needs to be freed here as well.
5200 		 */
5201 		ext4_mb_pa_free(ac);
5202 		*errp = -ENOSPC;
5203 	}
5204 
5205 errout:
5206 	if (*errp) {
5207 		ac->ac_b_ex.fe_len = 0;
5208 		ar->len = 0;
5209 		ext4_mb_show_ac(ac);
5210 	}
5211 	ext4_mb_release_context(ac);
5212 out:
5213 	if (ac)
5214 		kmem_cache_free(ext4_ac_cachep, ac);
5215 	if (inquota && ar->len < inquota)
5216 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5217 	if (!ar->len) {
5218 		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5219 			/* release all the reserved blocks if non delalloc */
5220 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5221 						reserv_clstrs);
5222 	}
5223 
5224 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5225 
5226 	return block;
5227 }
5228 
5229 /*
5230  * We can merge two free data extents only if the physical blocks
5231  * are contiguous, AND the extents were freed by the same transaction,
5232  * AND the blocks are associated with the same group.
5233  */
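/*
 * For example, freeing clusters [100, 120) and then [120, 130) of the same
 * group in the same transaction collapses the two rbtree entries into a
 * single entry covering [100, 130).
 */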
5234 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5235 					struct ext4_free_data *entry,
5236 					struct ext4_free_data *new_entry,
5237 					struct rb_root *entry_rb_root)
5238 {
5239 	if ((entry->efd_tid != new_entry->efd_tid) ||
5240 	    (entry->efd_group != new_entry->efd_group))
5241 		return;
5242 	if (entry->efd_start_cluster + entry->efd_count ==
5243 	    new_entry->efd_start_cluster) {
5244 		new_entry->efd_start_cluster = entry->efd_start_cluster;
5245 		new_entry->efd_count += entry->efd_count;
5246 	} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5247 		   entry->efd_start_cluster) {
5248 		new_entry->efd_count += entry->efd_count;
5249 	} else
5250 		return;
5251 	spin_lock(&sbi->s_md_lock);
5252 	list_del(&entry->efd_list);
5253 	spin_unlock(&sbi->s_md_lock);
5254 	rb_erase(&entry->efd_node, entry_rb_root);
5255 	kmem_cache_free(ext4_free_data_cachep, entry);
5256 }
5257 
5258 static noinline_for_stack int
5259 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5260 		      struct ext4_free_data *new_entry)
5261 {
5262 	ext4_group_t group = e4b->bd_group;
5263 	ext4_grpblk_t cluster;
5264 	ext4_grpblk_t clusters = new_entry->efd_count;
5265 	struct ext4_free_data *entry;
5266 	struct ext4_group_info *db = e4b->bd_info;
5267 	struct super_block *sb = e4b->bd_sb;
5268 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5269 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
5270 	struct rb_node *parent = NULL, *new_node;
5271 
5272 	BUG_ON(!ext4_handle_valid(handle));
5273 	BUG_ON(e4b->bd_bitmap_page == NULL);
5274 	BUG_ON(e4b->bd_buddy_page == NULL);
5275 
5276 	new_node = &new_entry->efd_node;
5277 	cluster = new_entry->efd_start_cluster;
5278 
5279 	if (!*n) {
5280 		/*
5281 		 * first free block extent. We need to protect the buddy
5282 		 * cache from being freed, otherwise we'll refresh it from
5283 		 * the on-disk bitmap and lose not-yet-available blocks.
5284 		 */
5285 		get_page(e4b->bd_buddy_page);
5286 		get_page(e4b->bd_bitmap_page);
5287 	}
5288 	while (*n) {
5289 		parent = *n;
5290 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
5291 		if (cluster < entry->efd_start_cluster)
5292 			n = &(*n)->rb_left;
5293 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5294 			n = &(*n)->rb_right;
5295 		else {
5296 			ext4_grp_locked_error(sb, group, 0,
5297 				ext4_group_first_block_no(sb, group) +
5298 				EXT4_C2B(sbi, cluster),
5299 				"Block already on to-be-freed list");
5300 			kmem_cache_free(ext4_free_data_cachep, new_entry);
5301 			return 0;
5302 		}
5303 	}
5304 
5305 	rb_link_node(new_node, parent, n);
5306 	rb_insert_color(new_node, &db->bb_free_root);
5307 
5308 	/* Now try to see if the extent can be merged to the left and right */
5309 	node = rb_prev(new_node);
5310 	if (node) {
5311 		entry = rb_entry(node, struct ext4_free_data, efd_node);
5312 		ext4_try_merge_freed_extent(sbi, entry, new_entry,
5313 					    &(db->bb_free_root));
5314 	}
5315 
5316 	node = rb_next(new_node);
5317 	if (node) {
5318 		entry = rb_entry(node, struct ext4_free_data, efd_node);
5319 		ext4_try_merge_freed_extent(sbi, entry, new_entry,
5320 					    &(db->bb_free_root));
5321 	}
5322 
5323 	spin_lock(&sbi->s_md_lock);
5324 	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5325 	sbi->s_mb_free_pending += clusters;
5326 	spin_unlock(&sbi->s_md_lock);
5327 	return 0;
5328 }
5329 
5330 /*
5331  * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5332  * linearly starting at the goal block and also excludes the blocks which
5333  * are going to be in use after fast commit replay.
5334  */
5335 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5336 				struct ext4_allocation_request *ar, int *errp)
5337 {
5338 	struct buffer_head *bitmap_bh;
5339 	struct super_block *sb = ar->inode->i_sb;
5340 	ext4_group_t group;
5341 	ext4_grpblk_t blkoff;
5342 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5343 	ext4_grpblk_t i = 0;
5344 	ext4_fsblk_t goal, block;
5345 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5346 
5347 	goal = ar->goal;
5348 	if (goal < le32_to_cpu(es->s_first_data_block) ||
5349 			goal >= ext4_blocks_count(es))
5350 		goal = le32_to_cpu(es->s_first_data_block);
5351 
5352 	ar->len = 0;
5353 	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5354 	for (; group < ext4_get_groups_count(sb); group++) {
5355 		bitmap_bh = ext4_read_block_bitmap(sb, group);
5356 		if (IS_ERR(bitmap_bh)) {
5357 			*errp = PTR_ERR(bitmap_bh);
5358 			pr_warn("Failed to read block bitmap\n");
5359 			return 0;
5360 		}
5361 
5362 		ext4_get_group_no_and_offset(sb,
5363 			max(ext4_group_first_block_no(sb, group), goal),
5364 			NULL, &blkoff);
5365 		while (1) {
5366 			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5367 						blkoff);
5368 			if (i >= max)
5369 				break;
5370 			if (ext4_fc_replay_check_excluded(sb,
5371 				ext4_group_first_block_no(sb, group) + i)) {
5372 				blkoff = i + 1;
5373 			} else
5374 				break;
5375 		}
5376 		brelse(bitmap_bh);
5377 		if (i < max)
5378 			break;
5379 	}
5380 
5381 	if (group >= ext4_get_groups_count(sb) || i >= max) {
5382 		*errp = -ENOSPC;
5383 		return 0;
5384 	}
5385 
5386 	block = ext4_group_first_block_no(sb, group) + i;
5387 	ext4_mb_mark_bb(sb, block, 1, 1);
5388 	ar->len = 1;
5389 
5390 	return block;
5391 }
5392 
5393 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5394 					unsigned long count)
5395 {
5396 	struct buffer_head *bitmap_bh;
5397 	struct super_block *sb = inode->i_sb;
5398 	struct ext4_group_desc *gdp;
5399 	struct buffer_head *gdp_bh;
5400 	ext4_group_t group;
5401 	ext4_grpblk_t blkoff;
5402 	int already_freed = 0, err, i;
5403 
5404 	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5405 	bitmap_bh = ext4_read_block_bitmap(sb, group);
5406 	if (IS_ERR(bitmap_bh)) {
5407 		err = PTR_ERR(bitmap_bh);
5408 		pr_warn("Failed to read block bitmap\n");
5409 		return;
5410 	}
5411 	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5412 	if (!gdp)
5413 		return;
5414 
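	/*
	 * Count bits that are already clear, so the group free-cluster
	 * counter below is only bumped for blocks that were actually in use.
	 */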
5415 	for (i = 0; i < count; i++) {
5416 		if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5417 			already_freed++;
5418 	}
5419 	mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5420 	err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5421 	if (err)
5422 		return;
5423 	ext4_free_group_clusters_set(
5424 		sb, gdp, ext4_free_group_clusters(sb, gdp) +
5425 		count - already_freed);
5426 	ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5427 	ext4_group_desc_csum_set(sb, group, gdp);
5428 	ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5429 	sync_dirty_buffer(bitmap_bh);
5430 	sync_dirty_buffer(gdp_bh);
5431 	brelse(bitmap_bh);
5432 }
5433 
5434 /**
5435  * ext4_mb_clear_bb() -- helper function for freeing blocks.
5436  *			Used by ext4_free_blocks()
5437  * @handle:		handle for this transaction
5438  * @inode:		inode
5439  * @bh:			optional buffer of the block to be freed
5440  * @block:		starting physical block to be freed
5441  * @count:		number of blocks to be freed
5442  * @flags:		flags used by ext4_free_blocks
5443  */
5444 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
5445 			       ext4_fsblk_t block, unsigned long count,
5446 			       int flags)
5447 {
5448 	struct buffer_head *bitmap_bh = NULL;
5449 	struct super_block *sb = inode->i_sb;
5450 	struct ext4_group_desc *gdp;
5451 	struct ext4_group_info *grp;
5452 	unsigned int overflow;
5453 	ext4_grpblk_t bit;
5454 	struct buffer_head *gd_bh;
5455 	ext4_group_t block_group;
5456 	struct ext4_sb_info *sbi;
5457 	struct ext4_buddy e4b;
5458 	unsigned int count_clusters;
5459 	int err = 0;
5460 	int ret;
5461 
5462 	sbi = EXT4_SB(sb);
5463 
5464 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5465 	    !ext4_inode_block_valid(inode, block, count)) {
5466 		ext4_error(sb, "Freeing blocks in system zone - "
5467 			   "Block = %llu, count = %lu", block, count);
5468 		/* err = 0. ext4_std_error should be a no op */
5469 		goto error_return;
5470 	}
5471 	flags |= EXT4_FREE_BLOCKS_VALIDATED;
5472 
5473 do_more:
5474 	overflow = 0;
5475 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5476 
5477 	grp = ext4_get_group_info(sb, block_group);
5478 	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
5479 		return;
5480 
5481 	/*
5482 	 * Check to see if we are freeing blocks across a group
5483 	 * boundary.
5484 	 */
5485 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5486 		overflow = EXT4_C2B(sbi, bit) + count -
5487 			EXT4_BLOCKS_PER_GROUP(sb);
5488 		count -= overflow;
5489 		/* The range changed so it's no longer validated */
5490 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5491 	}
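	/*
	 * Convert the block count to clusters, rounding up; e.g. a bigalloc
	 * cluster ratio of 16 turns count = 32 blocks into 2 clusters, while
	 * a ratio of 1 leaves the count unchanged.
	 */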
5492 	count_clusters = EXT4_NUM_B2C(sbi, count);
5493 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5494 	if (IS_ERR(bitmap_bh)) {
5495 		err = PTR_ERR(bitmap_bh);
5496 		bitmap_bh = NULL;
5497 		goto error_return;
5498 	}
5499 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
5500 	if (!gdp) {
5501 		err = -EIO;
5502 		goto error_return;
5503 	}
5504 
5505 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5506 	    !ext4_inode_block_valid(inode, block, count)) {
5507 		ext4_error(sb, "Freeing blocks in system zone - "
5508 			   "Block = %llu, count = %lu", block, count);
5509 		/* err = 0. ext4_std_error should be a no op */
5510 		goto error_return;
5511 	}
5512 
5513 	BUFFER_TRACE(bitmap_bh, "getting write access");
5514 	err = ext4_journal_get_write_access(handle, bitmap_bh);
5515 	if (err)
5516 		goto error_return;
5517 
5518 	/*
5519 	 * We are about to modify some metadata.  Call the journal APIs
5520 	 * to unshare ->b_data if a currently-committing transaction is
5521 	 * using it
5522 	 */
5523 	BUFFER_TRACE(gd_bh, "get_write_access");
5524 	err = ext4_journal_get_write_access(handle, gd_bh);
5525 	if (err)
5526 		goto error_return;
5527 #ifdef AGGRESSIVE_CHECK
5528 	{
5529 		int i;
5530 		for (i = 0; i < count_clusters; i++)
5531 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
5532 	}
5533 #endif
5534 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
5535 
5536 	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
5537 	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
5538 				     GFP_NOFS|__GFP_NOFAIL);
5539 	if (err)
5540 		goto error_return;
5541 
5542 	/*
5543 	 * We need to make sure we don't reuse the freed block until after the
5544 	 * transaction is committed. We make an exception if the inode is to be
5545 	 * written in writeback mode since writeback mode has weak data
5546 	 * consistency guarantees.
5547 	 */
5548 	if (ext4_handle_valid(handle) &&
5549 	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
5550 	     !ext4_should_writeback_data(inode))) {
5551 		struct ext4_free_data *new_entry;
5552 		/*
5553 		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
5554 		 * to fail.
5555 		 */
5556 		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
5557 				GFP_NOFS|__GFP_NOFAIL);
5558 		new_entry->efd_start_cluster = bit;
5559 		new_entry->efd_group = block_group;
5560 		new_entry->efd_count = count_clusters;
5561 		new_entry->efd_tid = handle->h_transaction->t_tid;
5562 
5563 		ext4_lock_group(sb, block_group);
5564 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
5565 		ext4_mb_free_metadata(handle, &e4b, new_entry);
5566 	} else {
5567 		/* need to update group_info->bb_free and bitmap
5568 		 * with group lock held. generate_buddy looks at
5569 		 * them with the group lock held
5570 		 */
5571 		if (test_opt(sb, DISCARD)) {
5572 			err = ext4_issue_discard(sb, block_group, bit, count,
5573 						 NULL);
5574 			if (err && err != -EOPNOTSUPP)
5575 				ext4_msg(sb, KERN_WARNING, "discard request in"
5576 					 " group:%u block:%d count:%lu failed"
5577 					 " with %d", block_group, bit, count,
5578 					 err);
5579 		} else
5580 			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
5581 
5582 		ext4_lock_group(sb, block_group);
5583 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
5584 		mb_free_blocks(inode, &e4b, bit, count_clusters);
5585 	}
5586 
5587 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
5588 	ext4_free_group_clusters_set(sb, gdp, ret);
5589 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
5590 	ext4_group_desc_csum_set(sb, block_group, gdp);
5591 	ext4_unlock_group(sb, block_group);
5592 
5593 	if (sbi->s_log_groups_per_flex) {
5594 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5595 		atomic64_add(count_clusters,
5596 			     &sbi_array_rcu_deref(sbi, s_flex_groups,
5597 						  flex_group)->free_clusters);
5598 	}
5599 
5600 	/*
5601 	 * on a bigalloc file system, defer the s_freeclusters_counter
5602 	 * update to the caller (ext4_remove_space and friends) so they
5603 	 * can determine if a cluster freed here should be rereserved
5604 	 */
5605 	if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
5606 		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
5607 			dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
5608 		percpu_counter_add(&sbi->s_freeclusters_counter,
5609 				   count_clusters);
5610 	}
5611 
5612 	ext4_mb_unload_buddy(&e4b);
5613 
5614 	/* We dirtied the bitmap block */
5615 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5616 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5617 
5618 	/* And the group descriptor block */
5619 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5620 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5621 	if (!err)
5622 		err = ret;
5623 
5624 	if (overflow && !err) {
5625 		block += count;
5626 		count = overflow;
5627 		put_bh(bitmap_bh);
5628 		/* The range changed so it's no longer validated */
5629 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5630 		goto do_more;
5631 	}
5632 error_return:
5633 	brelse(bitmap_bh);
5634 	ext4_std_error(sb, err);
5635 	return;
5636 }
5637 
5638 /**
5639  * ext4_free_blocks() -- Free given blocks and update quota
5640  * @handle:		handle for this transaction
5641  * @inode:		inode
5642  * @bh:			optional buffer of the block to be freed
5643  * @block:		starting physical block to be freed
5644  * @count:		number of blocks to be freed
5645  * @flags:		flags used by ext4_free_blocks
5646  */
5647 void ext4_free_blocks(handle_t *handle, struct inode *inode,
5648 		      struct buffer_head *bh, ext4_fsblk_t block,
5649 		      unsigned long count, int flags)
5650 {
5651 	struct super_block *sb = inode->i_sb;
5652 	unsigned int overflow;
5653 	struct ext4_sb_info *sbi;
5654 
5655 	sbi = EXT4_SB(sb);
5656 
5657 	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
5658 		ext4_free_blocks_simple(inode, block, count);
5659 		return;
5660 	}
5661 
5662 	might_sleep();
5663 	if (bh) {
5664 		if (block)
5665 			BUG_ON(block != bh->b_blocknr);
5666 		else
5667 			block = bh->b_blocknr;
5668 	}
5669 
5670 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5671 	    !ext4_inode_block_valid(inode, block, count)) {
5672 		ext4_error(sb, "Freeing blocks not in datazone - "
5673 			   "block = %llu, count = %lu", block, count);
5674 		return;
5675 	}
5676 	flags |= EXT4_FREE_BLOCKS_VALIDATED;
5677 
5678 	ext4_debug("freeing block %llu\n", block);
5679 	trace_ext4_free_blocks(inode, block, count, flags);
5680 
5681 	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
5682 		BUG_ON(count > 1);
5683 
5684 		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
5685 			    inode, bh, block);
5686 	}
5687 
5688 	/*
5689 	 * If the extent to be freed does not begin on a cluster
5690 	 * boundary, we need to deal with partial clusters at the
5691 	 * beginning and end of the extent.  Normally we will free
5692 	 * blocks at the beginning or the end unless we are explicitly
5693 	 * requested to avoid doing so.
5694 	 */
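	/*
	 * For example, with s_cluster_ratio == 16, freeing block = 18,
	 * count = 33 (blocks 18..50) is normally widened to blocks 16..63,
	 * i.e. the three clusters the range touches.
	 */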
5695 	overflow = EXT4_PBLK_COFF(sbi, block);
5696 	if (overflow) {
5697 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
5698 			overflow = sbi->s_cluster_ratio - overflow;
5699 			block += overflow;
5700 			if (count > overflow)
5701 				count -= overflow;
5702 			else
5703 				return;
5704 		} else {
5705 			block -= overflow;
5706 			count += overflow;
5707 		}
5708 		/* The range changed so it's no longer validated */
5709 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5710 	}
5711 	overflow = EXT4_LBLK_COFF(sbi, count);
5712 	if (overflow) {
5713 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
5714 			if (count > overflow)
5715 				count -= overflow;
5716 			else
5717 				return;
5718 		} else
5719 			count += sbi->s_cluster_ratio - overflow;
5720 		/* The range changed so it's no longer validated */
5721 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5722 	}
5723 
5724 	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
5725 		int i;
5726 		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
5727 
5728 		for (i = 0; i < count; i++) {
5729 			cond_resched();
5730 			if (is_metadata)
5731 				bh = sb_find_get_block(inode->i_sb, block + i);
5732 			ext4_forget(handle, is_metadata, inode, bh, block + i);
5733 		}
5734 	}
5735 
5736 	ext4_mb_clear_bb(handle, inode, block, count, flags);
5737 	return;
5738 }
5739 
5740 /**
5741  * ext4_group_add_blocks() -- Add given blocks to an existing group
5742  * @handle:			handle to this transaction
5743  * @sb:				super block
5744  * @block:			start physical block to add to the block group
5745  * @count:			number of blocks to free
5746  *
5747  * This marks the blocks as free in the bitmap and buddy.
5748  */
5749 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
5750 			 ext4_fsblk_t block, unsigned long count)
5751 {
5752 	struct buffer_head *bitmap_bh = NULL;
5753 	struct buffer_head *gd_bh;
5754 	ext4_group_t block_group;
5755 	ext4_grpblk_t bit;
5756 	unsigned int i;
5757 	struct ext4_group_desc *desc;
5758 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5759 	struct ext4_buddy e4b;
5760 	int err = 0, ret, free_clusters_count;
5761 	ext4_grpblk_t clusters_freed;
5762 	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
5763 	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
5764 	unsigned long cluster_count = last_cluster - first_cluster + 1;
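	/*
	 * cluster_count covers every cluster the range touches; e.g. with a
	 * cluster ratio of 16, block = 30 and count = 4 spans clusters 1 and
	 * 2, so cluster_count = 2.
	 */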
5765 
5766 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
5767 
5768 	if (count == 0)
5769 		return 0;
5770 
5771 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5772 	/*
5773 	 * Check to see if we are freeing blocks across a group
5774 	 * boundary.
5775 	 */
5776 	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
5777 		ext4_warning(sb, "too many blocks added to group %u",
5778 			     block_group);
5779 		err = -EINVAL;
5780 		goto error_return;
5781 	}
5782 
5783 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5784 	if (IS_ERR(bitmap_bh)) {
5785 		err = PTR_ERR(bitmap_bh);
5786 		bitmap_bh = NULL;
5787 		goto error_return;
5788 	}
5789 
5790 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
5791 	if (!desc) {
5792 		err = -EIO;
5793 		goto error_return;
5794 	}
5795 
5796 	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
5797 		ext4_error(sb, "Adding blocks in system zones - "
5798 			   "Block = %llu, count = %lu",
5799 			   block, count);
5800 		err = -EINVAL;
5801 		goto error_return;
5802 	}
5803 
5804 	BUFFER_TRACE(bitmap_bh, "getting write access");
5805 	err = ext4_journal_get_write_access(handle, bitmap_bh);
5806 	if (err)
5807 		goto error_return;
5808 
5809 	/*
5810 	 * We are about to modify some metadata.  Call the journal APIs
5811 	 * to unshare ->b_data if a currently-committing transaction is
5812 	 * using it
5813 	 */
5814 	BUFFER_TRACE(gd_bh, "get_write_access");
5815 	err = ext4_journal_get_write_access(handle, gd_bh);
5816 	if (err)
5817 		goto error_return;
5818 
5819 	for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
5820 		BUFFER_TRACE(bitmap_bh, "clear bit");
5821 		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
5822 			ext4_error(sb, "bit already cleared for block %llu",
5823 				   (ext4_fsblk_t)(block + i));
5824 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
5825 		} else {
5826 			clusters_freed++;
5827 		}
5828 	}
5829 
5830 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
5831 	if (err)
5832 		goto error_return;
5833 
5834 	/*
5835 	 * need to update group_info->bb_free and bitmap
5836 	 * with group lock held. generate_buddy looks at
5837 	 * them with the group lock held
5838 	 */
5839 	ext4_lock_group(sb, block_group);
5840 	mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
5841 	mb_free_blocks(NULL, &e4b, bit, cluster_count);
5842 	free_clusters_count = clusters_freed +
5843 		ext4_free_group_clusters(sb, desc);
5844 	ext4_free_group_clusters_set(sb, desc, free_clusters_count);
5845 	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
5846 	ext4_group_desc_csum_set(sb, block_group, desc);
5847 	ext4_unlock_group(sb, block_group);
5848 	percpu_counter_add(&sbi->s_freeclusters_counter,
5849 			   clusters_freed);
5850 
5851 	if (sbi->s_log_groups_per_flex) {
5852 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5853 		atomic64_add(clusters_freed,
5854 			     &sbi_array_rcu_deref(sbi, s_flex_groups,
5855 						  flex_group)->free_clusters);
5856 	}
5857 
5858 	ext4_mb_unload_buddy(&e4b);
5859 
5860 	/* We dirtied the bitmap block */
5861 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5862 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5863 
5864 	/* And the group descriptor block */
5865 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5866 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5867 	if (!err)
5868 		err = ret;
5869 
5870 error_return:
5871 	brelse(bitmap_bh);
5872 	ext4_std_error(sb, err);
5873 	return err;
5874 }
5875 
5876 /**
5877  * ext4_trim_extent -- function to TRIM one single free extent in the group
5878  * @sb:		super block for the file system
5879  * @start:	starting block of the free extent in the alloc. group
5880  * @count:	number of blocks to TRIM
5881  * @group:	alloc. group we are working with
5882  * @e4b:	ext4 buddy for the group
5883  *
5884  * Trim "count" blocks starting at "start" in the "group". To assure that no
5885  * one will allocate those blocks, mark them as used in the buddy bitmap.
5886  * This must be called under the group lock.
5887  */
5888 static int ext4_trim_extent(struct super_block *sb, int start, int count,
5889 			     ext4_group_t group, struct ext4_buddy *e4b)
5890 __releases(bitlock)
5891 __acquires(bitlock)
5892 {
5893 	struct ext4_free_extent ex;
5894 	int ret = 0;
5895 
5896 	trace_ext4_trim_extent(sb, group, start, count);
5897 
5898 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
5899 
5900 	ex.fe_start = start;
5901 	ex.fe_group = group;
5902 	ex.fe_len = count;
5903 
5904 	/*
5905 	 * Mark blocks used, so no one can reuse them while
5906 	 * being trimmed.
5907 	 */
5908 	mb_mark_used(e4b, &ex);
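	/* issuing the discard may block on I/O, so drop the group lock */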
5909 	ext4_unlock_group(sb, group);
5910 	ret = ext4_issue_discard(sb, group, start, count, NULL);
5911 	ext4_lock_group(sb, group);
5912 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
5913 	return ret;
5914 }
5915 
5916 /**
5917  * ext4_trim_all_free -- function to trim all free space in alloc. group
5918  * @sb:			super block for file system
5919  * @group:		group to be trimmed
5920  * @start:		first group block to examine
5921  * @max:		last group block to examine
5922  * @minblocks:		minimum extent block count
5923  *
5924  * ext4_trim_all_free walks through the group's buddy bitmap searching for
5925  * free extents. When a free extent is found, ext4_trim_extent is called to
5926  * TRIM it.
5927  *
5928  *
5929  * More precisely: when a free extent of at least minblocks clusters is
5930  * found, it is marked as used in the group buddy bitmap, a TRIM command is
5931  * issued for it, and the extent is then freed again in the buddy bitmap.
5932  * This is repeated until the whole group has been scanned.
5933  */
5934 static ext4_grpblk_t
5935 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5936 		   ext4_grpblk_t start, ext4_grpblk_t max,
5937 		   ext4_grpblk_t minblocks)
5938 {
5939 	void *bitmap;
5940 	ext4_grpblk_t next, count = 0, free_count = 0;
5941 	struct ext4_buddy e4b;
5942 	int ret = 0;
5943 
5944 	trace_ext4_trim_all_free(sb, group, start, max);
5945 
5946 	ret = ext4_mb_load_buddy(sb, group, &e4b);
5947 	if (ret) {
5948 		ext4_warning(sb, "Error %d loading buddy information for %u",
5949 			     ret, group);
5950 		return ret;
5951 	}
5952 	bitmap = e4b.bd_bitmap;
5953 
5954 	ext4_lock_group(sb, group);
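	/*
	 * Nothing to do if this group was already trimmed with an equal or
	 * smaller minimum extent length.
	 */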
5955 	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5956 	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5957 		goto out;
5958 
5959 	start = (e4b.bd_info->bb_first_free > start) ?
5960 		e4b.bd_info->bb_first_free : start;
5961 
5962 	while (start <= max) {
5963 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
5964 		if (start > max)
5965 			break;
5966 		next = mb_find_next_bit(bitmap, max + 1, start);
5967 
5968 		if ((next - start) >= minblocks) {
5969 			ret = ext4_trim_extent(sb, start,
5970 					       next - start, group, &e4b);
5971 			if (ret && ret != -EOPNOTSUPP)
5972 				break;
5973 			ret = 0;
5974 			count += next - start;
5975 		}
5976 		free_count += next - start;
5977 		start = next + 1;
5978 
5979 		if (fatal_signal_pending(current)) {
5980 			count = -ERESTARTSYS;
5981 			break;
5982 		}
5983 
5984 		if (need_resched()) {
5985 			ext4_unlock_group(sb, group);
5986 			cond_resched();
5987 			ext4_lock_group(sb, group);
5988 		}
5989 
5990 		if ((e4b.bd_info->bb_free - free_count) < minblocks)
5991 			break;
5992 	}
5993 
5994 	if (!ret) {
5995 		ret = count;
5996 		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
5997 	}
5998 out:
5999 	ext4_unlock_group(sb, group);
6000 	ext4_mb_unload_buddy(&e4b);
6001 
6002 	ext4_debug("trimmed %d blocks in the group %d\n",
6003 		count, group);
6004 
6005 	return ret;
6006 }
6007 
6008 /**
6009  * ext4_trim_fs() -- trim ioctl handle function
6010  * @sb:			superblock for filesystem
6011  * @range:		fstrim_range structure
6012  *
6013  * start:	first byte to trim
6014  * len:		number of bytes to trim from start
6015  * minlen:	minimum extent length in bytes
6016  * ext4_trim_fs goes through all allocation groups containing bytes from
6017  * start to start+len. For each such group the ext4_trim_all_free function
6018  * is invoked to trim all free space.
6019  */
6020 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6021 {
6022 	struct request_queue *q = bdev_get_queue(sb->s_bdev);
6023 	struct ext4_group_info *grp;
6024 	ext4_group_t group, first_group, last_group;
6025 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6026 	uint64_t start, end, minlen, trimmed = 0;
6027 	ext4_fsblk_t first_data_blk =
6028 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6029 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6030 	int ret = 0;
6031 
6032 	start = range->start >> sb->s_blocksize_bits;
6033 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
6034 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6035 			      range->minlen >> sb->s_blocksize_bits);
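	/*
	 * The byte-based fstrim_range is converted to blocks/clusters first;
	 * e.g. with 4KiB blocks, range->start = 1GiB becomes block 262144.
	 */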
6036 
6037 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6038 	    start >= max_blks ||
6039 	    range->len < sb->s_blocksize)
6040 		return -EINVAL;
6041 	/* No point in trying to trim less than the discard granularity */
6042 	if (range->minlen < q->limits.discard_granularity) {
6043 		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6044 			q->limits.discard_granularity >> sb->s_blocksize_bits);
6045 		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6046 			goto out;
6047 	}
6048 	if (end >= max_blks)
6049 		end = max_blks - 1;
6050 	if (end <= first_data_blk)
6051 		goto out;
6052 	if (start < first_data_blk)
6053 		start = first_data_blk;
6054 
6055 	/* Determine first and last group to examine based on start and end */
6056 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6057 				     &first_group, &first_cluster);
6058 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6059 				     &last_group, &last_cluster);
6060 
6061 	/* end now represents the last cluster to discard in this group */
6062 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6063 
6064 	for (group = first_group; group <= last_group; group++) {
6065 		grp = ext4_get_group_info(sb, group);
6066 		if (!grp)
6067 			continue;
6068 		/* We only do this if the grp has never been initialized */
6069 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6070 			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6071 			if (ret)
6072 				break;
6073 		}
6074 
6075 		/*
6076 		 * For all the groups except the last one, the last cluster will
6077 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6078 		 * change it for the last group, note that last_cluster is
6079 		 * already computed earlier by ext4_get_group_no_and_offset()
6080 		 */
6081 		if (group == last_group)
6082 			end = last_cluster;
6083 
6084 		if (grp->bb_free >= minlen) {
6085 			cnt = ext4_trim_all_free(sb, group, first_cluster,
6086 						end, minlen);
6087 			if (cnt < 0) {
6088 				ret = cnt;
6089 				break;
6090 			}
6091 			trimmed += cnt;
6092 		}
6093 
6094 		/*
6095 		 * For every group except the first one, we are sure
6096 		 * that the first cluster to discard will be cluster #0.
6097 		 */
6098 		first_cluster = 0;
6099 	}
6100 
6101 	if (!ret)
6102 		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
6103 
6104 out:
6105 	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6106 	return ret;
6107 }
6108 
6109 /* Iterate all the free extents in the group. */
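/*
 * The group lock is dropped around each formatter callback, so the callback
 * may sleep; scanning resumes just past the reported extent.
 */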
6110 int
6111 ext4_mballoc_query_range(
6112 	struct super_block		*sb,
6113 	ext4_group_t			group,
6114 	ext4_grpblk_t			start,
6115 	ext4_grpblk_t			end,
6116 	ext4_mballoc_query_range_fn	formatter,
6117 	void				*priv)
6118 {
6119 	void				*bitmap;
6120 	ext4_grpblk_t			next;
6121 	struct ext4_buddy		e4b;
6122 	int				error;
6123 
6124 	error = ext4_mb_load_buddy(sb, group, &e4b);
6125 	if (error)
6126 		return error;
6127 	bitmap = e4b.bd_bitmap;
6128 
6129 	ext4_lock_group(sb, group);
6130 
6131 	start = (e4b.bd_info->bb_first_free > start) ?
6132 		e4b.bd_info->bb_first_free : start;
6133 	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6134 		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6135 
6136 	while (start <= end) {
6137 		start = mb_find_next_zero_bit(bitmap, end + 1, start);
6138 		if (start > end)
6139 			break;
6140 		next = mb_find_next_bit(bitmap, end + 1, start);
6141 
6142 		ext4_unlock_group(sb, group);
6143 		error = formatter(sb, group, start, next - start, priv);
6144 		if (error)
6145 			goto out_unload;
6146 		ext4_lock_group(sb, group);
6147 
6148 		start = next + 1;
6149 	}
6150 
6151 	ext4_unlock_group(sb, group);
6152 out_unload:
6153 	ext4_mb_unload_buddy(&e4b);
6154 
6155 	return error;
6156 }
6157