1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
17  */
18 
19 
20 /*
21  * mballoc.c contains the multiblocks allocation routines
22  */
23 
24 #include "ext4_jbd2.h"
25 #include "mballoc.h"
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <trace/events/ext4.h>
29 
30 /*
31  * MUSTDO:
32  *   - test ext4_ext_search_left() and ext4_ext_search_right()
33  *   - search for metadata in few groups
34  *
35  * TODO v4:
36  *   - normalization should take into account whether file is still open
37  *   - discard preallocations if no free space left (policy?)
38  *   - don't normalize tails
39  *   - quota
40  *   - reservation for superuser
41  *
42  * TODO v3:
43  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
44  *   - track min/max extents in each group for better group selection
45  *   - mb_mark_used() may allocate chunk right after splitting buddy
46  *   - tree of groups sorted by number of free blocks
47  *   - error handling
48  */
49 
50 /*
51  * An allocation request asks for multiple blocks near the specified
52  * goal (block) value.
53  *
54  * During the initialization phase of the allocator we decide whether to use
55  * group preallocation or inode preallocation depending on the size of
56  * the file. The size of the file could be the resulting file size we
57  * would have after allocation, or the current file size, whichever
58  * is larger. If the size is less than sbi->s_mb_stream_request we
59  * use group preallocation. The default value of
60  * s_mb_stream_request is 16 blocks. This can also be tuned via
61  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
62  * terms of number of blocks.
63  *
64  * The main motivation for having small files use group preallocation is to
65  * keep small files close together on the disk.
66  *
67  * In the first stage the allocator looks at the inode prealloc list,
68  * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
69  * spaces for this particular inode. The inode prealloc space is
70  * represented as:
71  *
72  * pa_lstart -> the logical start block for this prealloc space
73  * pa_pstart -> the physical start block for this prealloc space
74  * pa_len    -> length for this prealloc space (in clusters)
75  * pa_free   ->  free space available in this prealloc space (in clusters)
76  *
77  * The inode preallocation space is used based on the _logical_ start
78  * block. Only if the logical file block falls within the range of a prealloc
79  * space do we consume that particular prealloc space. This makes sure that
80  * we have contiguous physical blocks representing the file blocks
81  *
82  * The important thing to note about inode prealloc space is that
83  * we don't modify any of the values associated with it except
84  * pa_free.
85  *
86  * If we are not able to find blocks in the inode prealloc space and if we
87  * have the group allocation flag set then we look at the locality group
88  * prealloc space. This is a per-CPU prealloc list represented as
89  *
90  * ext4_sb_info.s_locality_groups[smp_processor_id()]
91  *
92  * The reason for having a per cpu locality group is to reduce the contention
93  * between CPUs. It is possible to get scheduled at this point.
94  *
95  * The locality group prealloc space is used looking at whether we have
96  * enough free space (pa_free) within the prealloc space.
97  *
98  * If we can't allocate blocks via inode prealloc and/or locality group
99  * prealloc then we look at the buddy cache. The buddy cache is represented
100  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
101  * mapped to the buddy and bitmap information regarding different
102  * groups. The buddy information is attached to buddy cache inode so that
103  * we can access them through the page cache. The information regarding
104  * each group is loaded via ext4_mb_load_buddy.  The information involves
105  * the block bitmap and buddy information, and is stored in the
106  * inode as:
107  *
108  *  {                        page                        }
109  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
110  *
111  *
112  * one block each for bitmap and buddy information.  So for each group we
113  * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
114  * blocksize) blocks.  So it can hold information for groups_per_page groups,
115  * which is blocks_per_page/2.
116  *
117  * The buddy cache inode is not stored on disk. The inode is thrown
118  * away when the filesystem is unmounted.
119  *
120  * We look for count number of blocks in the buddy cache. If we were able
121  * to locate that many free blocks we return with additional information
122  * regarding the rest of the contiguous physical blocks available.
123  *
124  * Before allocating blocks via buddy cache we normalize the request
125  * blocks. This ensures we ask for more blocks than we need. The extra
126  * blocks that we get after allocation are added to the respective prealloc
127  * list. In case of inode preallocation we follow a list of heuristics
128  * based on file size. This can be found in ext4_mb_normalize_request. If
129  * we are doing a group prealloc we try to normalize the request to
130  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
131  * dependent on the cluster size; for non-bigalloc file systems, it is
132  * 512 blocks. This can be tuned via
133  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
134  * terms of number of blocks. If we have mounted the file system with -O
135  * stripe=<value> option the group prealloc request is normalized to
136  * the smallest multiple of the stripe value (sbi->s_stripe) which is
137  * greater than the default mb_group_prealloc.
138  *
139  * The regular allocator (using the buddy cache) supports a few tunables.
140  *
141  * /sys/fs/ext4/<partition>/mb_min_to_scan
142  * /sys/fs/ext4/<partition>/mb_max_to_scan
143  * /sys/fs/ext4/<partition>/mb_order2_req
144  *
145  * The regular allocator uses buddy scan only if the request len is a power
146  * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
147  * value of s_mb_order2_reqs can be tuned via
148  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
149  * stripe size (sbi->s_stripe), we try to search for contiguous blocks in
150  * stripe size. This should result in better allocation on RAID setups. If
151  * not, we search in the specific group using bitmap for best extents. The
152  * tunables min_to_scan and max_to_scan control the behaviour here.
153  * min_to_scan indicates how long mballoc __must__ look for a best
154  * extent and max_to_scan indicates how long mballoc __can__ look for a
155  * best extent in the found extents. Searching for the blocks starts with
156  * the group specified as the goal value in allocation context via
157  * ac_g_ex. Each group is first checked based on the criteria whether it
158  * can be used for allocation. ext4_mb_good_group explains how the groups are
159  * checked.
160  *
161  * Both prealloc spaces are populated as above. So for the first
162  * request we will hit the buddy cache which will result in this prealloc
163  * space getting filled. The prealloc space is then later used for the
164  * subsequent request.
165  */
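/*
 * A minimal sketch of the buddy-cache layout described above.  Each group
 * consumes two consecutive blocks in the buddy cache inode (bitmap first,
 * then buddy), so the page number and offset for a group follow directly
 * from the block size.  The helper below is illustrative only and is not
 * part of mballoc; the real lookups are open-coded in
 * ext4_mb_get_buddy_page_lock() and ext4_mb_load_buddy() further down.
 */
#if 0	/* illustrative sketch, not compiled */
static void mb_example_buddy_cache_layout(struct super_block *sb,
					  ext4_group_t group)
{
	int blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	int bitmap_block = group * 2;		/* even block: bitmap */
	int buddy_block = group * 2 + 1;	/* odd block: buddy   */

	pgoff_t bitmap_pnum = bitmap_block / blocks_per_page;
	int bitmap_poff = bitmap_block % blocks_per_page;
	pgoff_t buddy_pnum = buddy_block / blocks_per_page;
	int buddy_poff = buddy_block % blocks_per_page;

	/*
	 * With 4k blocks on 4k pages blocks_per_page == 1, so the bitmap
	 * and buddy of group G live on pages 2*G and 2*G + 1.  With 1k
	 * blocks blocks_per_page == 4, so one page holds two full groups.
	 */
}
#endif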
166 
167 /*
168  * mballoc operates on the following data:
169  *  - on-disk bitmap
170  *  - in-core buddy (actually includes buddy and bitmap)
171  *  - preallocation descriptors (PAs)
172  *
173  * there are two types of preallocations:
174  *  - inode
175  *    assigned to a specific inode and can be used for this inode only.
176  *    it describes part of inode's space preallocated to specific
177  *    physical blocks. any block from that preallocation can be used
178  *    independently. the descriptor just tracks the number of blocks left
179  *    unused. so, before taking some block from the descriptor, one must
180  *    make sure the corresponding logical block isn't allocated yet. this
181  *    also means that freeing any block within descriptor's range
182  *    must discard all preallocated blocks.
183  *  - locality group
184  *    assigned to specific locality group which does not translate to
185  *    permanent set of inodes: inode can join and leave group. space
186  *    from this type of preallocation can be used for any inode. thus
187  *    it's consumed from the beginning to the end.
188  *
189  * relation between them can be expressed as:
190  *    in-core buddy = on-disk bitmap + preallocation descriptors
191  *
192  * this means the blocks mballoc considers used are:
193  *  - allocated blocks (persistent)
194  *  - preallocated blocks (non-persistent)
195  *
196  * consistency in mballoc world means that at any time a block is either
197  * free or used in ALL structures. notice: "any time" should not be read
198  * literally -- time is discrete and delimited by locks.
199  *
200  *  to keep it simple, we don't use block numbers, instead we count number of
201  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
202  *
203  * all operations can be expressed as:
204  *  - init buddy:			buddy = on-disk + PAs
205  *  - new PA:				buddy += N; PA = N
206  *  - use inode PA:			on-disk += N; PA -= N
207  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
208  *  - use locality group PA		on-disk += N; PA -= N
209  *  - discard locality group PA		buddy -= PA; PA = 0
210  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
211  *        is used in real operation because we can't know actual used
212  *        bits from PA, only from on-disk bitmap
213  *
214  * if we follow this strict logic, then all operations above should be atomic.
215  * given some of them can block, we'd have to use something like semaphores
216  * killing performance on high-end SMP hardware. let's try to relax it using
217  * the following knowledge:
218  *  1) if buddy is referenced, it's already initialized
219  *  2) while block is used in buddy and the buddy is referenced,
220  *     nobody can re-allocate that block
221  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
222  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
223  *     on-disk bitmap if buddy has same bit set or/and PA covers corresponded
224  *     block
225  *
226  * so, now we're building a concurrency table:
227  *  - init buddy vs.
228  *    - new PA
229  *      blocks for PA are allocated in the buddy, buddy must be referenced
230  *      until PA is linked to allocation group to avoid concurrent buddy init
231  *    - use inode PA
232  *      we need to make sure that either on-disk bitmap or PA has uptodate data
233  *      given (3) we care that PA-=N operation doesn't interfere with init
234  *    - discard inode PA
235  *      the simplest way would be to have buddy initialized by the discard
236  *    - use locality group PA
237  *      again PA-=N must be serialized with init
238  *    - discard locality group PA
239  *      the simplest way would be to have buddy initialized by the discard
240  *  - new PA vs.
241  *    - use inode PA
242  *      i_data_sem serializes them
243  *    - discard inode PA
244  *      discard process must wait until PA isn't used by another process
245  *    - use locality group PA
246  *      some mutex should serialize them
247  *    - discard locality group PA
248  *      discard process must wait until PA isn't used by another process
249  *  - use inode PA
250  *    - use inode PA
251  *      i_data_sem or another mutex should serializes them
252  *    - discard inode PA
253  *      discard process must wait until PA isn't used by another process
254  *    - use locality group PA
255  *      nothing wrong here -- they're different PAs covering different blocks
256  *    - discard locality group PA
257  *      discard process must wait until PA isn't used by another process
258  *
259  * now we're ready to draw a few consequences:
260  *  - while a PA is referenced, no discard is possible
261  *  - PA is referenced until block isn't marked in on-disk bitmap
262  *  - PA changes only after on-disk bitmap
263  *  - discard must not compete with init. either init is done before
264  *    any discard or they're serialized somehow
265  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
266  *
267  * a special case is when we've drained a PA to empty. no need to modify the
268  * buddy in this case, but we should care about concurrent init
269  *
270  */
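/*
 * A toy model of the accounting rules above, counting blocks only (no block
 * numbers), as the comment suggests.  The names below are illustrative and
 * are not ext4 symbols; the point is just that the in-core buddy always
 * equals the on-disk bits plus the outstanding preallocation.
 */
#if 0	/* illustrative sketch, not compiled */
struct mb_toy {
	int ondisk;	/* bits set in the on-disk bitmap */
	int pa;		/* blocks still held by the PA    */
	int buddy;	/* bits set in the in-core buddy  */
};

static void toy_init_buddy(struct mb_toy *t)
{
	t->buddy = t->ondisk + t->pa;	/* init buddy: buddy = on-disk + PA */
}

static void toy_new_pa(struct mb_toy *t, int n)
{
	t->buddy += n;			/* new PA: buddy += N; PA = N */
	t->pa = n;
}

static void toy_use_pa(struct mb_toy *t, int n)
{
	t->ondisk += n;			/* use PA: on-disk += N; PA -= N */
	t->pa -= n;
}

static void toy_discard_group_pa(struct mb_toy *t)
{
	t->buddy -= t->pa;		/* discard locality group PA */
	t->pa = 0;
}
#endif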
271 
272  /*
273  * Logic in a few words:
274  *
275  *  - allocation:
276  *    load group
277  *    find blocks
278  *    mark bits in on-disk bitmap
279  *    release group
280  *
281  *  - use preallocation:
282  *    find proper PA (per-inode or group)
283  *    load group
284  *    mark bits in on-disk bitmap
285  *    release group
286  *    release PA
287  *
288  *  - free:
289  *    load group
290  *    mark bits in on-disk bitmap
291  *    release group
292  *
293  *  - discard preallocations in group:
294  *    mark PAs deleted
295  *    move them onto local list
296  *    load on-disk bitmap
297  *    load group
298  *    remove PA from object (inode or locality group)
299  *    mark free blocks in-core
300  *
301  *  - discard inode's preallocations:
302  */
303 
304 /*
305  * Locking rules
306  *
307  * Locks:
308  *  - bitlock on a group	(group)
309  *  - object (inode/locality)	(object)
310  *  - per-pa lock		(pa)
311  *
312  * Paths:
313  *  - new pa
314  *    object
315  *    group
316  *
317  *  - find and use pa:
318  *    pa
319  *
320  *  - release consumed pa:
321  *    pa
322  *    group
323  *    object
324  *
325  *  - generate in-core bitmap:
326  *    group
327  *        pa
328  *
329  *  - discard all for given object (inode, locality group):
330  *    object
331  *        pa
332  *    group
333  *
334  *  - discard all for given group:
335  *    group
336  *        pa
337  *    group
338  *        object
339  *
340  */
341 static struct kmem_cache *ext4_pspace_cachep;
342 static struct kmem_cache *ext4_ac_cachep;
343 static struct kmem_cache *ext4_free_data_cachep;
344 
345 /* We create slab caches for groupinfo data structures based on the
346  * superblock block size.  There will be one per mounted filesystem for
347  * each unique s_blocksize_bits */
348 #define NR_GRPINFO_CACHES 8
349 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
350 
351 static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
352 	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
353 	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
354 	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
355 };
356 
357 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
358 					ext4_group_t group);
359 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
360 						ext4_group_t group);
361 static void ext4_free_data_callback(struct super_block *sb,
362 				struct ext4_journal_cb_entry *jce, int rc);
363 
364 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
365 {
366 #if BITS_PER_LONG == 64
367 	*bit += ((unsigned long) addr & 7UL) << 3;
368 	addr = (void *) ((unsigned long) addr & ~7UL);
369 #elif BITS_PER_LONG == 32
370 	*bit += ((unsigned long) addr & 3UL) << 3;
371 	addr = (void *) ((unsigned long) addr & ~3UL);
372 #else
373 #error "how many bits you are?!"
374 #endif
375 	return addr;
376 }
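/*
 * Worked example (BITS_PER_LONG == 64): for an addr ending in ...0x1005 and
 * *bit == 3, the three low address bits (5) contribute 5 * 8 == 40 extra bit
 * positions, so the caller ends up operating on bit 43 of the unsigned long
 * at ...0x1000.
 */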
377 
378 static inline int mb_test_bit(int bit, void *addr)
379 {
380 	/*
381 	 * ext4_test_bit on architecture like powerpc
382 	 * needs unsigned long aligned address
383 	 */
384 	addr = mb_correct_addr_and_bit(&bit, addr);
385 	return ext4_test_bit(bit, addr);
386 }
387 
388 static inline void mb_set_bit(int bit, void *addr)
389 {
390 	addr = mb_correct_addr_and_bit(&bit, addr);
391 	ext4_set_bit(bit, addr);
392 }
393 
394 static inline void mb_clear_bit(int bit, void *addr)
395 {
396 	addr = mb_correct_addr_and_bit(&bit, addr);
397 	ext4_clear_bit(bit, addr);
398 }
399 
400 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
401 {
402 	int fix = 0, ret, tmpmax;
403 	addr = mb_correct_addr_and_bit(&fix, addr);
404 	tmpmax = max + fix;
405 	start += fix;
406 
407 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
408 	if (ret > max)
409 		return max;
410 	return ret;
411 }
412 
413 static inline int mb_find_next_bit(void *addr, int max, int start)
414 {
415 	int fix = 0, ret, tmpmax;
416 	addr = mb_correct_addr_and_bit(&fix, addr);
417 	tmpmax = max + fix;
418 	start += fix;
419 
420 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
421 	if (ret > max)
422 		return max;
423 	return ret;
424 }
425 
426 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
427 {
428 	char *bb;
429 
430 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
431 	BUG_ON(max == NULL);
432 
433 	if (order > e4b->bd_blkbits + 1) {
434 		*max = 0;
435 		return NULL;
436 	}
437 
438 	/* at order 0 we see each particular block */
439 	if (order == 0) {
440 		*max = 1 << (e4b->bd_blkbits + 3);
441 		return e4b->bd_bitmap;
442 	}
443 
444 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
445 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
446 
447 	return bb;
448 }
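/*
 * For example, with 4k blocks (bd_blkbits == 12) an order-0 lookup returns
 * the block bitmap itself with *max == 1 << 15 == 32768 bits; higher orders
 * return a slice of bd_buddy at s_mb_offsets[order], with the per-order
 * limit taken from s_mb_maxs[order].
 */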
449 
450 #ifdef DOUBLE_CHECK
451 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
452 			   int first, int count)
453 {
454 	int i;
455 	struct super_block *sb = e4b->bd_sb;
456 
457 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
458 		return;
459 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
460 	for (i = 0; i < count; i++) {
461 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
462 			ext4_fsblk_t blocknr;
463 
464 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
465 			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
466 			ext4_grp_locked_error(sb, e4b->bd_group,
467 					      inode ? inode->i_ino : 0,
468 					      blocknr,
469 					      "freeing block already freed "
470 					      "(bit %u)",
471 					      first + i);
472 		}
473 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
474 	}
475 }
476 
477 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
478 {
479 	int i;
480 
481 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
482 		return;
483 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
484 	for (i = 0; i < count; i++) {
485 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
486 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
487 	}
488 }
489 
490 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
491 {
492 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
493 		unsigned char *b1, *b2;
494 		int i;
495 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
496 		b2 = (unsigned char *) bitmap;
497 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
498 			if (b1[i] != b2[i]) {
499 				ext4_msg(e4b->bd_sb, KERN_ERR,
500 					 "corruption in group %u "
501 					 "at byte %u(%u): %x in copy != %x "
502 					 "on disk/prealloc",
503 					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
504 				BUG();
505 			}
506 		}
507 	}
508 }
509 
510 #else
511 static inline void mb_free_blocks_double(struct inode *inode,
512 				struct ext4_buddy *e4b, int first, int count)
513 {
514 	return;
515 }
516 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
517 						int first, int count)
518 {
519 	return;
520 }
521 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
522 {
523 	return;
524 }
525 #endif
526 
527 #ifdef AGGRESSIVE_CHECK
528 
529 #define MB_CHECK_ASSERT(assert)						\
530 do {									\
531 	if (!(assert)) {						\
532 		printk(KERN_EMERG					\
533 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
534 			function, file, line, # assert);		\
535 		BUG();							\
536 	}								\
537 } while (0)
538 
539 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
540 				const char *function, int line)
541 {
542 	struct super_block *sb = e4b->bd_sb;
543 	int order = e4b->bd_blkbits + 1;
544 	int max;
545 	int max2;
546 	int i;
547 	int j;
548 	int k;
549 	int count;
550 	struct ext4_group_info *grp;
551 	int fragments = 0;
552 	int fstart;
553 	struct list_head *cur;
554 	void *buddy;
555 	void *buddy2;
556 
557 	{
558 		static int mb_check_counter;
559 		if (mb_check_counter++ % 100 != 0)
560 			return 0;
561 	}
562 
563 	while (order > 1) {
564 		buddy = mb_find_buddy(e4b, order, &max);
565 		MB_CHECK_ASSERT(buddy);
566 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
567 		MB_CHECK_ASSERT(buddy2);
568 		MB_CHECK_ASSERT(buddy != buddy2);
569 		MB_CHECK_ASSERT(max * 2 == max2);
570 
571 		count = 0;
572 		for (i = 0; i < max; i++) {
573 
574 			if (mb_test_bit(i, buddy)) {
575 				/* only single bit in buddy2 may be 1 */
576 				if (!mb_test_bit(i << 1, buddy2)) {
577 					MB_CHECK_ASSERT(
578 						mb_test_bit((i<<1)+1, buddy2));
579 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
580 					MB_CHECK_ASSERT(
581 						mb_test_bit(i << 1, buddy2));
582 				}
583 				continue;
584 			}
585 
586 			/* both bits in buddy2 must be 1 */
587 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
588 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
589 
590 			for (j = 0; j < (1 << order); j++) {
591 				k = (i * (1 << order)) + j;
592 				MB_CHECK_ASSERT(
593 					!mb_test_bit(k, e4b->bd_bitmap));
594 			}
595 			count++;
596 		}
597 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
598 		order--;
599 	}
600 
601 	fstart = -1;
602 	buddy = mb_find_buddy(e4b, 0, &max);
603 	for (i = 0; i < max; i++) {
604 		if (!mb_test_bit(i, buddy)) {
605 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
606 			if (fstart == -1) {
607 				fragments++;
608 				fstart = i;
609 			}
610 			continue;
611 		}
612 		fstart = -1;
613 		/* check used bits only */
614 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
615 			buddy2 = mb_find_buddy(e4b, j, &max2);
616 			k = i >> j;
617 			MB_CHECK_ASSERT(k < max2);
618 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
619 		}
620 	}
621 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
622 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
623 
624 	grp = ext4_get_group_info(sb, e4b->bd_group);
625 	list_for_each(cur, &grp->bb_prealloc_list) {
626 		ext4_group_t groupnr;
627 		struct ext4_prealloc_space *pa;
628 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
629 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
630 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
631 		for (i = 0; i < pa->pa_len; i++)
632 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
633 	}
634 	return 0;
635 }
636 #undef MB_CHECK_ASSERT
637 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
638 					__FILE__, __func__, __LINE__)
639 #else
640 #define mb_check_buddy(e4b)
641 #endif
642 
643 /*
644  * Divide the blocks starting at @first with length @len into
645  * smaller chunks of power-of-2 sizes.
646  * Clear the bits in the buddy bitmap that the chunk(s) cover,
647  * then increase bb_counters[] for the corresponding chunk size.
648  */
649 static void ext4_mb_mark_free_simple(struct super_block *sb,
650 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
651 					struct ext4_group_info *grp)
652 {
653 	struct ext4_sb_info *sbi = EXT4_SB(sb);
654 	ext4_grpblk_t min;
655 	ext4_grpblk_t max;
656 	ext4_grpblk_t chunk;
657 	unsigned short border;
658 
659 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
660 
661 	border = 2 << sb->s_blocksize_bits;
662 
663 	while (len > 0) {
664 		/* find how many blocks can be covered since this position */
665 		max = ffs(first | border) - 1;
666 
667 		/* find how many blocks of power 2 we need to mark */
668 		min = fls(len) - 1;
669 
670 		if (max < min)
671 			min = max;
672 		chunk = 1 << min;
673 
674 		/* mark multiblock chunks only */
675 		grp->bb_counters[min]++;
676 		if (min > 0)
677 			mb_clear_bit(first >> min,
678 				     buddy + sbi->s_mb_offsets[min]);
679 
680 		len -= chunk;
681 		first += chunk;
682 	}
683 }
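/*
 * Worked example: first == 13, len == 7 is split into cluster 13 (order 0),
 * clusters 14-15 (order 1) and clusters 16-19 (order 2): bb_counters[0],
 * [1] and [2] are each bumped once, and the order-1 and order-2 buddy bits
 * covering 14-15 and 16-19 are cleared (order-0 chunks touch no buddy bits).
 */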
684 
685 /*
686  * Cache the order of the largest free extent we have available in this block
687  * group.
688  */
689 static void
690 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
691 {
692 	int i;
693 	int bits;
694 
695 	grp->bb_largest_free_order = -1; /* uninit */
696 
697 	bits = sb->s_blocksize_bits + 1;
698 	for (i = bits; i >= 0; i--) {
699 		if (grp->bb_counters[i] > 0) {
700 			grp->bb_largest_free_order = i;
701 			break;
702 		}
703 	}
704 }
705 
706 static noinline_for_stack
707 void ext4_mb_generate_buddy(struct super_block *sb,
708 				void *buddy, void *bitmap, ext4_group_t group)
709 {
710 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
711 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
712 	ext4_grpblk_t i = 0;
713 	ext4_grpblk_t first;
714 	ext4_grpblk_t len;
715 	unsigned free = 0;
716 	unsigned fragments = 0;
717 	unsigned long long period = get_cycles();
718 
719 	/* initialize buddy from bitmap which is aggregation
720 	 * of on-disk bitmap and preallocations */
721 	i = mb_find_next_zero_bit(bitmap, max, 0);
722 	grp->bb_first_free = i;
723 	while (i < max) {
724 		fragments++;
725 		first = i;
726 		i = mb_find_next_bit(bitmap, max, i);
727 		len = i - first;
728 		free += len;
729 		if (len > 1)
730 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
731 		else
732 			grp->bb_counters[0]++;
733 		if (i < max)
734 			i = mb_find_next_zero_bit(bitmap, max, i);
735 	}
736 	grp->bb_fragments = fragments;
737 
738 	if (free != grp->bb_free) {
739 		ext4_grp_locked_error(sb, group, 0, 0,
740 				      "%u clusters in bitmap, %u in gd",
741 				      free, grp->bb_free);
742 		/*
743 		 * If we intend to continue, we consider the group descriptor
744 		 * corrupt and update bb_free using the bitmap value
745 		 */
746 		grp->bb_free = free;
747 	}
748 	mb_set_largest_free_order(sb, grp);
749 
750 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
751 
752 	period = get_cycles() - period;
753 	spin_lock(&EXT4_SB(sb)->s_bal_lock);
754 	EXT4_SB(sb)->s_mb_buddies_generated++;
755 	EXT4_SB(sb)->s_mb_generation_time += period;
756 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
757 }
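/*
 * Example: in a 16-cluster group where clusters 4-7 and 9 are the only free
 * ones, the scan sets bb_first_free = 4, free = 5 and bb_fragments = 2; the
 * 4-cluster run is handed to ext4_mb_mark_free_simple() (one order-2 chunk)
 * and the lone cluster 9 just bumps bb_counters[0].
 */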
758 
759 /* The buddy information is attached to the buddy cache inode
760  * for convenience. The information regarding each group
761  * is loaded via ext4_mb_load_buddy. The information involves
762  * the block bitmap and buddy information, and is
763  * stored in the inode as
764  *
765  * {                        page                        }
766  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
767  *
768  *
769  * one block each for bitmap and buddy information.
770  * So for each group we take up 2 blocks. A page can
771  * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
772  * So it can have information regarding groups_per_page which
773  * is blocks_per_page/2
774  *
775  * Locking note:  This routine takes the block group lock of all groups
776  * for this page; do not hold this lock when calling this routine!
777  */
778 
779 static int ext4_mb_init_cache(struct page *page, char *incore)
780 {
781 	ext4_group_t ngroups;
782 	int blocksize;
783 	int blocks_per_page;
784 	int groups_per_page;
785 	int err = 0;
786 	int i;
787 	ext4_group_t first_group, group;
788 	int first_block;
789 	struct super_block *sb;
790 	struct buffer_head *bhs;
791 	struct buffer_head **bh;
792 	struct inode *inode;
793 	char *data;
794 	char *bitmap;
795 	struct ext4_group_info *grinfo;
796 
797 	mb_debug(1, "init page %lu\n", page->index);
798 
799 	inode = page->mapping->host;
800 	sb = inode->i_sb;
801 	ngroups = ext4_get_groups_count(sb);
802 	blocksize = 1 << inode->i_blkbits;
803 	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
804 
805 	groups_per_page = blocks_per_page >> 1;
806 	if (groups_per_page == 0)
807 		groups_per_page = 1;
808 
809 	/* allocate buffer_heads to read bitmaps */
810 	if (groups_per_page > 1) {
811 		i = sizeof(struct buffer_head *) * groups_per_page;
812 		bh = kzalloc(i, GFP_NOFS);
813 		if (bh == NULL) {
814 			err = -ENOMEM;
815 			goto out;
816 		}
817 	} else
818 		bh = &bhs;
819 
820 	first_group = page->index * blocks_per_page / 2;
821 
822 	/* read all groups the page covers into the cache */
823 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
824 		if (group >= ngroups)
825 			break;
826 
827 		grinfo = ext4_get_group_info(sb, group);
828 		/*
829 		 * If page is uptodate then we came here after online resize
830 		 * which added some new uninitialized group info structs, so
831 		 * we must skip all initialized uptodate buddies on the page,
832 		 * which may be currently in use by an allocating task.
833 		 */
834 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
835 			bh[i] = NULL;
836 			continue;
837 		}
838 		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
839 			err = -ENOMEM;
840 			goto out;
841 		}
842 		mb_debug(1, "read bitmap for group %u\n", group);
843 	}
844 
845 	/* wait for I/O completion */
846 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
847 		if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) {
848 			err = -EIO;
849 			goto out;
850 		}
851 	}
852 
853 	first_block = page->index * blocks_per_page;
854 	for (i = 0; i < blocks_per_page; i++) {
855 		int group;
856 
857 		group = (first_block + i) >> 1;
858 		if (group >= ngroups)
859 			break;
860 
861 		if (!bh[group - first_group])
862 			/* skip initialized uptodate buddy */
863 			continue;
864 
865 		/*
866 		 * data carries information regarding this
867 		 * particular group in the format specified
868 		 * above
869 		 *
870 		 */
871 		data = page_address(page) + (i * blocksize);
872 		bitmap = bh[group - first_group]->b_data;
873 
874 		/*
875 		 * We place the buddy block and bitmap block
876 		 * close together
877 		 */
878 		if ((first_block + i) & 1) {
879 			/* this is block of buddy */
880 			BUG_ON(incore == NULL);
881 			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
882 				group, page->index, i * blocksize);
883 			trace_ext4_mb_buddy_bitmap_load(sb, group);
884 			grinfo = ext4_get_group_info(sb, group);
885 			grinfo->bb_fragments = 0;
886 			memset(grinfo->bb_counters, 0,
887 			       sizeof(*grinfo->bb_counters) *
888 				(sb->s_blocksize_bits+2));
889 			/*
890 			 * incore got set to the group block bitmap below
891 			 */
892 			ext4_lock_group(sb, group);
893 			/* init the buddy */
894 			memset(data, 0xff, blocksize);
895 			ext4_mb_generate_buddy(sb, data, incore, group);
896 			ext4_unlock_group(sb, group);
897 			incore = NULL;
898 		} else {
899 			/* this is block of bitmap */
900 			BUG_ON(incore != NULL);
901 			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
902 				group, page->index, i * blocksize);
903 			trace_ext4_mb_bitmap_load(sb, group);
904 
905 			/* see comments in ext4_mb_put_pa() */
906 			ext4_lock_group(sb, group);
907 			memcpy(data, bitmap, blocksize);
908 
909 			/* mark all preallocated blks used in in-core bitmap */
910 			ext4_mb_generate_from_pa(sb, data, group);
911 			ext4_mb_generate_from_freelist(sb, data, group);
912 			ext4_unlock_group(sb, group);
913 
914 			/* set incore so that the buddy information can be
915 			 * generated using this
916 			 */
917 			incore = data;
918 		}
919 	}
920 	SetPageUptodate(page);
921 
922 out:
923 	if (bh) {
924 		for (i = 0; i < groups_per_page; i++)
925 			brelse(bh[i]);
926 		if (bh != &bhs)
927 			kfree(bh);
928 	}
929 	return err;
930 }
931 
932 /*
933  * Lock the buddy and bitmap pages. This makes sure another parallel init_group
934  * on the same buddy page doesn't happen while we hold the buddy page lock.
935  * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
936  * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
937  */
938 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
939 		ext4_group_t group, struct ext4_buddy *e4b)
940 {
941 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
942 	int block, pnum, poff;
943 	int blocks_per_page;
944 	struct page *page;
945 
946 	e4b->bd_buddy_page = NULL;
947 	e4b->bd_bitmap_page = NULL;
948 
949 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
950 	/*
951 	 * the buddy cache inode stores the block bitmap
952 	 * and buddy information in consecutive blocks.
953 	 * So for each group we need two blocks.
954 	 */
955 	block = group * 2;
956 	pnum = block / blocks_per_page;
957 	poff = block % blocks_per_page;
958 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
959 	if (!page)
960 		return -EIO;
961 	BUG_ON(page->mapping != inode->i_mapping);
962 	e4b->bd_bitmap_page = page;
963 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
964 
965 	if (blocks_per_page >= 2) {
966 		/* buddy and bitmap are on the same page */
967 		return 0;
968 	}
969 
970 	block++;
971 	pnum = block / blocks_per_page;
972 	poff = block % blocks_per_page;
973 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
974 	if (!page)
975 		return -EIO;
976 	BUG_ON(page->mapping != inode->i_mapping);
977 	e4b->bd_buddy_page = page;
978 	return 0;
979 }
980 
981 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
982 {
983 	if (e4b->bd_bitmap_page) {
984 		unlock_page(e4b->bd_bitmap_page);
985 		page_cache_release(e4b->bd_bitmap_page);
986 	}
987 	if (e4b->bd_buddy_page) {
988 		unlock_page(e4b->bd_buddy_page);
989 		page_cache_release(e4b->bd_buddy_page);
990 	}
991 }
992 
993 /*
994  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
995  * block group lock of all groups for this page; do not hold the BG lock when
996  * calling this routine!
997  */
998 static noinline_for_stack
999 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
1000 {
1001 
1002 	struct ext4_group_info *this_grp;
1003 	struct ext4_buddy e4b;
1004 	struct page *page;
1005 	int ret = 0;
1006 
1007 	mb_debug(1, "init group %u\n", group);
1008 	this_grp = ext4_get_group_info(sb, group);
1009 	/*
1010 	 * This ensures that we don't reinit the buddy cache
1011 	 * page which maps to the group from which we are already
1012 	 * allocating. If we are looking at the buddy cache we would
1013 	 * have taken a reference using ext4_mb_load_buddy and that
1014 	 * would have pinned the buddy page in the page cache.
1015 	 */
1016 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
1017 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1018 		/*
1019 		 * somebody initialized the group
1020 		 * return without doing anything
1021 		 */
1022 		goto err;
1023 	}
1024 
1025 	page = e4b.bd_bitmap_page;
1026 	ret = ext4_mb_init_cache(page, NULL);
1027 	if (ret)
1028 		goto err;
1029 	if (!PageUptodate(page)) {
1030 		ret = -EIO;
1031 		goto err;
1032 	}
1033 	mark_page_accessed(page);
1034 
1035 	if (e4b.bd_buddy_page == NULL) {
1036 		/*
1037 		 * If both the bitmap and buddy are in
1038 		 * the same page we don't need to force
1039 		 * init the buddy
1040 		 */
1041 		ret = 0;
1042 		goto err;
1043 	}
1044 	/* init buddy cache */
1045 	page = e4b.bd_buddy_page;
1046 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
1047 	if (ret)
1048 		goto err;
1049 	if (!PageUptodate(page)) {
1050 		ret = -EIO;
1051 		goto err;
1052 	}
1053 	mark_page_accessed(page);
1054 err:
1055 	ext4_mb_put_buddy_page_lock(&e4b);
1056 	return ret;
1057 }
1058 
1059 /*
1060  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1061  * block group lock of all groups for this page; do not hold the BG lock when
1062  * calling this routine!
1063  */
1064 static noinline_for_stack int
1065 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1066 					struct ext4_buddy *e4b)
1067 {
1068 	int blocks_per_page;
1069 	int block;
1070 	int pnum;
1071 	int poff;
1072 	struct page *page;
1073 	int ret;
1074 	struct ext4_group_info *grp;
1075 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1076 	struct inode *inode = sbi->s_buddy_cache;
1077 
1078 	mb_debug(1, "load group %u\n", group);
1079 
1080 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1081 	grp = ext4_get_group_info(sb, group);
1082 
1083 	e4b->bd_blkbits = sb->s_blocksize_bits;
1084 	e4b->bd_info = grp;
1085 	e4b->bd_sb = sb;
1086 	e4b->bd_group = group;
1087 	e4b->bd_buddy_page = NULL;
1088 	e4b->bd_bitmap_page = NULL;
1089 
1090 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1091 		/*
1092 		 * we need full data about the group
1093 		 * to make a good selection
1094 		 */
1095 		ret = ext4_mb_init_group(sb, group);
1096 		if (ret)
1097 			return ret;
1098 	}
1099 
1100 	/*
1101 	 * the buddy cache inode stores the block bitmap
1102 	 * and buddy information in consecutive blocks.
1103 	 * So for each group we need two blocks.
1104 	 */
1105 	block = group * 2;
1106 	pnum = block / blocks_per_page;
1107 	poff = block % blocks_per_page;
1108 
1109 	/* we could use find_or_create_page(), but it locks the page,
1110 	 * which we'd like to avoid in the fast path ... */
1111 	page = find_get_page(inode->i_mapping, pnum);
1112 	if (page == NULL || !PageUptodate(page)) {
1113 		if (page)
1114 			/*
1115 			 * drop the page reference and try
1116 			 * to get the page with the lock. If it is
1117 			 * not uptodate, that implies somebody
1118 			 * just created the page but has yet to
1119 			 * initialize it. So wait for it to be
1120 			 * initialized.
1121 			 */
1122 			page_cache_release(page);
1123 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1124 		if (page) {
1125 			BUG_ON(page->mapping != inode->i_mapping);
1126 			if (!PageUptodate(page)) {
1127 				ret = ext4_mb_init_cache(page, NULL);
1128 				if (ret) {
1129 					unlock_page(page);
1130 					goto err;
1131 				}
1132 				mb_cmp_bitmaps(e4b, page_address(page) +
1133 					       (poff * sb->s_blocksize));
1134 			}
1135 			unlock_page(page);
1136 		}
1137 	}
1138 	if (page == NULL || !PageUptodate(page)) {
1139 		ret = -EIO;
1140 		goto err;
1141 	}
1142 	e4b->bd_bitmap_page = page;
1143 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1144 	mark_page_accessed(page);
1145 
1146 	block++;
1147 	pnum = block / blocks_per_page;
1148 	poff = block % blocks_per_page;
1149 
1150 	page = find_get_page(inode->i_mapping, pnum);
1151 	if (page == NULL || !PageUptodate(page)) {
1152 		if (page)
1153 			page_cache_release(page);
1154 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1155 		if (page) {
1156 			BUG_ON(page->mapping != inode->i_mapping);
1157 			if (!PageUptodate(page)) {
1158 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
1159 				if (ret) {
1160 					unlock_page(page);
1161 					goto err;
1162 				}
1163 			}
1164 			unlock_page(page);
1165 		}
1166 	}
1167 	if (page == NULL || !PageUptodate(page)) {
1168 		ret = -EIO;
1169 		goto err;
1170 	}
1171 	e4b->bd_buddy_page = page;
1172 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1173 	mark_page_accessed(page);
1174 
1175 	BUG_ON(e4b->bd_bitmap_page == NULL);
1176 	BUG_ON(e4b->bd_buddy_page == NULL);
1177 
1178 	return 0;
1179 
1180 err:
1181 	if (page)
1182 		page_cache_release(page);
1183 	if (e4b->bd_bitmap_page)
1184 		page_cache_release(e4b->bd_bitmap_page);
1185 	if (e4b->bd_buddy_page)
1186 		page_cache_release(e4b->bd_buddy_page);
1187 	e4b->bd_buddy = NULL;
1188 	e4b->bd_bitmap = NULL;
1189 	return ret;
1190 }
1191 
1192 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1193 {
1194 	if (e4b->bd_bitmap_page)
1195 		page_cache_release(e4b->bd_bitmap_page);
1196 	if (e4b->bd_buddy_page)
1197 		page_cache_release(e4b->bd_buddy_page);
1198 }
1199 
1200 
1201 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1202 {
1203 	int order = 1;
1204 	void *bb;
1205 
1206 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1207 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1208 
1209 	bb = e4b->bd_buddy;
1210 	while (order <= e4b->bd_blkbits + 1) {
1211 		block = block >> 1;
1212 		if (!mb_test_bit(block, bb)) {
1213 			/* this block is part of buddy of order 'order' */
1214 			return order;
1215 		}
1216 		bb += 1 << (e4b->bd_blkbits - order);
1217 		order++;
1218 	}
1219 	return 0;
1220 }
1221 
1222 static void mb_clear_bits(void *bm, int cur, int len)
1223 {
1224 	__u32 *addr;
1225 
1226 	len = cur + len;
1227 	while (cur < len) {
1228 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1229 			/* fast path: clear whole word at once */
1230 			addr = bm + (cur >> 3);
1231 			*addr = 0;
1232 			cur += 32;
1233 			continue;
1234 		}
1235 		mb_clear_bit(cur, bm);
1236 		cur++;
1237 	}
1238 }
1239 
1240 void ext4_set_bits(void *bm, int cur, int len)
1241 {
1242 	__u32 *addr;
1243 
1244 	len = cur + len;
1245 	while (cur < len) {
1246 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1247 			/* fast path: set whole word at once */
1248 			addr = bm + (cur >> 3);
1249 			*addr = 0xffffffff;
1250 			cur += 32;
1251 			continue;
1252 		}
1253 		mb_set_bit(cur, bm);
1254 		cur++;
1255 	}
1256 }
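/*
 * Worked example: ext4_set_bits(bm, 5, 60) sets bits 5..31 one at a time,
 * sets bits 32..63 with a single 32-bit word store (cur is word aligned and
 * at least 32 bits remain), then sets bit 64 individually.  mb_clear_bits()
 * takes the same fast path with zero stores.
 */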
1257 
1258 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1259 			  int first, int count)
1260 {
1261 	int block = 0;
1262 	int max = 0;
1263 	int order;
1264 	void *buddy;
1265 	void *buddy2;
1266 	struct super_block *sb = e4b->bd_sb;
1267 
1268 	BUG_ON(first + count > (sb->s_blocksize << 3));
1269 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1270 	mb_check_buddy(e4b);
1271 	mb_free_blocks_double(inode, e4b, first, count);
1272 
1273 	e4b->bd_info->bb_free += count;
1274 	if (first < e4b->bd_info->bb_first_free)
1275 		e4b->bd_info->bb_first_free = first;
1276 
1277 	/* let's maintain fragments counter */
1278 	if (first != 0)
1279 		block = !mb_test_bit(first - 1, e4b->bd_bitmap);
1280 	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
1281 		max = !mb_test_bit(first + count, e4b->bd_bitmap);
1282 	if (block && max)
1283 		e4b->bd_info->bb_fragments--;
1284 	else if (!block && !max)
1285 		e4b->bd_info->bb_fragments++;
1286 
1287 	/* let's maintain buddy itself */
1288 	while (count-- > 0) {
1289 		block = first++;
1290 		order = 0;
1291 
1292 		if (!mb_test_bit(block, e4b->bd_bitmap)) {
1293 			ext4_fsblk_t blocknr;
1294 
1295 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1296 			blocknr += EXT4_C2B(EXT4_SB(sb), block);
1297 			ext4_grp_locked_error(sb, e4b->bd_group,
1298 					      inode ? inode->i_ino : 0,
1299 					      blocknr,
1300 					      "freeing already freed block "
1301 					      "(bit %u)", block);
1302 		}
1303 		mb_clear_bit(block, e4b->bd_bitmap);
1304 		e4b->bd_info->bb_counters[order]++;
1305 
1306 		/* start of the buddy */
1307 		buddy = mb_find_buddy(e4b, order, &max);
1308 
1309 		do {
1310 			block &= ~1UL;
1311 			if (mb_test_bit(block, buddy) ||
1312 					mb_test_bit(block + 1, buddy))
1313 				break;
1314 
1315 			/* both the buddies are free, try to coalesce them */
1316 			buddy2 = mb_find_buddy(e4b, order + 1, &max);
1317 
1318 			if (!buddy2)
1319 				break;
1320 
1321 			if (order > 0) {
1322 				/* for special purposes, we don't set
1323 				 * free bits in bitmap */
1324 				mb_set_bit(block, buddy);
1325 				mb_set_bit(block + 1, buddy);
1326 			}
1327 			e4b->bd_info->bb_counters[order]--;
1328 			e4b->bd_info->bb_counters[order]--;
1329 
1330 			block = block >> 1;
1331 			order++;
1332 			e4b->bd_info->bb_counters[order]++;
1333 
1334 			mb_clear_bit(block, buddy2);
1335 			buddy = buddy2;
1336 		} while (1);
1337 	}
1338 	mb_set_largest_free_order(sb, e4b->bd_info);
1339 	mb_check_buddy(e4b);
1340 }
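/*
 * Coalescing example: with clusters 0-3 in use and 4, 6-7 already free,
 * freeing cluster 5 first merges the order-0 pair 4-5 into order-1 chunk 2,
 * then merges that with its free order-1 buddy (clusters 6-7) into the
 * order-2 chunk covering 4-7, so bb_counters ends up with a single order-2
 * entry for that range.
 */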
1341 
1342 static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
1343 				int needed, struct ext4_free_extent *ex)
1344 {
1345 	int next = block;
1346 	int max;
1347 	void *buddy;
1348 
1349 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1350 	BUG_ON(ex == NULL);
1351 
1352 	buddy = mb_find_buddy(e4b, order, &max);
1353 	BUG_ON(buddy == NULL);
1354 	BUG_ON(block >= max);
1355 	if (mb_test_bit(block, buddy)) {
1356 		ex->fe_len = 0;
1357 		ex->fe_start = 0;
1358 		ex->fe_group = 0;
1359 		return 0;
1360 	}
1361 
1362 	/* FIXME drop order completely ? */
1363 	if (likely(order == 0)) {
1364 		/* find actual order */
1365 		order = mb_find_order_for_block(e4b, block);
1366 		block = block >> order;
1367 	}
1368 
1369 	ex->fe_len = 1 << order;
1370 	ex->fe_start = block << order;
1371 	ex->fe_group = e4b->bd_group;
1372 
1373 	/* calc difference from given start */
1374 	next = next - ex->fe_start;
1375 	ex->fe_len -= next;
1376 	ex->fe_start += next;
1377 
1378 	while (needed > ex->fe_len &&
1379 	       (buddy = mb_find_buddy(e4b, order, &max))) {
1380 
1381 		if (block + 1 >= max)
1382 			break;
1383 
1384 		next = (block + 1) * (1 << order);
1385 		if (mb_test_bit(next, e4b->bd_bitmap))
1386 			break;
1387 
1388 		order = mb_find_order_for_block(e4b, next);
1389 
1390 		block = next >> order;
1391 		ex->fe_len += 1 << order;
1392 	}
1393 
1394 	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1395 	return ex->fe_len;
1396 }
1397 
1398 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1399 {
1400 	int ord;
1401 	int mlen = 0;
1402 	int max = 0;
1403 	int cur;
1404 	int start = ex->fe_start;
1405 	int len = ex->fe_len;
1406 	unsigned ret = 0;
1407 	int len0 = len;
1408 	void *buddy;
1409 
1410 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1411 	BUG_ON(e4b->bd_group != ex->fe_group);
1412 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1413 	mb_check_buddy(e4b);
1414 	mb_mark_used_double(e4b, start, len);
1415 
1416 	e4b->bd_info->bb_free -= len;
1417 	if (e4b->bd_info->bb_first_free == start)
1418 		e4b->bd_info->bb_first_free += len;
1419 
1420 	/* let's maintain fragments counter */
1421 	if (start != 0)
1422 		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1423 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1424 		max = !mb_test_bit(start + len, e4b->bd_bitmap);
1425 	if (mlen && max)
1426 		e4b->bd_info->bb_fragments++;
1427 	else if (!mlen && !max)
1428 		e4b->bd_info->bb_fragments--;
1429 
1430 	/* let's maintain buddy itself */
1431 	while (len) {
1432 		ord = mb_find_order_for_block(e4b, start);
1433 
1434 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1435 			/* the whole chunk may be allocated at once! */
1436 			mlen = 1 << ord;
1437 			buddy = mb_find_buddy(e4b, ord, &max);
1438 			BUG_ON((start >> ord) >= max);
1439 			mb_set_bit(start >> ord, buddy);
1440 			e4b->bd_info->bb_counters[ord]--;
1441 			start += mlen;
1442 			len -= mlen;
1443 			BUG_ON(len < 0);
1444 			continue;
1445 		}
1446 
1447 		/* store for history */
1448 		if (ret == 0)
1449 			ret = len | (ord << 16);
1450 
1451 		/* we have to split large buddy */
1452 		BUG_ON(ord <= 0);
1453 		buddy = mb_find_buddy(e4b, ord, &max);
1454 		mb_set_bit(start >> ord, buddy);
1455 		e4b->bd_info->bb_counters[ord]--;
1456 
1457 		ord--;
1458 		cur = (start >> ord) & ~1U;
1459 		buddy = mb_find_buddy(e4b, ord, &max);
1460 		mb_clear_bit(cur, buddy);
1461 		mb_clear_bit(cur + 1, buddy);
1462 		e4b->bd_info->bb_counters[ord]++;
1463 		e4b->bd_info->bb_counters[ord]++;
1464 	}
1465 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1466 
1467 	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1468 	mb_check_buddy(e4b);
1469 
1470 	return ret;
1471 }
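/*
 * Splitting example: marking 3 clusters used at fe_start == 4 when 4-7 is a
 * free order-2 chunk first splits it into two order-1 halves, takes 4-5 as a
 * whole order-1 chunk, then splits 6-7 so that cluster 6 can be taken at
 * order 0; cluster 7 is left behind as an order-0 free chunk, and the return
 * value records the first split as len | (ord << 16) for ac_tail/ac_buddy.
 */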
1472 
1473 /*
1474  * Must be called under group lock!
1475  */
1476 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1477 					struct ext4_buddy *e4b)
1478 {
1479 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1480 	int ret;
1481 
1482 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1483 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1484 
1485 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1486 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1487 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1488 
1489 	/* preallocation can change ac_b_ex, thus we store actually
1490 	 * allocated blocks for history */
1491 	ac->ac_f_ex = ac->ac_b_ex;
1492 
1493 	ac->ac_status = AC_STATUS_FOUND;
1494 	ac->ac_tail = ret & 0xffff;
1495 	ac->ac_buddy = ret >> 16;
1496 
1497 	/*
1498 	 * take the page reference. We want the page to be pinned
1499 	 * so that we don't get an ext4_mb_init_cache() call for this
1500 	 * group until we update the bitmap. Otherwise we could
1501 	 * double allocate blocks. The reference is dropped
1502 	 * in ext4_mb_release_context
1503 	 */
1504 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1505 	get_page(ac->ac_bitmap_page);
1506 	ac->ac_buddy_page = e4b->bd_buddy_page;
1507 	get_page(ac->ac_buddy_page);
1508 	/* store last allocated for subsequent stream allocation */
1509 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1510 		spin_lock(&sbi->s_md_lock);
1511 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1512 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1513 		spin_unlock(&sbi->s_md_lock);
1514 	}
1515 }
1516 
1517 /*
1518  * regular allocator, for general purposes allocation
1519  */
1520 
1521 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1522 					struct ext4_buddy *e4b,
1523 					int finish_group)
1524 {
1525 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1526 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1527 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1528 	struct ext4_free_extent ex;
1529 	int max;
1530 
1531 	if (ac->ac_status == AC_STATUS_FOUND)
1532 		return;
1533 	/*
1534 	 * We don't want to scan for a whole year
1535 	 */
1536 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1537 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1538 		ac->ac_status = AC_STATUS_BREAK;
1539 		return;
1540 	}
1541 
1542 	/*
1543 	 * Haven't found good chunk so far, let's continue
1544 	 */
1545 	if (bex->fe_len < gex->fe_len)
1546 		return;
1547 
1548 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1549 			&& bex->fe_group == e4b->bd_group) {
1550 		/* recheck chunk's availability - we don't know
1551 		 * when it was found (within this lock-unlock
1552 		 * period or not) */
1553 		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
1554 		if (max >= gex->fe_len) {
1555 			ext4_mb_use_best_found(ac, e4b);
1556 			return;
1557 		}
1558 	}
1559 }
1560 
1561 /*
1562  * The routine checks whether found extent is good enough. If it is,
1563  * then the extent gets marked used and flag is set to the context
1564  * to stop scanning. Otherwise, the extent is compared with the
1565  * previous found extent and if new one is better, then it's stored
1566  * in the context. Later, the best found extent will be used, if
1567  * mballoc can't find good enough extent.
1568  *
1569  * FIXME: real allocation policy is to be designed yet!
1570  */
1571 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1572 					struct ext4_free_extent *ex,
1573 					struct ext4_buddy *e4b)
1574 {
1575 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1576 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1577 
1578 	BUG_ON(ex->fe_len <= 0);
1579 	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1580 	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1581 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1582 
1583 	ac->ac_found++;
1584 
1585 	/*
1586 	 * The special case - take what you catch first
1587 	 */
1588 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1589 		*bex = *ex;
1590 		ext4_mb_use_best_found(ac, e4b);
1591 		return;
1592 	}
1593 
1594 	/*
1595 	 * Let's check whether the chunk is good enough
1596 	 */
1597 	if (ex->fe_len == gex->fe_len) {
1598 		*bex = *ex;
1599 		ext4_mb_use_best_found(ac, e4b);
1600 		return;
1601 	}
1602 
1603 	/*
1604 	 * If this is first found extent, just store it in the context
1605 	 */
1606 	if (bex->fe_len == 0) {
1607 		*bex = *ex;
1608 		return;
1609 	}
1610 
1611 	/*
1612 	 * If new found extent is better, store it in the context
1613 	 */
1614 	if (bex->fe_len < gex->fe_len) {
1615 		/* if the request isn't satisfied, any found extent
1616 		 * larger than previous best one is better */
1617 		if (ex->fe_len > bex->fe_len)
1618 			*bex = *ex;
1619 	} else if (ex->fe_len > gex->fe_len) {
1620 		/* if the request is satisfied, then we try to find
1621 		 * an extent that still satisfies the request, but is
1622 		 * smaller than previous one */
1623 		if (ex->fe_len < bex->fe_len)
1624 			*bex = *ex;
1625 	}
1626 
1627 	ext4_mb_check_limits(ac, e4b, 0);
1628 }
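/*
 * Example with a goal of 8 clusters: finding extents of length 5, 16, 10 and
 * 8 in that order keeps 5 (better than nothing), replaces it with 16 (first
 * extent to satisfy the goal), shrinks to 10 (still >= goal but closer to
 * it), and finally takes the exact 8 immediately via
 * ext4_mb_use_best_found().
 */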
1629 
1630 static noinline_for_stack
1631 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1632 					struct ext4_buddy *e4b)
1633 {
1634 	struct ext4_free_extent ex = ac->ac_b_ex;
1635 	ext4_group_t group = ex.fe_group;
1636 	int max;
1637 	int err;
1638 
1639 	BUG_ON(ex.fe_len <= 0);
1640 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1641 	if (err)
1642 		return err;
1643 
1644 	ext4_lock_group(ac->ac_sb, group);
1645 	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
1646 
1647 	if (max > 0) {
1648 		ac->ac_b_ex = ex;
1649 		ext4_mb_use_best_found(ac, e4b);
1650 	}
1651 
1652 	ext4_unlock_group(ac->ac_sb, group);
1653 	ext4_mb_unload_buddy(e4b);
1654 
1655 	return 0;
1656 }
1657 
1658 static noinline_for_stack
1659 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1660 				struct ext4_buddy *e4b)
1661 {
1662 	ext4_group_t group = ac->ac_g_ex.fe_group;
1663 	int max;
1664 	int err;
1665 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1666 	struct ext4_free_extent ex;
1667 
1668 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1669 		return 0;
1670 
1671 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1672 	if (err)
1673 		return err;
1674 
1675 	ext4_lock_group(ac->ac_sb, group);
1676 	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1677 			     ac->ac_g_ex.fe_len, &ex);
1678 
1679 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1680 		ext4_fsblk_t start;
1681 
1682 		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1683 			ex.fe_start;
1684 		/* use do_div to get remainder (would be 64-bit modulo) */
1685 		if (do_div(start, sbi->s_stripe) == 0) {
1686 			ac->ac_found++;
1687 			ac->ac_b_ex = ex;
1688 			ext4_mb_use_best_found(ac, e4b);
1689 		}
1690 	} else if (max >= ac->ac_g_ex.fe_len) {
1691 		BUG_ON(ex.fe_len <= 0);
1692 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1693 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1694 		ac->ac_found++;
1695 		ac->ac_b_ex = ex;
1696 		ext4_mb_use_best_found(ac, e4b);
1697 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1698 		/* Sometimes, the caller may want to merge even a small
1699 		 * number of blocks into an existing extent */
1700 		BUG_ON(ex.fe_len <= 0);
1701 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1702 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1703 		ac->ac_found++;
1704 		ac->ac_b_ex = ex;
1705 		ext4_mb_use_best_found(ac, e4b);
1706 	}
1707 	ext4_unlock_group(ac->ac_sb, group);
1708 	ext4_mb_unload_buddy(e4b);
1709 
1710 	return 0;
1711 }
1712 
1713 /*
1714  * The routine scans buddy structures (not the bitmap!) from the given order
1715  * up to the max order and tries to find a big enough chunk to satisfy the request
1716  */
1717 static noinline_for_stack
1718 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1719 					struct ext4_buddy *e4b)
1720 {
1721 	struct super_block *sb = ac->ac_sb;
1722 	struct ext4_group_info *grp = e4b->bd_info;
1723 	void *buddy;
1724 	int i;
1725 	int k;
1726 	int max;
1727 
1728 	BUG_ON(ac->ac_2order <= 0);
1729 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1730 		if (grp->bb_counters[i] == 0)
1731 			continue;
1732 
1733 		buddy = mb_find_buddy(e4b, i, &max);
1734 		BUG_ON(buddy == NULL);
1735 
1736 		k = mb_find_next_zero_bit(buddy, max, 0);
1737 		BUG_ON(k >= max);
1738 
1739 		ac->ac_found++;
1740 
1741 		ac->ac_b_ex.fe_len = 1 << i;
1742 		ac->ac_b_ex.fe_start = k << i;
1743 		ac->ac_b_ex.fe_group = e4b->bd_group;
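		/*
		 * i.e. the k-th free order-i buddy: 2^i clusters starting
		 * at group-relative cluster k << i.
		 */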
1744 
1745 		ext4_mb_use_best_found(ac, e4b);
1746 
1747 		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1748 
1749 		if (EXT4_SB(sb)->s_mb_stats)
1750 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1751 
1752 		break;
1753 	}
1754 }
1755 
1756 /*
1757  * The routine scans the group and measures all found extents.
1758  * In order to optimize scanning, caller must pass number of
1759  * free blocks in the group, so the routine can know upper limit.
1760  */
1761 static noinline_for_stack
1762 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1763 					struct ext4_buddy *e4b)
1764 {
1765 	struct super_block *sb = ac->ac_sb;
1766 	void *bitmap = e4b->bd_bitmap;
1767 	struct ext4_free_extent ex;
1768 	int i;
1769 	int free;
1770 
1771 	free = e4b->bd_info->bb_free;
1772 	BUG_ON(free <= 0);
1773 
1774 	i = e4b->bd_info->bb_first_free;
1775 
1776 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1777 		i = mb_find_next_zero_bit(bitmap,
1778 						EXT4_CLUSTERS_PER_GROUP(sb), i);
1779 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
1780 			/*
1781 			 * If we have a corrupt bitmap, we won't find any
1782 			 * free blocks even though the group info says we
1783 			 * have free blocks
1784 			 */
1785 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1786 					"%d free clusters as per "
1787 					"group info. But bitmap says 0",
1788 					free);
1789 			break;
1790 		}
1791 
1792 		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1793 		BUG_ON(ex.fe_len <= 0);
1794 		if (free < ex.fe_len) {
1795 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1796 					"%d free clusters as per "
1797 					"group info. But got %d blocks",
1798 					free, ex.fe_len);
1799 			/*
1800 			 * The number of free blocks differs. This mostly
1801 			 * indicates that the bitmap is corrupt. So exit
1802 			 * without claiming the space.
1803 			 */
1804 			break;
1805 		}
1806 
1807 		ext4_mb_measure_extent(ac, &ex, e4b);
1808 
1809 		i += ex.fe_len;
1810 		free -= ex.fe_len;
1811 	}
1812 
1813 	ext4_mb_check_limits(ac, e4b, 1);
1814 }
1815 
1816 /*
1817  * This is a special case for storages like raid5
1818  * we try to find stripe-aligned chunks for stripe-size-multiple requests
1819  */
1820 static noinline_for_stack
1821 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1822 				 struct ext4_buddy *e4b)
1823 {
1824 	struct super_block *sb = ac->ac_sb;
1825 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1826 	void *bitmap = e4b->bd_bitmap;
1827 	struct ext4_free_extent ex;
1828 	ext4_fsblk_t first_group_block;
1829 	ext4_fsblk_t a;
1830 	ext4_grpblk_t i;
1831 	int max;
1832 
1833 	BUG_ON(sbi->s_stripe == 0);
1834 
1835 	/* find first stripe-aligned block in group */
1836 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
1837 
1838 	a = first_group_block + sbi->s_stripe - 1;
1839 	do_div(a, sbi->s_stripe);
1840 	i = (a * sbi->s_stripe) - first_group_block;
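	/*
	 * i.e. round first_group_block up to the next multiple of
	 * s_stripe and convert back to a group-relative cluster, so the
	 * loop below only probes stripe-aligned positions.
	 */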
1841 
1842 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
1843 		if (!mb_test_bit(i, bitmap)) {
1844 			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1845 			if (max >= sbi->s_stripe) {
1846 				ac->ac_found++;
1847 				ac->ac_b_ex = ex;
1848 				ext4_mb_use_best_found(ac, e4b);
1849 				break;
1850 			}
1851 		}
1852 		i += sbi->s_stripe;
1853 	}
1854 }
1855 
1856 /* This is now called BEFORE we load the buddy bitmap. */
1857 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1858 				ext4_group_t group, int cr)
1859 {
1860 	unsigned free, fragments;
1861 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
1862 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1863 
1864 	BUG_ON(cr < 0 || cr >= 4);
1865 
1866 	/* We only do this if the grp has never been initialized */
1867 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1868 		int ret = ext4_mb_init_group(ac->ac_sb, group);
1869 		if (ret)
1870 			return 0;
1871 	}
1872 
1873 	free = grp->bb_free;
1874 	fragments = grp->bb_fragments;
1875 	if (free == 0)
1876 		return 0;
1877 	if (fragments == 0)
1878 		return 0;
1879 
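	/*
	 * Criteria get progressively looser: cr 0 wants a buddy of the
	 * exact 2^N order, cr 1 wants the average fragment to be large
	 * enough, cr 2 only needs enough free clusters in total, and
	 * cr 3 accepts any group with free space.
	 */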
1880 	switch (cr) {
1881 	case 0:
1882 		BUG_ON(ac->ac_2order == 0);
1883 
1884 		if (grp->bb_largest_free_order < ac->ac_2order)
1885 			return 0;
1886 
1887 		/* Avoid using the first bg of a flexgroup for data files */
1888 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
1889 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
1890 		    ((group % flex_size) == 0))
1891 			return 0;
1892 
1893 		return 1;
1894 	case 1:
1895 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
1896 			return 1;
1897 		break;
1898 	case 2:
1899 		if (free >= ac->ac_g_ex.fe_len)
1900 			return 1;
1901 		break;
1902 	case 3:
1903 		return 1;
1904 	default:
1905 		BUG();
1906 	}
1907 
1908 	return 0;
1909 }
1910 
1911 static noinline_for_stack int
1912 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1913 {
1914 	ext4_group_t ngroups, group, i;
1915 	int cr;
1916 	int err = 0;
1917 	struct ext4_sb_info *sbi;
1918 	struct super_block *sb;
1919 	struct ext4_buddy e4b;
1920 
1921 	sb = ac->ac_sb;
1922 	sbi = EXT4_SB(sb);
1923 	ngroups = ext4_get_groups_count(sb);
1924 	/* non-extent files are limited to low blocks/groups */
1925 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
1926 		ngroups = sbi->s_blockfile_groups;
1927 
1928 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1929 
1930 	/* first, try the goal */
1931 	err = ext4_mb_find_by_goal(ac, &e4b);
1932 	if (err || ac->ac_status == AC_STATUS_FOUND)
1933 		goto out;
1934 
1935 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1936 		goto out;
1937 
1938 	/*
1939 	 * ac->ac_2order is set only if the fe_len is a power of 2;
1940 	 * if ac_2order is set we also set the criteria to 0 so that we
1941 	 * try an exact allocation using the buddy.
1942 	 */
1943 	i = fls(ac->ac_g_ex.fe_len);
1944 	ac->ac_2order = 0;
1945 	/*
1946 	 * We search using buddy data only if the order of the request
1947 	 * is greater than or equal to sbi->s_mb_order2_reqs.
1948 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
1949 	 */
1950 	if (i >= sbi->s_mb_order2_reqs) {
1951 		/*
1952 		 * This should tell if fe_len is exactly power of 2
1953 		 */
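		/*
		 * e.g. fe_len == 8: fls() returned 4 above, and
		 * 8 & ~(1 << 3) == 0, so ac_2order becomes 3.
		 */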
1954 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1955 			ac->ac_2order = i - 1;
1956 	}
1957 
1958 	/* if stream allocation is enabled, use global goal */
1959 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1960 		/* TBD: may be hot point */
1961 		spin_lock(&sbi->s_md_lock);
1962 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
1963 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
1964 		spin_unlock(&sbi->s_md_lock);
1965 	}
1966 
1967 	/* Let's just scan groups to find more or less suitable blocks */
1968 	cr = ac->ac_2order ? 0 : 1;
1969 	/*
1970 	 * cr == 0 try to get exact allocation,
1971 	 * cr == 3  try to get anything
1972 	 */
1973 repeat:
1974 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
1975 		ac->ac_criteria = cr;
1976 		/*
1977 		 * searching for the right group start
1978 		 * from the goal value specified
1979 		 */
1980 		group = ac->ac_g_ex.fe_group;
1981 
1982 		for (i = 0; i < ngroups; group++, i++) {
1983 			/*
1984 			 * Artificially restricted ngroups for non-extent
1985 			 * files makes group > ngroups possible on first loop.
1986 			 */
1987 			if (group >= ngroups)
1988 				group = 0;
1989 
1990 			/* This now checks without needing the buddy page */
1991 			if (!ext4_mb_good_group(ac, group, cr))
1992 				continue;
1993 
1994 			err = ext4_mb_load_buddy(sb, group, &e4b);
1995 			if (err)
1996 				goto out;
1997 
1998 			ext4_lock_group(sb, group);
1999 
2000 			/*
2001 			 * We need to check again after locking the
2002 			 * block group
2003 			 */
2004 			if (!ext4_mb_good_group(ac, group, cr)) {
2005 				ext4_unlock_group(sb, group);
2006 				ext4_mb_unload_buddy(&e4b);
2007 				continue;
2008 			}
2009 
2010 			ac->ac_groups_scanned++;
2011 			if (cr == 0)
2012 				ext4_mb_simple_scan_group(ac, &e4b);
2013 			else if (cr == 1 && sbi->s_stripe &&
2014 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2015 				ext4_mb_scan_aligned(ac, &e4b);
2016 			else
2017 				ext4_mb_complex_scan_group(ac, &e4b);
2018 
2019 			ext4_unlock_group(sb, group);
2020 			ext4_mb_unload_buddy(&e4b);
2021 
2022 			if (ac->ac_status != AC_STATUS_CONTINUE)
2023 				break;
2024 		}
2025 	}
2026 
2027 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2028 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2029 		/*
2030 		 * We've been searching too long. Let's try to allocate
2031 		 * the best chunk we've found so far
2032 		 */
2033 
2034 		ext4_mb_try_best_found(ac, &e4b);
2035 		if (ac->ac_status != AC_STATUS_FOUND) {
2036 			/*
2037 			 * Someone more lucky has already allocated it.
2038 			 * The only thing we can do is just take first
2039 			 * found block(s)
2040 			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2041 			 */
2042 			ac->ac_b_ex.fe_group = 0;
2043 			ac->ac_b_ex.fe_start = 0;
2044 			ac->ac_b_ex.fe_len = 0;
2045 			ac->ac_status = AC_STATUS_CONTINUE;
2046 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2047 			cr = 3;
2048 			atomic_inc(&sbi->s_mb_lost_chunks);
2049 			goto repeat;
2050 		}
2051 	}
2052 out:
2053 	return err;
2054 }
2055 
2056 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2057 {
2058 	struct super_block *sb = seq->private;
2059 	ext4_group_t group;
2060 
2061 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2062 		return NULL;
2063 	group = *pos + 1;
2064 	return (void *) ((unsigned long) group);
2065 }
2066 
2067 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2068 {
2069 	struct super_block *sb = seq->private;
2070 	ext4_group_t group;
2071 
2072 	++*pos;
2073 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2074 		return NULL;
2075 	group = *pos + 1;
2076 	return (void *) ((unsigned long) group);
2077 }
2078 
2079 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2080 {
2081 	struct super_block *sb = seq->private;
2082 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2083 	int i;
2084 	int err;
2085 	struct ext4_buddy e4b;
2086 	struct sg {
2087 		struct ext4_group_info info;
2088 		ext4_grpblk_t counters[16];
2089 	} sg;
2090 
2091 	group--;
2092 	if (group == 0)
2093 		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2094 				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2095 				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2096 			   "group", "free", "frags", "first",
2097 			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2098 			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2099 
2100 	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2101 		sizeof(struct ext4_group_info);
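	/*
	 * i is the number of bytes to snapshot: the fixed ext4_group_info
	 * plus bb_counters[0..s_blocksize_bits + 1], copied below under
	 * the group lock so it can be printed without holding it.
	 */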
2102 	err = ext4_mb_load_buddy(sb, group, &e4b);
2103 	if (err) {
2104 		seq_printf(seq, "#%-5u: I/O error\n", group);
2105 		return 0;
2106 	}
2107 	ext4_lock_group(sb, group);
2108 	memcpy(&sg, ext4_get_group_info(sb, group), i);
2109 	ext4_unlock_group(sb, group);
2110 	ext4_mb_unload_buddy(&e4b);
2111 
2112 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2113 			sg.info.bb_fragments, sg.info.bb_first_free);
2114 	for (i = 0; i <= 13; i++)
2115 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2116 				sg.info.bb_counters[i] : 0);
2117 	seq_printf(seq, " ]\n");
2118 
2119 	return 0;
2120 }
2121 
2122 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2123 {
2124 }
2125 
2126 static const struct seq_operations ext4_mb_seq_groups_ops = {
2127 	.start  = ext4_mb_seq_groups_start,
2128 	.next   = ext4_mb_seq_groups_next,
2129 	.stop   = ext4_mb_seq_groups_stop,
2130 	.show   = ext4_mb_seq_groups_show,
2131 };
2132 
2133 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2134 {
2135 	struct super_block *sb = PDE(inode)->data;
2136 	int rc;
2137 
2138 	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2139 	if (rc == 0) {
2140 		struct seq_file *m = file->private_data;
2141 		m->private = sb;
2142 	}
2143 	return rc;
2144 
2145 }
2146 
2147 static const struct file_operations ext4_mb_seq_groups_fops = {
2148 	.owner		= THIS_MODULE,
2149 	.open		= ext4_mb_seq_groups_open,
2150 	.read		= seq_read,
2151 	.llseek		= seq_lseek,
2152 	.release	= seq_release,
2153 };
2154 
2155 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2156 {
2157 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2158 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2159 
2160 	BUG_ON(!cachep);
2161 	return cachep;
2162 }
2163 
2164 /* Create and initialize ext4_group_info data for the given group. */
2165 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2166 			  struct ext4_group_desc *desc)
2167 {
2168 	int i;
2169 	int metalen = 0;
2170 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2171 	struct ext4_group_info **meta_group_info;
2172 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2173 
2174 	/*
2175 	 * First check if this group is the first of a reserved block.
2176 	 * If it's true, we have to allocate a new table of pointers
2177 	 * to ext4_group_info structures
2178 	 */
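	/*
	 * e.g. with 4 KiB blocks and 32-byte group descriptors,
	 * EXT4_DESC_PER_BLOCK(sb) is 128, so one table of pointers
	 * covers 128 consecutive block groups.
	 */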
2179 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2180 		metalen = sizeof(*meta_group_info) <<
2181 			EXT4_DESC_PER_BLOCK_BITS(sb);
2182 		meta_group_info = kmalloc(metalen, GFP_KERNEL);
2183 		if (meta_group_info == NULL) {
2184 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
2185 				 "for a buddy group");
2186 			goto exit_meta_group_info;
2187 		}
2188 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2189 			meta_group_info;
2190 	}
2191 
2192 	meta_group_info =
2193 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2194 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2195 
2196 	meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
2197 	if (meta_group_info[i] == NULL) {
2198 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2199 		goto exit_group_info;
2200 	}
2201 	memset(meta_group_info[i], 0, kmem_cache_size(cachep));
2202 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2203 		&(meta_group_info[i]->bb_state));
2204 
2205 	/*
2206 	 * initialize bb_free to be able to skip
2207 	 * empty groups without initialization
2208 	 */
2209 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2210 		meta_group_info[i]->bb_free =
2211 			ext4_free_clusters_after_init(sb, group, desc);
2212 	} else {
2213 		meta_group_info[i]->bb_free =
2214 			ext4_free_group_clusters(sb, desc);
2215 	}
2216 
2217 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2218 	init_rwsem(&meta_group_info[i]->alloc_sem);
2219 	meta_group_info[i]->bb_free_root = RB_ROOT;
2220 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2221 
2222 #ifdef DOUBLE_CHECK
2223 	{
2224 		struct buffer_head *bh;
2225 		meta_group_info[i]->bb_bitmap =
2226 			kmalloc(sb->s_blocksize, GFP_KERNEL);
2227 		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2228 		bh = ext4_read_block_bitmap(sb, group);
2229 		BUG_ON(bh == NULL);
2230 		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2231 			sb->s_blocksize);
2232 		put_bh(bh);
2233 	}
2234 #endif
2235 
2236 	return 0;
2237 
2238 exit_group_info:
2239 	/* If a meta_group_info table has been allocated, release it now */
2240 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2241 		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2242 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2243 	}
2244 exit_meta_group_info:
2245 	return -ENOMEM;
2246 } /* ext4_mb_add_groupinfo */
2247 
2248 static int ext4_mb_init_backend(struct super_block *sb)
2249 {
2250 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2251 	ext4_group_t i;
2252 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2253 	struct ext4_super_block *es = sbi->s_es;
2254 	int num_meta_group_infos;
2255 	int num_meta_group_infos_max;
2256 	int array_size;
2257 	struct ext4_group_desc *desc;
2258 	struct kmem_cache *cachep;
2259 
2260 	/* This is the number of blocks used by GDT */
2261 	num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
2262 				1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2263 
2264 	/*
2265 	 * This is the total number of blocks used by GDT including
2266 	 * the number of reserved blocks for GDT.
2267 	 * The s_group_info array is allocated with this value
2268 	 * to allow a clean online resize without a complex
2269 	 * manipulation of pointers.
2270 	 * The drawback is the unused memory when no resize
2271 	 * occurs but it's very low in terms of pages
2272 	 * (see comments below)
2273 	 * Need to handle this properly when META_BG resizing is allowed
2274 	 */
2275 	num_meta_group_infos_max = num_meta_group_infos +
2276 				le16_to_cpu(es->s_reserved_gdt_blocks);
2277 
2278 	/*
2279 	 * array_size is the size of s_group_info array. We round it
2280 	 * to the next power of two because this approximation is done
2281 	 * internally by kmalloc so we can have some more memory
2282 	 * for free here (e.g. may be used for META_BG resize).
2283 	 */
2284 	array_size = 1;
2285 	while (array_size < sizeof(*sbi->s_group_info) *
2286 	       num_meta_group_infos_max)
2287 		array_size = array_size << 1;
2288 	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2289 	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2290 	 * So a two level scheme suffices for now. */
2291 	sbi->s_group_info = ext4_kvzalloc(array_size, GFP_KERNEL);
2292 	if (sbi->s_group_info == NULL) {
2293 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2294 		return -ENOMEM;
2295 	}
2296 	sbi->s_buddy_cache = new_inode(sb);
2297 	if (sbi->s_buddy_cache == NULL) {
2298 		ext4_msg(sb, KERN_ERR, "can't get new inode");
2299 		goto err_freesgi;
2300 	}
2301 	/* To avoid potentially colliding with a valid on-disk inode number,
2302 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2303 	 * not in the inode hash, so it should never be found by iget(), but
2304 	 * this will avoid confusion if it ever shows up during debugging. */
2305 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2306 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2307 	for (i = 0; i < ngroups; i++) {
2308 		desc = ext4_get_group_desc(sb, i, NULL);
2309 		if (desc == NULL) {
2310 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2311 			goto err_freebuddy;
2312 		}
2313 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2314 			goto err_freebuddy;
2315 	}
2316 
2317 	return 0;
2318 
2319 err_freebuddy:
2320 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2321 	while (i-- > 0)
2322 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
2323 	i = num_meta_group_infos;
2324 	while (i-- > 0)
2325 		kfree(sbi->s_group_info[i]);
2326 	iput(sbi->s_buddy_cache);
2327 err_freesgi:
2328 	ext4_kvfree(sbi->s_group_info);
2329 	return -ENOMEM;
2330 }
2331 
2332 static void ext4_groupinfo_destroy_slabs(void)
2333 {
2334 	int i;
2335 
2336 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2337 		if (ext4_groupinfo_caches[i])
2338 			kmem_cache_destroy(ext4_groupinfo_caches[i]);
2339 		ext4_groupinfo_caches[i] = NULL;
2340 	}
2341 }
2342 
2343 static int ext4_groupinfo_create_slab(size_t size)
2344 {
2345 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2346 	int slab_size;
2347 	int blocksize_bits = order_base_2(size);
2348 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2349 	struct kmem_cache *cachep;
2350 
2351 	if (cache_index >= NR_GRPINFO_CACHES)
2352 		return -EINVAL;
2353 
2354 	if (unlikely(cache_index < 0))
2355 		cache_index = 0;
2356 
2357 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
2358 	if (ext4_groupinfo_caches[cache_index]) {
2359 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2360 		return 0;	/* Already created */
2361 	}
2362 
2363 	slab_size = offsetof(struct ext4_group_info,
2364 				bb_counters[blocksize_bits + 2]);
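	/* e.g. 4 KiB blocks: room for bb_counters[0..13] after the fixed part */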
2365 
2366 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2367 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2368 					NULL);
2369 
2370 	ext4_groupinfo_caches[cache_index] = cachep;
2371 
2372 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2373 	if (!cachep) {
2374 		printk(KERN_EMERG
2375 		       "EXT4-fs: no memory for groupinfo slab cache\n");
2376 		return -ENOMEM;
2377 	}
2378 
2379 	return 0;
2380 }
2381 
2382 int ext4_mb_init(struct super_block *sb, int needs_recovery)
2383 {
2384 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2385 	unsigned i, j;
2386 	unsigned offset;
2387 	unsigned max;
2388 	int ret;
2389 
2390 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2391 
2392 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2393 	if (sbi->s_mb_offsets == NULL) {
2394 		ret = -ENOMEM;
2395 		goto out;
2396 	}
2397 
2398 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2399 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2400 	if (sbi->s_mb_maxs == NULL) {
2401 		ret = -ENOMEM;
2402 		goto out;
2403 	}
2404 
2405 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2406 	if (ret < 0)
2407 		goto out;
2408 
2409 	/* order 0 is regular bitmap */
2410 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2411 	sbi->s_mb_offsets[0] = 0;
2412 
2413 	i = 1;
2414 	offset = 0;
2415 	max = sb->s_blocksize << 2;
2416 	do {
2417 		sbi->s_mb_offsets[i] = offset;
2418 		sbi->s_mb_maxs[i] = max;
2419 		offset += 1 << (sb->s_blocksize_bits - i);
2420 		max = max >> 1;
2421 		i++;
2422 	} while (i <= sb->s_blocksize_bits + 1);
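	/*
	 * e.g. with 4 KiB blocks: maxs[0] = 32768 bits (the on-disk
	 * bitmap), then maxs[1] = 16384 order-1 buddies at byte offset 0,
	 * maxs[2] = 8192 at offset 2048, ... down to maxs[13] = 4.
	 */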
2423 
2424 	spin_lock_init(&sbi->s_md_lock);
2425 	spin_lock_init(&sbi->s_bal_lock);
2426 
2427 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2428 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2429 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2430 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2431 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2432 	/*
2433 	 * The default group preallocation is 512, which for 4k block
2434 	 * sizes translates to 2 megabytes.  However for bigalloc file
2435 	 * systems, this is probably too big (i.e., if the cluster size
2436 	 * is 1 megabyte, then group preallocation size becomes half a
2437 	 * gigabyte!).  As a default, we will keep a two megabyte
2438 	 * group prealloc size for cluster sizes up to 64k, and after
2439 	 * that, we will force a minimum group preallocation size of
2440 	 * 32 clusters.  This translates to 8 megs when the cluster
2441 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
2442 	 * which seems reasonable as a default.
2443 	 */
2444 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2445 				       sbi->s_cluster_bits, 32);
2446 	/*
2447 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2448 	 * to the lowest multiple of s_stripe which is bigger than
2449 	 * the s_mb_group_prealloc as determined above. We want
2450 	 * the preallocation size to be an exact multiple of the
2451 	 * RAID stripe size so that preallocations don't fragment
2452 	 * the stripes.
2453 	 */
2454 	if (sbi->s_stripe > 1) {
2455 		sbi->s_mb_group_prealloc = roundup(
2456 			sbi->s_mb_group_prealloc, sbi->s_stripe);
2457 	}
2458 
2459 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2460 	if (sbi->s_locality_groups == NULL) {
2461 		ret = -ENOMEM;
2462 		goto out_free_groupinfo_slab;
2463 	}
2464 	for_each_possible_cpu(i) {
2465 		struct ext4_locality_group *lg;
2466 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
2467 		mutex_init(&lg->lg_mutex);
2468 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
2469 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2470 		spin_lock_init(&lg->lg_prealloc_lock);
2471 	}
2472 
2473 	/* init file for buddy data */
2474 	ret = ext4_mb_init_backend(sb);
2475 	if (ret != 0)
2476 		goto out_free_locality_groups;
2477 
2478 	if (sbi->s_proc)
2479 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2480 				 &ext4_mb_seq_groups_fops, sb);
2481 
2482 	return 0;
2483 
2484 out_free_locality_groups:
2485 	free_percpu(sbi->s_locality_groups);
2486 	sbi->s_locality_groups = NULL;
2487 out_free_groupinfo_slab:
2488 	ext4_groupinfo_destroy_slabs();
2489 out:
2490 	kfree(sbi->s_mb_offsets);
2491 	sbi->s_mb_offsets = NULL;
2492 	kfree(sbi->s_mb_maxs);
2493 	sbi->s_mb_maxs = NULL;
2494 	return ret;
2495 }
2496 
2497 /* needs to be called with the ext4 group lock held */
2498 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2499 {
2500 	struct ext4_prealloc_space *pa;
2501 	struct list_head *cur, *tmp;
2502 	int count = 0;
2503 
2504 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2505 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2506 		list_del(&pa->pa_group_list);
2507 		count++;
2508 		kmem_cache_free(ext4_pspace_cachep, pa);
2509 	}
2510 	if (count)
2511 		mb_debug(1, "mballoc: %u PAs left\n", count);
2512 
2513 }
2514 
2515 int ext4_mb_release(struct super_block *sb)
2516 {
2517 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2518 	ext4_group_t i;
2519 	int num_meta_group_infos;
2520 	struct ext4_group_info *grinfo;
2521 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2522 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2523 
2524 	if (sbi->s_proc)
2525 		remove_proc_entry("mb_groups", sbi->s_proc);
2526 
2527 	if (sbi->s_group_info) {
2528 		for (i = 0; i < ngroups; i++) {
2529 			grinfo = ext4_get_group_info(sb, i);
2530 #ifdef DOUBLE_CHECK
2531 			kfree(grinfo->bb_bitmap);
2532 #endif
2533 			ext4_lock_group(sb, i);
2534 			ext4_mb_cleanup_pa(grinfo);
2535 			ext4_unlock_group(sb, i);
2536 			kmem_cache_free(cachep, grinfo);
2537 		}
2538 		num_meta_group_infos = (ngroups +
2539 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2540 			EXT4_DESC_PER_BLOCK_BITS(sb);
2541 		for (i = 0; i < num_meta_group_infos; i++)
2542 			kfree(sbi->s_group_info[i]);
2543 		ext4_kvfree(sbi->s_group_info);
2544 	}
2545 	kfree(sbi->s_mb_offsets);
2546 	kfree(sbi->s_mb_maxs);
2547 	if (sbi->s_buddy_cache)
2548 		iput(sbi->s_buddy_cache);
2549 	if (sbi->s_mb_stats) {
2550 		ext4_msg(sb, KERN_INFO,
2551 		       "mballoc: %u blocks %u reqs (%u success)",
2552 				atomic_read(&sbi->s_bal_allocated),
2553 				atomic_read(&sbi->s_bal_reqs),
2554 				atomic_read(&sbi->s_bal_success));
2555 		ext4_msg(sb, KERN_INFO,
2556 		      "mballoc: %u extents scanned, %u goal hits, "
2557 				"%u 2^N hits, %u breaks, %u lost",
2558 				atomic_read(&sbi->s_bal_ex_scanned),
2559 				atomic_read(&sbi->s_bal_goals),
2560 				atomic_read(&sbi->s_bal_2orders),
2561 				atomic_read(&sbi->s_bal_breaks),
2562 				atomic_read(&sbi->s_mb_lost_chunks));
2563 		ext4_msg(sb, KERN_INFO,
2564 		       "mballoc: %lu generated and it took %Lu",
2565 				sbi->s_mb_buddies_generated,
2566 				sbi->s_mb_generation_time);
2567 		ext4_msg(sb, KERN_INFO,
2568 		       "mballoc: %u preallocated, %u discarded",
2569 				atomic_read(&sbi->s_mb_preallocated),
2570 				atomic_read(&sbi->s_mb_discarded));
2571 	}
2572 
2573 	free_percpu(sbi->s_locality_groups);
2574 
2575 	return 0;
2576 }
2577 
2578 static inline int ext4_issue_discard(struct super_block *sb,
2579 		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
2580 		unsigned long flags)
2581 {
2582 	ext4_fsblk_t discard_block;
2583 
2584 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
2585 			 ext4_group_first_block_no(sb, block_group));
2586 	count = EXT4_C2B(EXT4_SB(sb), count);
2587 	trace_ext4_discard_blocks(sb,
2588 			(unsigned long long) discard_block, count);
2589 	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
2590 }
2591 
2592 /*
2593  * This function is called by the jbd2 layer once the commit has finished,
2594  * so we know we can free the blocks that were released with that commit.
2595  */
2596 static void ext4_free_data_callback(struct super_block *sb,
2597 				    struct ext4_journal_cb_entry *jce,
2598 				    int rc)
2599 {
2600 	struct ext4_free_data *entry = (struct ext4_free_data *)jce;
2601 	struct ext4_buddy e4b;
2602 	struct ext4_group_info *db;
2603 	int err, count = 0, count2 = 0;
2604 
2605 	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2606 		 entry->efd_count, entry->efd_group, entry);
2607 
2608 	if (test_opt(sb, DISCARD))
2609 		ext4_issue_discard(sb, entry->efd_group,
2610 				   entry->efd_start_cluster, entry->efd_count, 0);
2611 
2612 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
2613 	/* we expect to find existing buddy because it's pinned */
2614 	BUG_ON(err != 0);
2615 
2616 
2617 	db = e4b.bd_info;
2618 	/* there are blocks to put in buddy to make them really free */
2619 	count += entry->efd_count;
2620 	count2++;
2621 	ext4_lock_group(sb, entry->efd_group);
2622 	/* Take it out of per group rb tree */
2623 	rb_erase(&entry->efd_node, &(db->bb_free_root));
2624 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
2625 
2626 	/*
2627 	 * Clear the trimmed flag for the group so that the next
2628 	 * ext4_trim_fs can trim it.
2629 	 * If the volume is mounted with -o discard, online discard
2630 	 * is supported and the free blocks will be trimmed online.
2631 	 */
2632 	if (!test_opt(sb, DISCARD))
2633 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
2634 
2635 	if (!db->bb_free_root.rb_node) {
2636 		/* No more items in the per group rb tree
2637 		 * balance refcounts from ext4_mb_free_metadata()
2638 		 */
2639 		page_cache_release(e4b.bd_buddy_page);
2640 		page_cache_release(e4b.bd_bitmap_page);
2641 	}
2642 	ext4_unlock_group(sb, entry->efd_group);
2643 	kmem_cache_free(ext4_free_data_cachep, entry);
2644 	ext4_mb_unload_buddy(&e4b);
2645 
2646 	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
2647 }
2648 
2649 #ifdef CONFIG_EXT4_DEBUG
2650 u8 mb_enable_debug __read_mostly;
2651 
2652 static struct dentry *debugfs_dir;
2653 static struct dentry *debugfs_debug;
2654 
2655 static void __init ext4_create_debugfs_entry(void)
2656 {
2657 	debugfs_dir = debugfs_create_dir("ext4", NULL);
2658 	if (debugfs_dir)
2659 		debugfs_debug = debugfs_create_u8("mballoc-debug",
2660 						  S_IRUGO | S_IWUSR,
2661 						  debugfs_dir,
2662 						  &mb_enable_debug);
2663 }
2664 
2665 static void ext4_remove_debugfs_entry(void)
2666 {
2667 	debugfs_remove(debugfs_debug);
2668 	debugfs_remove(debugfs_dir);
2669 }
2670 
2671 #else
2672 
2673 static void __init ext4_create_debugfs_entry(void)
2674 {
2675 }
2676 
2677 static void ext4_remove_debugfs_entry(void)
2678 {
2679 }
2680 
2681 #endif
2682 
2683 int __init ext4_init_mballoc(void)
2684 {
2685 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2686 					SLAB_RECLAIM_ACCOUNT);
2687 	if (ext4_pspace_cachep == NULL)
2688 		return -ENOMEM;
2689 
2690 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2691 				    SLAB_RECLAIM_ACCOUNT);
2692 	if (ext4_ac_cachep == NULL) {
2693 		kmem_cache_destroy(ext4_pspace_cachep);
2694 		return -ENOMEM;
2695 	}
2696 
2697 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
2698 					   SLAB_RECLAIM_ACCOUNT);
2699 	if (ext4_free_data_cachep == NULL) {
2700 		kmem_cache_destroy(ext4_pspace_cachep);
2701 		kmem_cache_destroy(ext4_ac_cachep);
2702 		return -ENOMEM;
2703 	}
2704 	ext4_create_debugfs_entry();
2705 	return 0;
2706 }
2707 
2708 void ext4_exit_mballoc(void)
2709 {
2710 	/*
2711 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2712 	 * before destroying the slab cache.
2713 	 */
2714 	rcu_barrier();
2715 	kmem_cache_destroy(ext4_pspace_cachep);
2716 	kmem_cache_destroy(ext4_ac_cachep);
2717 	kmem_cache_destroy(ext4_free_data_cachep);
2718 	ext4_groupinfo_destroy_slabs();
2719 	ext4_remove_debugfs_entry();
2720 }
2721 
2722 
2723 /*
2724  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
2725  * Returns 0 if success or error code
2726  */
2727 static noinline_for_stack int
2728 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2729 				handle_t *handle, unsigned int reserv_clstrs)
2730 {
2731 	struct buffer_head *bitmap_bh = NULL;
2732 	struct ext4_group_desc *gdp;
2733 	struct buffer_head *gdp_bh;
2734 	struct ext4_sb_info *sbi;
2735 	struct super_block *sb;
2736 	ext4_fsblk_t block;
2737 	int err, len;
2738 
2739 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2740 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
2741 
2742 	sb = ac->ac_sb;
2743 	sbi = EXT4_SB(sb);
2744 
2745 	err = -EIO;
2746 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2747 	if (!bitmap_bh)
2748 		goto out_err;
2749 
2750 	err = ext4_journal_get_write_access(handle, bitmap_bh);
2751 	if (err)
2752 		goto out_err;
2753 
2754 	err = -EIO;
2755 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2756 	if (!gdp)
2757 		goto out_err;
2758 
2759 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2760 			ext4_free_group_clusters(sb, gdp));
2761 
2762 	err = ext4_journal_get_write_access(handle, gdp_bh);
2763 	if (err)
2764 		goto out_err;
2765 
2766 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2767 
2768 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
2769 	if (!ext4_data_block_valid(sbi, block, len)) {
2770 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2771 			   "fs metadata", block, block+len);
2772 		/* The file system is mounted not to panic on error;
2773 		 * fix the bitmap and repeat the block allocation.
2774 		 * We leak some of the blocks here.
2775 		 */
2776 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2777 		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2778 			      ac->ac_b_ex.fe_len);
2779 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2780 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2781 		if (!err)
2782 			err = -EAGAIN;
2783 		goto out_err;
2784 	}
2785 
2786 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2787 #ifdef AGGRESSIVE_CHECK
2788 	{
2789 		int i;
2790 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2791 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2792 						bitmap_bh->b_data));
2793 		}
2794 	}
2795 #endif
2796 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2797 		      ac->ac_b_ex.fe_len);
2798 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2799 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2800 		ext4_free_group_clusters_set(sb, gdp,
2801 					     ext4_free_clusters_after_init(sb,
2802 						ac->ac_b_ex.fe_group, gdp));
2803 	}
2804 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
2805 	ext4_free_group_clusters_set(sb, gdp, len);
2806 	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
2807 
2808 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2809 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
2810 	/*
2811 	 * Now reduce the dirty block count also. Should not go negative
2812 	 */
2813 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2814 		/* release all the reserved blocks if non delalloc */
2815 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
2816 				   reserv_clstrs);
2817 
2818 	if (sbi->s_log_groups_per_flex) {
2819 		ext4_group_t flex_group = ext4_flex_group(sbi,
2820 							  ac->ac_b_ex.fe_group);
2821 		atomic64_sub(ac->ac_b_ex.fe_len,
2822 			     &sbi->s_flex_groups[flex_group].free_clusters);
2823 	}
2824 
2825 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2826 	if (err)
2827 		goto out_err;
2828 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2829 
2830 out_err:
2831 	ext4_mark_super_dirty(sb);
2832 	brelse(bitmap_bh);
2833 	return err;
2834 }
2835 
2836 /*
2837  * here we normalize the request for the locality group
2838  * Group requests are normalized to s_mb_group_prealloc, which goes to
2839  * s_stripe if we set the same via the mount option.
2840  * s_mb_group_prealloc can be configured via
2841  * /sys/fs/ext4/<partition>/mb_group_prealloc
2842  *
2843  * XXX: should we try to preallocate more than the group has now?
2844  */
2845 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2846 {
2847 	struct super_block *sb = ac->ac_sb;
2848 	struct ext4_locality_group *lg = ac->ac_lg;
2849 
2850 	BUG_ON(lg == NULL);
2851 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
2852 	mb_debug(1, "#%u: goal %u blocks for locality group\n",
2853 		current->pid, ac->ac_g_ex.fe_len);
2854 }
2855 
2856 /*
2857  * Normalization means making the request better in terms of
2858  * size and alignment
2859  */
2860 static noinline_for_stack void
2861 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2862 				struct ext4_allocation_request *ar)
2863 {
2864 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2865 	int bsbits, max;
2866 	ext4_lblk_t end;
2867 	loff_t size, start_off;
2868 	loff_t orig_size __maybe_unused;
2869 	ext4_lblk_t start;
2870 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
2871 	struct ext4_prealloc_space *pa;
2872 
2873 	/* only normalize data requests; metadata requests
2874 	   do not need preallocation */
2875 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2876 		return;
2877 
2878 	/* sometimes the caller may want exact blocks */
2879 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2880 		return;
2881 
2882 	/* caller may indicate that preallocation isn't
2883 	 * required (it's a tail, for example) */
2884 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2885 		return;
2886 
2887 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2888 		ext4_mb_normalize_group_request(ac);
2889 		return ;
2890 	}
2891 
2892 	bsbits = ac->ac_sb->s_blocksize_bits;
2893 
2894 	/* first, let's learn the actual file size
2895 	 * we would have once the current request is allocated */
2896 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
2897 	size = size << bsbits;
2898 	if (size < i_size_read(ac->ac_inode))
2899 		size = i_size_read(ac->ac_inode);
2900 	orig_size = size;
2901 
2902 	/* max size of free chunks */
2903 	max = 2 << bsbits;
2904 
2905 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
2906 		(req <= (size) || max <= (chunk_size))
2907 
2908 	/* first, try to predict filesize */
2909 	/* XXX: should this table be tunable? */
2910 	start_off = 0;
2911 	if (size <= 16 * 1024) {
2912 		size = 16 * 1024;
2913 	} else if (size <= 32 * 1024) {
2914 		size = 32 * 1024;
2915 	} else if (size <= 64 * 1024) {
2916 		size = 64 * 1024;
2917 	} else if (size <= 128 * 1024) {
2918 		size = 128 * 1024;
2919 	} else if (size <= 256 * 1024) {
2920 		size = 256 * 1024;
2921 	} else if (size <= 512 * 1024) {
2922 		size = 512 * 1024;
2923 	} else if (size <= 1024 * 1024) {
2924 		size = 1024 * 1024;
2925 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
2926 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2927 						(21 - bsbits)) << 21;
2928 		size = 2 * 1024 * 1024;
2929 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
2930 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2931 							(22 - bsbits)) << 22;
2932 		size = 4 * 1024 * 1024;
2933 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
2934 					(8<<20)>>bsbits, max, 8 * 1024)) {
2935 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2936 							(23 - bsbits)) << 23;
2937 		size = 8 * 1024 * 1024;
2938 	} else {
2939 		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
2940 		size	  = ac->ac_o_ex.fe_len << bsbits;
2941 	}
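	/*
	 * e.g. a request that leaves the file at ~100 KiB is normalized
	 * to a 128 KiB goal; predicted sizes above 1 MiB fall into the
	 * 2/4/8 MiB buckets with start_off aligned to that bucket size.
	 */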
2942 	size = size >> bsbits;
2943 	start = start_off >> bsbits;
2944 
2945 	/* don't cover already allocated blocks in selected range */
2946 	if (ar->pleft && start <= ar->lleft) {
2947 		size -= ar->lleft + 1 - start;
2948 		start = ar->lleft + 1;
2949 	}
2950 	if (ar->pright && start + size - 1 >= ar->lright)
2951 		size -= start + size - ar->lright;
2952 
2953 	end = start + size;
2954 
2955 	/* check we don't cross already preallocated blocks */
2956 	rcu_read_lock();
2957 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
2958 		ext4_lblk_t pa_end;
2959 
2960 		if (pa->pa_deleted)
2961 			continue;
2962 		spin_lock(&pa->pa_lock);
2963 		if (pa->pa_deleted) {
2964 			spin_unlock(&pa->pa_lock);
2965 			continue;
2966 		}
2967 
2968 		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
2969 						  pa->pa_len);
2970 
2971 		/* PA must not overlap original request */
2972 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
2973 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
2974 
2975 		/* skip PAs this normalized request doesn't overlap with */
2976 		if (pa->pa_lstart >= end || pa_end <= start) {
2977 			spin_unlock(&pa->pa_lock);
2978 			continue;
2979 		}
2980 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
2981 
2982 		/* adjust start or end to be adjacent to this pa */
2983 		if (pa_end <= ac->ac_o_ex.fe_logical) {
2984 			BUG_ON(pa_end < start);
2985 			start = pa_end;
2986 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
2987 			BUG_ON(pa->pa_lstart > end);
2988 			end = pa->pa_lstart;
2989 		}
2990 		spin_unlock(&pa->pa_lock);
2991 	}
2992 	rcu_read_unlock();
2993 	size = end - start;
2994 
2995 	/* XXX: extra loop to check we really don't overlap preallocations */
2996 	rcu_read_lock();
2997 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
2998 		ext4_lblk_t pa_end;
2999 
3000 		spin_lock(&pa->pa_lock);
3001 		if (pa->pa_deleted == 0) {
3002 			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3003 							  pa->pa_len);
3004 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3005 		}
3006 		spin_unlock(&pa->pa_lock);
3007 	}
3008 	rcu_read_unlock();
3009 
3010 	if (start + size <= ac->ac_o_ex.fe_logical &&
3011 			start > ac->ac_o_ex.fe_logical) {
3012 		ext4_msg(ac->ac_sb, KERN_ERR,
3013 			 "start %lu, size %lu, fe_logical %lu",
3014 			 (unsigned long) start, (unsigned long) size,
3015 			 (unsigned long) ac->ac_o_ex.fe_logical);
3016 	}
3017 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3018 			start > ac->ac_o_ex.fe_logical);
3019 	BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
3020 
3021 	/* now prepare goal request */
3022 
3023 	/* XXX: is it better to align blocks with respect to logical
3024 	 * placement or satisfy a big request as is */
3025 	ac->ac_g_ex.fe_logical = start;
3026 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3027 
3028 	/* define goal start in order to merge */
3029 	if (ar->pright && (ar->lright == (start + size))) {
3030 		/* merge to the right */
3031 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3032 						&ac->ac_f_ex.fe_group,
3033 						&ac->ac_f_ex.fe_start);
3034 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3035 	}
3036 	if (ar->pleft && (ar->lleft + 1 == start)) {
3037 		/* merge to the left */
3038 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3039 						&ac->ac_f_ex.fe_group,
3040 						&ac->ac_f_ex.fe_start);
3041 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3042 	}
3043 
3044 	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
3045 		(unsigned) orig_size, (unsigned) start);
3046 }
3047 
3048 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3049 {
3050 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3051 
3052 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3053 		atomic_inc(&sbi->s_bal_reqs);
3054 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3055 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3056 			atomic_inc(&sbi->s_bal_success);
3057 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3058 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3059 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3060 			atomic_inc(&sbi->s_bal_goals);
3061 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3062 			atomic_inc(&sbi->s_bal_breaks);
3063 	}
3064 
3065 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3066 		trace_ext4_mballoc_alloc(ac);
3067 	else
3068 		trace_ext4_mballoc_prealloc(ac);
3069 }
3070 
3071 /*
3072  * Called on failure; free up any blocks from the inode PA for this
3073  * context.  We don't need this for MB_GROUP_PA because we only change
3074  * pa_free in ext4_mb_release_context(), but on failure, we've already
3075  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3076  */
3077 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3078 {
3079 	struct ext4_prealloc_space *pa = ac->ac_pa;
3080 	int len;
3081 
3082 	if (pa && pa->pa_type == MB_INODE_PA) {
3083 		len = ac->ac_b_ex.fe_len;
3084 		pa->pa_free += len;
3085 	}
3086 
3087 }
3088 
3089 /*
3090  * use blocks preallocated to inode
3091  */
3092 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3093 				struct ext4_prealloc_space *pa)
3094 {
3095 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3096 	ext4_fsblk_t start;
3097 	ext4_fsblk_t end;
3098 	int len;
3099 
3100 	/* found preallocated blocks, use them */
3101 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3102 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3103 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3104 	len = EXT4_NUM_B2C(sbi, end - start);
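	/*
	 * i.e. map the requested logical block onto the PA's physical
	 * range and clip the length to the requested length or to the
	 * end of the PA, whichever comes first.
	 */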
3105 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3106 					&ac->ac_b_ex.fe_start);
3107 	ac->ac_b_ex.fe_len = len;
3108 	ac->ac_status = AC_STATUS_FOUND;
3109 	ac->ac_pa = pa;
3110 
3111 	BUG_ON(start < pa->pa_pstart);
3112 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3113 	BUG_ON(pa->pa_free < len);
3114 	pa->pa_free -= len;
3115 
3116 	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
3117 }
3118 
3119 /*
3120  * use blocks preallocated to locality group
3121  */
3122 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3123 				struct ext4_prealloc_space *pa)
3124 {
3125 	unsigned int len = ac->ac_o_ex.fe_len;
3126 
3127 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3128 					&ac->ac_b_ex.fe_group,
3129 					&ac->ac_b_ex.fe_start);
3130 	ac->ac_b_ex.fe_len = len;
3131 	ac->ac_status = AC_STATUS_FOUND;
3132 	ac->ac_pa = pa;
3133 
3134 	/* we don't correct pa_pstart or pa_plen here to avoid a
3135 	 * possible race when the group is being loaded concurrently;
3136 	 * instead we correct the pa later, after blocks are marked
3137 	 * in the on-disk bitmap -- see ext4_mb_release_context().
3138 	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3139 	 */
3140 	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3141 }
3142 
3143 /*
3144  * Return the prealloc space that has the minimal distance
3145  * from the goal block. @cpa is the prealloc
3146  * space with the currently known minimal distance
3147  * from the goal block.
3148  */
3149 static struct ext4_prealloc_space *
3150 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3151 			struct ext4_prealloc_space *pa,
3152 			struct ext4_prealloc_space *cpa)
3153 {
3154 	ext4_fsblk_t cur_distance, new_distance;
3155 
3156 	if (cpa == NULL) {
3157 		atomic_inc(&pa->pa_count);
3158 		return pa;
3159 	}
3160 	cur_distance = abs(goal_block - cpa->pa_pstart);
3161 	new_distance = abs(goal_block - pa->pa_pstart);
3162 
3163 	if (cur_distance <= new_distance)
3164 		return cpa;
3165 
3166 	/* drop the previous reference */
3167 	atomic_dec(&cpa->pa_count);
3168 	atomic_inc(&pa->pa_count);
3169 	return pa;
3170 }
3171 
3172 /*
3173  * search goal blocks in preallocated space
3174  */
3175 static noinline_for_stack int
3176 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3177 {
3178 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3179 	int order, i;
3180 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3181 	struct ext4_locality_group *lg;
3182 	struct ext4_prealloc_space *pa, *cpa = NULL;
3183 	ext4_fsblk_t goal_block;
3184 
3185 	/* only data can be preallocated */
3186 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3187 		return 0;
3188 
3189 	/* first, try per-file preallocation */
3190 	rcu_read_lock();
3191 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3192 
3193 		/* all fields in this condition don't change,
3194 		 * so we can skip locking for them */
3195 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3196 		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3197 					       EXT4_C2B(sbi, pa->pa_len)))
3198 			continue;
3199 
3200 		/* non-extent files can't have physical blocks past 2^32 */
3201 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3202 		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3203 		     EXT4_MAX_BLOCK_FILE_PHYS))
3204 			continue;
3205 
3206 		/* found preallocated blocks, use them */
3207 		spin_lock(&pa->pa_lock);
3208 		if (pa->pa_deleted == 0 && pa->pa_free) {
3209 			atomic_inc(&pa->pa_count);
3210 			ext4_mb_use_inode_pa(ac, pa);
3211 			spin_unlock(&pa->pa_lock);
3212 			ac->ac_criteria = 10;
3213 			rcu_read_unlock();
3214 			return 1;
3215 		}
3216 		spin_unlock(&pa->pa_lock);
3217 	}
3218 	rcu_read_unlock();
3219 
3220 	/* can we use group allocation? */
3221 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3222 		return 0;
3223 
3224 	/* inode may have no locality group for some reason */
3225 	lg = ac->ac_lg;
3226 	if (lg == NULL)
3227 		return 0;
3228 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3229 	if (order > PREALLOC_TB_SIZE - 1)
3230 		/* The max size of hash table is PREALLOC_TB_SIZE */
3231 		order = PREALLOC_TB_SIZE - 1;
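	/*
	 * e.g. an 8-cluster request: fls(8) - 1 == 3, so the scan below
	 * starts at lg_prealloc_list[3] and also visits the larger-order
	 * buckets.
	 */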
3232 
3233 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3234 	/*
3235 	 * search for the prealloc space that is having
3236 	 * minimal distance from the goal block.
3237 	 */
3238 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3239 		rcu_read_lock();
3240 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3241 					pa_inode_list) {
3242 			spin_lock(&pa->pa_lock);
3243 			if (pa->pa_deleted == 0 &&
3244 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3245 
3246 				cpa = ext4_mb_check_group_pa(goal_block,
3247 								pa, cpa);
3248 			}
3249 			spin_unlock(&pa->pa_lock);
3250 		}
3251 		rcu_read_unlock();
3252 	}
3253 	if (cpa) {
3254 		ext4_mb_use_group_pa(ac, cpa);
3255 		ac->ac_criteria = 20;
3256 		return 1;
3257 	}
3258 	return 0;
3259 }
3260 
3261 /*
3262  * the function goes through all blocks freed in the group
3263  * but not yet committed and marks them used in the in-core bitmap.
3264  * The buddy must be generated from this bitmap
3265  * Need to be called with the ext4 group lock held
3266  */
3267 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3268 						ext4_group_t group)
3269 {
3270 	struct rb_node *n;
3271 	struct ext4_group_info *grp;
3272 	struct ext4_free_data *entry;
3273 
3274 	grp = ext4_get_group_info(sb, group);
3275 	n = rb_first(&(grp->bb_free_root));
3276 
3277 	while (n) {
3278 		entry = rb_entry(n, struct ext4_free_data, efd_node);
3279 		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3280 		n = rb_next(n);
3281 	}
3282 	return;
3283 }
3284 
3285 /*
3286  * the function goes through all preallocations in this group and marks them
3287  * used in the in-core bitmap. The buddy must be generated from this bitmap.
3288  * Needs to be called with the ext4 group lock held
3289  */
3290 static noinline_for_stack
3291 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3292 					ext4_group_t group)
3293 {
3294 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3295 	struct ext4_prealloc_space *pa;
3296 	struct list_head *cur;
3297 	ext4_group_t groupnr;
3298 	ext4_grpblk_t start;
3299 	int preallocated = 0;
3300 	int len;
3301 
3302 	/* every path that discards preallocations loads the group first,
3303 	 * so the only competing code is preallocation use.
3304 	 * We don't need any locking here.
3305 	 * Notice we do NOT ignore preallocations with pa_deleted;
3306 	 * otherwise we could leave used blocks available for
3307 	 * allocation in the buddy while a concurrent ext4_mb_put_pa()
3308 	 * is dropping the preallocation
3309 	 */
3310 	list_for_each(cur, &grp->bb_prealloc_list) {
3311 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3312 		spin_lock(&pa->pa_lock);
3313 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3314 					     &groupnr, &start);
3315 		len = pa->pa_len;
3316 		spin_unlock(&pa->pa_lock);
3317 		if (unlikely(len == 0))
3318 			continue;
3319 		BUG_ON(groupnr != group);
3320 		ext4_set_bits(bitmap, start, len);
3321 		preallocated += len;
3322 	}
3323 	mb_debug(1, "prellocated %u for group %u\n", preallocated, group);
3324 }
3325 
3326 static void ext4_mb_pa_callback(struct rcu_head *head)
3327 {
3328 	struct ext4_prealloc_space *pa;
3329 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3330 	kmem_cache_free(ext4_pspace_cachep, pa);
3331 }
3332 
3333 /*
3334  * drops a reference to preallocated space descriptor
3335  * if this was the last reference and the space is consumed
3336  */
3337 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3338 			struct super_block *sb, struct ext4_prealloc_space *pa)
3339 {
3340 	ext4_group_t grp;
3341 	ext4_fsblk_t grp_blk;
3342 
3343 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3344 		return;
3345 
3346 	/* in this short window concurrent discard can set pa_deleted */
3347 	spin_lock(&pa->pa_lock);
3348 	if (pa->pa_deleted == 1) {
3349 		spin_unlock(&pa->pa_lock);
3350 		return;
3351 	}
3352 
3353 	pa->pa_deleted = 1;
3354 	spin_unlock(&pa->pa_lock);
3355 
3356 	grp_blk = pa->pa_pstart;
3357 	/*
3358 	 * If doing group-based preallocation, pa_pstart may be in the
3359 	 * next group when pa is used up
3360 	 */
3361 	if (pa->pa_type == MB_GROUP_PA)
3362 		grp_blk--;
3363 
3364 	ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
3365 
3366 	/*
3367 	 * possible race:
3368 	 *
3369 	 *  P1 (buddy init)			P2 (regular allocation)
3370 	 *					find block B in PA
3371 	 *  copy on-disk bitmap to buddy
3372 	 *  					mark B in on-disk bitmap
3373 	 *					drop PA from group
3374 	 *  mark all PAs in buddy
3375 	 *
3376 	 * thus, P1 initializes buddy with B available. to prevent this
3377 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3378 	 * against that pair
3379 	 */
3380 	ext4_lock_group(sb, grp);
3381 	list_del(&pa->pa_group_list);
3382 	ext4_unlock_group(sb, grp);
3383 
3384 	spin_lock(pa->pa_obj_lock);
3385 	list_del_rcu(&pa->pa_inode_list);
3386 	spin_unlock(pa->pa_obj_lock);
3387 
3388 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3389 }
3390 
3391 /*
3392  * creates new preallocated space for given inode
3393  */
3394 static noinline_for_stack int
3395 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3396 {
3397 	struct super_block *sb = ac->ac_sb;
3398 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3399 	struct ext4_prealloc_space *pa;
3400 	struct ext4_group_info *grp;
3401 	struct ext4_inode_info *ei;
3402 
3403 	/* preallocate only when found space is larger than requested */
3404 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3405 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3406 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3407 
3408 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3409 	if (pa == NULL)
3410 		return -ENOMEM;
3411 
3412 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3413 		int winl;
3414 		int wins;
3415 		int win;
3416 		int offs;
3417 
3418 		/* we can't allocate as much as the normalizer wants,
3419 		 * so the found space must get a proper lstart
3420 		 * to cover the original request */
3421 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3422 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3423 
3424 		/* we're limited by the original request in that
3425 		 * the logical block must be covered anyway;
3426 		 * winl is the window we can move our chunk within */
3427 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3428 
3429 		/* also, we should cover whole original request */
3430 		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
3431 
3432 		/* the smallest one defines real window */
3433 		win = min(winl, wins);
3434 
3435 		offs = ac->ac_o_ex.fe_logical %
3436 			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3437 		if (offs && offs < win)
3438 			win = offs;
3439 
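		/*
		 * shift the found range back by 'win' logical blocks so that
		 * it still covers the originally requested block and, where
		 * possible, starts on a chunk-size-aligned boundary
		 */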
3440 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
3441 			EXT4_NUM_B2C(sbi, win);
3442 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3443 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3444 	}
3445 
3446 	/* preallocation can change ac_b_ex, thus we store actually
3447 	 * allocated blocks for history */
3448 	ac->ac_f_ex = ac->ac_b_ex;
3449 
3450 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3451 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3452 	pa->pa_len = ac->ac_b_ex.fe_len;
3453 	pa->pa_free = pa->pa_len;
3454 	atomic_set(&pa->pa_count, 1);
3455 	spin_lock_init(&pa->pa_lock);
3456 	INIT_LIST_HEAD(&pa->pa_inode_list);
3457 	INIT_LIST_HEAD(&pa->pa_group_list);
3458 	pa->pa_deleted = 0;
3459 	pa->pa_type = MB_INODE_PA;
3460 
3461 	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
3462 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3463 	trace_ext4_mb_new_inode_pa(ac, pa);
3464 
3465 	ext4_mb_use_inode_pa(ac, pa);
3466 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
3467 
3468 	ei = EXT4_I(ac->ac_inode);
3469 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3470 
3471 	pa->pa_obj_lock = &ei->i_prealloc_lock;
3472 	pa->pa_inode = ac->ac_inode;
3473 
3474 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3475 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3476 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3477 
3478 	spin_lock(pa->pa_obj_lock);
3479 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3480 	spin_unlock(pa->pa_obj_lock);
3481 
3482 	return 0;
3483 }
3484 
3485 /*
3486  * creates new preallocated space for the locality group the inode belongs to
3487  */
3488 static noinline_for_stack int
3489 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3490 {
3491 	struct super_block *sb = ac->ac_sb;
3492 	struct ext4_locality_group *lg;
3493 	struct ext4_prealloc_space *pa;
3494 	struct ext4_group_info *grp;
3495 
3496 	/* preallocate only when found space is larger than requested */
3497 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3498 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3499 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3500 
3501 	BUG_ON(ext4_pspace_cachep == NULL);
3502 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3503 	if (pa == NULL)
3504 		return -ENOMEM;
3505 
3506 	/* preallocation can change ac_b_ex, thus we store actually
3507 	 * allocated blocks for history */
3508 	ac->ac_f_ex = ac->ac_b_ex;
3509 
3510 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3511 	pa->pa_lstart = pa->pa_pstart;
3512 	pa->pa_len = ac->ac_b_ex.fe_len;
3513 	pa->pa_free = pa->pa_len;
3514 	atomic_set(&pa->pa_count, 1);
3515 	spin_lock_init(&pa->pa_lock);
3516 	INIT_LIST_HEAD(&pa->pa_inode_list);
3517 	INIT_LIST_HEAD(&pa->pa_group_list);
3518 	pa->pa_deleted = 0;
3519 	pa->pa_type = MB_GROUP_PA;
3520 
3521 	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
3522 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3523 	trace_ext4_mb_new_group_pa(ac, pa);
3524 
3525 	ext4_mb_use_group_pa(ac, pa);
3526 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3527 
3528 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3529 	lg = ac->ac_lg;
3530 	BUG_ON(lg == NULL);
3531 
3532 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3533 	pa->pa_inode = NULL;
3534 
3535 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3536 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3537 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3538 
3539 	/*
3540 	 * We will later add the new pa to the right bucket
3541 	 * after updating the pa_free in ext4_mb_release_context
3542 	 */
3543 	return 0;
3544 }
3545 
3546 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3547 {
3548 	int err;
3549 
3550 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3551 		err = ext4_mb_new_group_pa(ac);
3552 	else
3553 		err = ext4_mb_new_inode_pa(ac);
3554 	return err;
3555 }
3556 
3557 /*
3558  * finds all unused blocks in on-disk bitmap, frees them in
3559  * in-core bitmap and buddy.
3560  * @pa must be unlinked from inode and group lists, so that
3561  * nobody else can find/use it.
3562  * the caller MUST hold group/inode locks.
3563  * TODO: optimize the case when there are no in-core structures yet
3564  */
3565 static noinline_for_stack int
3566 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3567 			struct ext4_prealloc_space *pa)
3568 {
3569 	struct super_block *sb = e4b->bd_sb;
3570 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3571 	unsigned int end;
3572 	unsigned int next;
3573 	ext4_group_t group;
3574 	ext4_grpblk_t bit;
3575 	unsigned long long grp_blk_start;
3576 	int err = 0;
3577 	int free = 0;
3578 
3579 	BUG_ON(pa->pa_deleted == 0);
3580 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3581 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
3582 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3583 	end = bit + pa->pa_len;
3584 
3585 	while (bit < end) {
3586 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3587 		if (bit >= end)
3588 			break;
3589 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3590 		mb_debug(1, "    free preallocated %u/%u in group %u\n",
3591 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3592 			 (unsigned) next - bit, (unsigned) group);
3593 		free += next - bit;
3594 
3595 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
3596 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3597 						    EXT4_C2B(sbi, bit)),
3598 					       next - bit);
3599 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3600 		bit = next + 1;
3601 	}
3602 	if (free != pa->pa_free) {
3603 		ext4_msg(e4b->bd_sb, KERN_CRIT,
3604 			 "pa %p: logic %lu, phys. %lu, len %lu",
3605 			 pa, (unsigned long) pa->pa_lstart,
3606 			 (unsigned long) pa->pa_pstart,
3607 			 (unsigned long) pa->pa_len);
3608 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
3609 					free, pa->pa_free);
3610 		/*
3611 		 * pa is already deleted so we use the value obtained
3612 		 * from the bitmap and continue.
3613 		 */
3614 	}
3615 	atomic_add(free, &sbi->s_mb_discarded);
3616 
3617 	return err;
3618 }
3619 
3620 static noinline_for_stack int
3621 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3622 				struct ext4_prealloc_space *pa)
3623 {
3624 	struct super_block *sb = e4b->bd_sb;
3625 	ext4_group_t group;
3626 	ext4_grpblk_t bit;
3627 
3628 	trace_ext4_mb_release_group_pa(sb, pa);
3629 	BUG_ON(pa->pa_deleted == 0);
3630 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3631 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3632 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3633 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3634 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
3635 
3636 	return 0;
3637 }
3638 
3639 /*
3640  * releases all preallocations in given group
3641  *
3642  * first, we need to decide discard policy:
3643  * - when do we discard
3644  *   1) ENOSPC
3645  * - how many do we discard
3646  *   1) how many requested
3647  */
3648 static noinline_for_stack int
3649 ext4_mb_discard_group_preallocations(struct super_block *sb,
3650 					ext4_group_t group, int needed)
3651 {
3652 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3653 	struct buffer_head *bitmap_bh = NULL;
3654 	struct ext4_prealloc_space *pa, *tmp;
3655 	struct list_head list;
3656 	struct ext4_buddy e4b;
3657 	int err;
3658 	int busy = 0;
3659 	int free = 0;
3660 
3661 	mb_debug(1, "discard preallocation for group %u\n", group);
3662 
3663 	if (list_empty(&grp->bb_prealloc_list))
3664 		return 0;
3665 
3666 	bitmap_bh = ext4_read_block_bitmap(sb, group);
3667 	if (bitmap_bh == NULL) {
3668 		ext4_error(sb, "Error reading block bitmap for %u", group);
3669 		return 0;
3670 	}
3671 
3672 	err = ext4_mb_load_buddy(sb, group, &e4b);
3673 	if (err) {
3674 		ext4_error(sb, "Error loading buddy information for %u", group);
3675 		put_bh(bitmap_bh);
3676 		return 0;
3677 	}
3678 
3679 	if (needed == 0)
3680 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
3681 
3682 	INIT_LIST_HEAD(&list);
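	/*
	 * Collect discardable pa's onto a private list under the group lock;
	 * pa's that are still in use (pa_count != 0) are skipped and, if we
	 * still need more space, retried after yielding the CPU.  The
	 * selected pa's are then released below, still under the group lock.
	 */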
3683 repeat:
3684 	ext4_lock_group(sb, group);
3685 	list_for_each_entry_safe(pa, tmp,
3686 				&grp->bb_prealloc_list, pa_group_list) {
3687 		spin_lock(&pa->pa_lock);
3688 		if (atomic_read(&pa->pa_count)) {
3689 			spin_unlock(&pa->pa_lock);
3690 			busy = 1;
3691 			continue;
3692 		}
3693 		if (pa->pa_deleted) {
3694 			spin_unlock(&pa->pa_lock);
3695 			continue;
3696 		}
3697 
3698 		/* seems this one can be freed ... */
3699 		pa->pa_deleted = 1;
3700 
3701 		/* we can trust pa_free ... */
3702 		free += pa->pa_free;
3703 
3704 		spin_unlock(&pa->pa_lock);
3705 
3706 		list_del(&pa->pa_group_list);
3707 		list_add(&pa->u.pa_tmp_list, &list);
3708 	}
3709 
3710 	/* if we still need more blocks and some PAs were used, try again */
3711 	if (free < needed && busy) {
3712 		busy = 0;
3713 		ext4_unlock_group(sb, group);
3714 		/*
3715 		 * Yield the CPU here so that we don't get a soft lockup
3716 		 * in the non-preempt case.
3717 		 */
3718 		yield();
3719 		goto repeat;
3720 	}
3721 
3722 	/* found anything to free? */
3723 	if (list_empty(&list)) {
3724 		BUG_ON(free != 0);
3725 		goto out;
3726 	}
3727 
3728 	/* now free all selected PAs */
3729 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3730 
3731 		/* remove from object (inode or locality group) */
3732 		spin_lock(pa->pa_obj_lock);
3733 		list_del_rcu(&pa->pa_inode_list);
3734 		spin_unlock(pa->pa_obj_lock);
3735 
3736 		if (pa->pa_type == MB_GROUP_PA)
3737 			ext4_mb_release_group_pa(&e4b, pa);
3738 		else
3739 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3740 
3741 		list_del(&pa->u.pa_tmp_list);
3742 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3743 	}
3744 
3745 out:
3746 	ext4_unlock_group(sb, group);
3747 	ext4_mb_unload_buddy(&e4b);
3748 	put_bh(bitmap_bh);
3749 	return free;
3750 }
3751 
3752 /*
3753  * releases all non-used preallocated blocks for given inode
3754  *
3755  * It's important to discard preallocations under i_data_sem
3756  * We don't want another block to be served from the prealloc
3757  * space when we are discarding the inode prealloc space.
3758  *
3759  * FIXME!! Make sure it is valid at all the call sites
3760  */
3761 void ext4_discard_preallocations(struct inode *inode)
3762 {
3763 	struct ext4_inode_info *ei = EXT4_I(inode);
3764 	struct super_block *sb = inode->i_sb;
3765 	struct buffer_head *bitmap_bh = NULL;
3766 	struct ext4_prealloc_space *pa, *tmp;
3767 	ext4_group_t group = 0;
3768 	struct list_head list;
3769 	struct ext4_buddy e4b;
3770 	int err;
3771 
3772 	if (!S_ISREG(inode->i_mode)) {
3773 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3774 		return;
3775 	}
3776 
3777 	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
3778 	trace_ext4_discard_preallocations(inode);
3779 
3780 	INIT_LIST_HEAD(&list);
3781 
3782 repeat:
3783 	/* first, collect all pa's in the inode */
3784 	spin_lock(&ei->i_prealloc_lock);
3785 	while (!list_empty(&ei->i_prealloc_list)) {
3786 		pa = list_entry(ei->i_prealloc_list.next,
3787 				struct ext4_prealloc_space, pa_inode_list);
3788 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3789 		spin_lock(&pa->pa_lock);
3790 		if (atomic_read(&pa->pa_count)) {
3791 			/* this shouldn't happen often - nobody should
3792 			 * use preallocation while we're discarding it */
3793 			spin_unlock(&pa->pa_lock);
3794 			spin_unlock(&ei->i_prealloc_lock);
3795 			ext4_msg(sb, KERN_ERR,
3796 				 "uh-oh! used pa while discarding");
3797 			WARN_ON(1);
3798 			schedule_timeout_uninterruptible(HZ);
3799 			goto repeat;
3800 
3801 		}
3802 		if (pa->pa_deleted == 0) {
3803 			pa->pa_deleted = 1;
3804 			spin_unlock(&pa->pa_lock);
3805 			list_del_rcu(&pa->pa_inode_list);
3806 			list_add(&pa->u.pa_tmp_list, &list);
3807 			continue;
3808 		}
3809 
3810 		/* someone is deleting pa right now */
3811 		spin_unlock(&pa->pa_lock);
3812 		spin_unlock(&ei->i_prealloc_lock);
3813 
3814 		/* we have to wait here because pa_deleted
3815 		 * doesn't mean the pa is already unlinked from
3816 		 * the list. as we might be called from
3817 		 * ->clear_inode(), the inode will get freed
3818 		 * and a concurrent thread which is unlinking the
3819 		 * pa from the inode's list may access already
3820 		 * freed memory, bad-bad-bad */
3821 
3822 		/* XXX: if this happens too often, we can
3823 		 * add a flag to force wait only in case
3824 		 * of ->clear_inode(), but not in case of
3825 		 * regular truncate */
3826 		schedule_timeout_uninterruptible(HZ);
3827 		goto repeat;
3828 	}
3829 	spin_unlock(&ei->i_prealloc_lock);
3830 
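	/*
	 * The pa's collected above are already marked deleted and unlinked
	 * from the inode; now drop each one from its group and return any
	 * unused blocks to the bitmap and buddy.
	 */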
3831 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3832 		BUG_ON(pa->pa_type != MB_INODE_PA);
3833 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3834 
3835 		err = ext4_mb_load_buddy(sb, group, &e4b);
3836 		if (err) {
3837 			ext4_error(sb, "Error loading buddy information for %u",
3838 					group);
3839 			continue;
3840 		}
3841 
3842 		bitmap_bh = ext4_read_block_bitmap(sb, group);
3843 		if (bitmap_bh == NULL) {
3844 			ext4_error(sb, "Error reading block bitmap for %u",
3845 					group);
3846 			ext4_mb_unload_buddy(&e4b);
3847 			continue;
3848 		}
3849 
3850 		ext4_lock_group(sb, group);
3851 		list_del(&pa->pa_group_list);
3852 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3853 		ext4_unlock_group(sb, group);
3854 
3855 		ext4_mb_unload_buddy(&e4b);
3856 		put_bh(bitmap_bh);
3857 
3858 		list_del(&pa->u.pa_tmp_list);
3859 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3860 	}
3861 }
3862 
3863 #ifdef CONFIG_EXT4_DEBUG
3864 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3865 {
3866 	struct super_block *sb = ac->ac_sb;
3867 	ext4_group_t ngroups, i;
3868 
3869 	if (!mb_enable_debug ||
3870 	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
3871 		return;
3872 
3873 	ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
3874 			" Allocation context details:");
3875 	ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
3876 			ac->ac_status, ac->ac_flags);
3877 	ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
3878 		 	"goal %lu/%lu/%lu@%lu, "
3879 			"best %lu/%lu/%lu@%lu cr %d",
3880 			(unsigned long)ac->ac_o_ex.fe_group,
3881 			(unsigned long)ac->ac_o_ex.fe_start,
3882 			(unsigned long)ac->ac_o_ex.fe_len,
3883 			(unsigned long)ac->ac_o_ex.fe_logical,
3884 			(unsigned long)ac->ac_g_ex.fe_group,
3885 			(unsigned long)ac->ac_g_ex.fe_start,
3886 			(unsigned long)ac->ac_g_ex.fe_len,
3887 			(unsigned long)ac->ac_g_ex.fe_logical,
3888 			(unsigned long)ac->ac_b_ex.fe_group,
3889 			(unsigned long)ac->ac_b_ex.fe_start,
3890 			(unsigned long)ac->ac_b_ex.fe_len,
3891 			(unsigned long)ac->ac_b_ex.fe_logical,
3892 			(int)ac->ac_criteria);
3893 	ext4_msg(ac->ac_sb, KERN_ERR, "%lu scanned, %d found",
3894 		 ac->ac_ex_scanned, ac->ac_found);
3895 	ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
3896 	ngroups = ext4_get_groups_count(sb);
3897 	for (i = 0; i < ngroups; i++) {
3898 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3899 		struct ext4_prealloc_space *pa;
3900 		ext4_grpblk_t start;
3901 		struct list_head *cur;
3902 		ext4_lock_group(sb, i);
3903 		list_for_each(cur, &grp->bb_prealloc_list) {
3904 			pa = list_entry(cur, struct ext4_prealloc_space,
3905 					pa_group_list);
3906 			spin_lock(&pa->pa_lock);
3907 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3908 						     NULL, &start);
3909 			spin_unlock(&pa->pa_lock);
3910 			printk(KERN_ERR "PA:%u:%d:%u \n", i,
3911 			       start, pa->pa_len);
3912 		}
3913 		ext4_unlock_group(sb, i);
3914 
3915 		if (grp->bb_free == 0)
3916 			continue;
3917 		printk(KERN_ERR "%u: %d/%d \n",
3918 		       i, grp->bb_free, grp->bb_fragments);
3919 	}
3920 	printk(KERN_ERR "\n");
3921 }
3922 #else
3923 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3924 {
3925 	return;
3926 }
3927 #endif
3928 
3929 /*
3930  * We use locality group preallocation for small files. The size of the
3931  * file is determined by the current size or the resulting size after
3932  * allocation, whichever is larger
3933  *
3934  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
3935  */
3936 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3937 {
3938 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3939 	int bsbits = ac->ac_sb->s_blocksize_bits;
3940 	loff_t size, isize;
3941 
3942 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3943 		return;
3944 
3945 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3946 		return;
3947 
3948 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3949 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
3950 		>> bsbits;
3951 
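	/*
	 * If this allocation only fills the file out to its current i_size
	 * and nobody holds the file open for writing, the file is unlikely
	 * to grow any further, so skip preallocation altogether.
	 */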
3952 	if ((size == isize) &&
3953 	    !ext4_fs_is_busy(sbi) &&
3954 	    (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
3955 		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3956 		return;
3957 	}
3958 
3959 	if (sbi->s_mb_group_prealloc <= 0) {
3960 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3961 		return;
3962 	}
3963 
3964 	/* don't use group allocation for large files */
3965 	size = max(size, isize);
3966 	if (size > sbi->s_mb_stream_request) {
3967 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3968 		return;
3969 	}
3970 
3971 	BUG_ON(ac->ac_lg != NULL);
3972 	/*
3973 	 * locality group prealloc space is per-CPU. The reason for having a
3974 	 * per-CPU locality group is to reduce contention between block
3975 	 * requests from multiple CPUs.
3976 	 */
3977 	ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
3978 
3979 	/* we're going to use group allocation */
3980 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
3981 
3982 	/* serialize all allocations in the group */
3983 	mutex_lock(&ac->ac_lg->lg_mutex);
3984 }
3985 
3986 static noinline_for_stack int
3987 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
3988 				struct ext4_allocation_request *ar)
3989 {
3990 	struct super_block *sb = ar->inode->i_sb;
3991 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3992 	struct ext4_super_block *es = sbi->s_es;
3993 	ext4_group_t group;
3994 	unsigned int len;
3995 	ext4_fsblk_t goal;
3996 	ext4_grpblk_t block;
3997 
3998 	/* we can't allocate > group size */
3999 	len = ar->len;
4000 
4001 	/* just a dirty hack to filter too big requests  */
4002 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb) - 10)
4003 		len = EXT4_CLUSTERS_PER_GROUP(sb) - 10;
4004 
4005 	/* start searching from the goal */
4006 	goal = ar->goal;
4007 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4008 			goal >= ext4_blocks_count(es))
4009 		goal = le32_to_cpu(es->s_first_data_block);
4010 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4011 
4012 	/* set up allocation goals */
4013 	memset(ac, 0, sizeof(struct ext4_allocation_context));
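	/* round the logical block down to a cluster boundary; with bigalloc
	 * the allocation unit is a cluster of s_cluster_ratio blocks */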
4014 	ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
4015 	ac->ac_status = AC_STATUS_CONTINUE;
4016 	ac->ac_sb = sb;
4017 	ac->ac_inode = ar->inode;
4018 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4019 	ac->ac_o_ex.fe_group = group;
4020 	ac->ac_o_ex.fe_start = block;
4021 	ac->ac_o_ex.fe_len = len;
4022 	ac->ac_g_ex = ac->ac_o_ex;
4023 	ac->ac_flags = ar->flags;
4024 
4025 	/* we have to define the context: will we work with a file or a
4026 	 * locality group? this is a policy decision, actually */
4027 	ext4_mb_group_or_file(ac);
4028 
4029 	mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4030 			"left: %u/%u, right %u/%u to %swritable\n",
4031 			(unsigned) ar->len, (unsigned) ar->logical,
4032 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4033 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4034 			(unsigned) ar->lright, (unsigned) ar->pright,
4035 			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4036 	return 0;
4037 
4038 }
4039 
4040 static noinline_for_stack void
4041 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4042 					struct ext4_locality_group *lg,
4043 					int order, int total_entries)
4044 {
4045 	ext4_group_t group = 0;
4046 	struct ext4_buddy e4b;
4047 	struct list_head discard_list;
4048 	struct ext4_prealloc_space *pa, *tmp;
4049 
4050 	mb_debug(1, "discard locality group preallocation\n");
4051 
4052 	INIT_LIST_HEAD(&discard_list);
4053 
4054 	spin_lock(&lg->lg_prealloc_lock);
4055 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4056 						pa_inode_list) {
4057 		spin_lock(&pa->pa_lock);
4058 		if (atomic_read(&pa->pa_count)) {
4059 			/*
4060 			 * This is the pa that we just used
4061 			 * for block allocation. So don't
4062 			 * free that
4063 			 */
4064 			spin_unlock(&pa->pa_lock);
4065 			continue;
4066 		}
4067 		if (pa->pa_deleted) {
4068 			spin_unlock(&pa->pa_lock);
4069 			continue;
4070 		}
4071 		/* only lg prealloc space */
4072 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4073 
4074 		/* seems this one can be freed ... */
4075 		pa->pa_deleted = 1;
4076 		spin_unlock(&pa->pa_lock);
4077 
4078 		list_del_rcu(&pa->pa_inode_list);
4079 		list_add(&pa->u.pa_tmp_list, &discard_list);
4080 
4081 		total_entries--;
4082 		if (total_entries <= 5) {
4083 			/*
4084 			 * we want to keep only 5 entries,
4085 			 * allowing it to grow to 8. This
4086 			 * makes sure we don't call discard
4087 			 * again soon for this list.
4088 			 */
4089 			break;
4090 		}
4091 	}
4092 	spin_unlock(&lg->lg_prealloc_lock);
4093 
4094 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4095 
4096 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4097 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
4098 			ext4_error(sb, "Error loading buddy information for %u",
4099 					group);
4100 			continue;
4101 		}
4102 		ext4_lock_group(sb, group);
4103 		list_del(&pa->pa_group_list);
4104 		ext4_mb_release_group_pa(&e4b, pa);
4105 		ext4_unlock_group(sb, group);
4106 
4107 		ext4_mb_unload_buddy(&e4b);
4108 		list_del(&pa->u.pa_tmp_list);
4109 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4110 	}
4111 }
4112 
4113 /*
4114  * We have incremented pa_count. So it cannot be freed at this
4115  * point. Also we hold lg_mutex. So no parallel allocation is
4116  * possible from this lg. That means pa_free cannot be updated.
4117  *
4118  * A parallel ext4_mb_discard_group_preallocations() is possible,
4119  * however, which can cause the lg_prealloc_list to be updated.
4120  */
4121 
4122 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4123 {
4124 	int order, added = 0, lg_prealloc_count = 1;
4125 	struct super_block *sb = ac->ac_sb;
4126 	struct ext4_locality_group *lg = ac->ac_lg;
4127 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4128 
4129 	order = fls(pa->pa_free) - 1;
4130 	if (order > PREALLOC_TB_SIZE - 1)
4131 		/* The max size of hash table is PREALLOC_TB_SIZE */
4132 		order = PREALLOC_TB_SIZE - 1;
4133 	/* Add the prealloc space to lg */
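	/* each bucket is kept sorted by pa_free in ascending order: the new
	 * pa is inserted just before the first entry with more free blocks */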
4134 	spin_lock(&lg->lg_prealloc_lock);
4135 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4136 						pa_inode_list) {
4137 		spin_lock(&tmp_pa->pa_lock);
4138 		if (tmp_pa->pa_deleted) {
4139 			spin_unlock(&tmp_pa->pa_lock);
4140 			continue;
4141 		}
4142 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4143 			/* Add to the tail of the previous entry */
4144 			list_add_tail_rcu(&pa->pa_inode_list,
4145 						&tmp_pa->pa_inode_list);
4146 			added = 1;
4147 			/*
4148 			 * we want to count the total
4149 			 * number of entries in the list
4150 			 */
4151 		}
4152 		spin_unlock(&tmp_pa->pa_lock);
4153 		lg_prealloc_count++;
4154 	}
4155 	if (!added)
4156 		list_add_tail_rcu(&pa->pa_inode_list,
4157 					&lg->lg_prealloc_list[order]);
4158 	spin_unlock(&lg->lg_prealloc_lock);
4159 
4160 	/* Now trim the list to be not more than 8 elements */
4161 	if (lg_prealloc_count > 8) {
4162 		ext4_mb_discard_lg_preallocations(sb, lg,
4163 						  order, lg_prealloc_count);
4164 		return;
4165 	}
4166 	return;
4167 }
4168 
4169 /*
4170  * release all resource we used in allocation
4171  */
4172 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4173 {
4174 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4175 	struct ext4_prealloc_space *pa = ac->ac_pa;
4176 	if (pa) {
4177 		if (pa->pa_type == MB_GROUP_PA) {
4178 			/* see comment in ext4_mb_use_group_pa() */
4179 			spin_lock(&pa->pa_lock);
4180 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4181 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4182 			pa->pa_free -= ac->ac_b_ex.fe_len;
4183 			pa->pa_len -= ac->ac_b_ex.fe_len;
4184 			spin_unlock(&pa->pa_lock);
4185 		}
4186 	}
4187 	if (pa) {
4188 		/*
4189 		 * We want to add the pa to the right bucket.
4190 		 * Remove it from the list and while adding
4191 		 * make sure the list to which we are adding
4192 		 * doesn't grow big.
4193 		 */
4194 		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4195 			spin_lock(pa->pa_obj_lock);
4196 			list_del_rcu(&pa->pa_inode_list);
4197 			spin_unlock(pa->pa_obj_lock);
4198 			ext4_mb_add_n_trim(ac);
4199 		}
4200 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
4201 	}
4202 	if (ac->ac_bitmap_page)
4203 		page_cache_release(ac->ac_bitmap_page);
4204 	if (ac->ac_buddy_page)
4205 		page_cache_release(ac->ac_buddy_page);
4206 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4207 		mutex_unlock(&ac->ac_lg->lg_mutex);
4208 	ext4_mb_collect_stats(ac);
4209 	return 0;
4210 }
4211 
4212 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4213 {
4214 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4215 	int ret;
4216 	int freed = 0;
4217 
4218 	trace_ext4_mb_discard_preallocations(sb, needed);
4219 	for (i = 0; i < ngroups && needed > 0; i++) {
4220 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4221 		freed += ret;
4222 		needed -= ret;
4223 	}
4224 
4225 	return freed;
4226 }
4227 
4228 /*
4229  * Main entry point into mballoc to allocate blocks
4230  * it tries to use preallocation first, then falls back
4231  * to usual allocation
4232  */
4233 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4234 				struct ext4_allocation_request *ar, int *errp)
4235 {
4236 	int freed;
4237 	struct ext4_allocation_context *ac = NULL;
4238 	struct ext4_sb_info *sbi;
4239 	struct super_block *sb;
4240 	ext4_fsblk_t block = 0;
4241 	unsigned int inquota = 0;
4242 	unsigned int reserv_clstrs = 0;
4243 
4244 	sb = ar->inode->i_sb;
4245 	sbi = EXT4_SB(sb);
4246 
4247 	trace_ext4_request_blocks(ar);
4248 
4249 	/* Allow to use superuser reservation for quota file */
4250 	if (IS_NOQUOTA(ar->inode))
4251 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4252 
4253 	/*
4254 	 * For delayed allocation, we could skip the ENOSPC and
4255 	 * EDQUOT check, as blocks and quotas have been already
4256 	 * reserved when data being copied into pagecache.
4257 	 */
4258 	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
4259 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4260 	else {
4261 		/* Without delayed allocation we need to verify
4262 		 * there are enough free blocks to do block allocation
4263 		 * and verify allocation doesn't exceed the quota limits.
4264 		 */
4265 		while (ar->len &&
4266 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
4267 
4268 			/* let others free the space */
4269 			yield();
4270 			ar->len = ar->len >> 1;
4271 		}
4272 		if (!ar->len) {
4273 			*errp = -ENOSPC;
4274 			return 0;
4275 		}
4276 		reserv_clstrs = ar->len;
4277 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4278 			dquot_alloc_block_nofail(ar->inode,
4279 						 EXT4_C2B(sbi, ar->len));
4280 		} else {
4281 			while (ar->len &&
4282 				dquot_alloc_block(ar->inode,
4283 						  EXT4_C2B(sbi, ar->len))) {
4284 
4285 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4286 				ar->len--;
4287 			}
4288 		}
4289 		inquota = ar->len;
4290 		if (ar->len == 0) {
4291 			*errp = -EDQUOT;
4292 			goto out;
4293 		}
4294 	}
4295 
4296 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4297 	if (!ac) {
4298 		ar->len = 0;
4299 		*errp = -ENOMEM;
4300 		goto out;
4301 	}
4302 
4303 	*errp = ext4_mb_initialize_context(ac, ar);
4304 	if (*errp) {
4305 		ar->len = 0;
4306 		goto out;
4307 	}
4308 
4309 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4310 	if (!ext4_mb_use_preallocated(ac)) {
4311 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4312 		ext4_mb_normalize_request(ac, ar);
4313 repeat:
4314 		/* allocate space in core */
4315 		*errp = ext4_mb_regular_allocator(ac);
4316 		if (*errp)
4317 			goto errout;
4318 
4319 		/* as we've just preallocated more space than
4320 		 * the user requested originally, we store the allocated
4321 		 * space in a special descriptor */
4322 		if (ac->ac_status == AC_STATUS_FOUND &&
4323 				ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4324 			ext4_mb_new_preallocation(ac);
4325 	}
4326 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4327 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
4328 		if (*errp == -EAGAIN) {
4329 			/*
4330 			 * drop the reference that we took
4331 			 * in ext4_mb_use_best_found
4332 			 */
4333 			ext4_mb_release_context(ac);
4334 			ac->ac_b_ex.fe_group = 0;
4335 			ac->ac_b_ex.fe_start = 0;
4336 			ac->ac_b_ex.fe_len = 0;
4337 			ac->ac_status = AC_STATUS_CONTINUE;
4338 			goto repeat;
4339 		} else if (*errp)
4340 		errout:
4341 			ext4_discard_allocated_blocks(ac);
4342 		else {
4343 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4344 			ar->len = ac->ac_b_ex.fe_len;
4345 		}
4346 	} else {
4347 		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4348 		if (freed)
4349 			goto repeat;
4350 		*errp = -ENOSPC;
4351 	}
4352 
4353 	if (*errp) {
4354 		ac->ac_b_ex.fe_len = 0;
4355 		ar->len = 0;
4356 		ext4_mb_show_ac(ac);
4357 	}
4358 	ext4_mb_release_context(ac);
4359 out:
4360 	if (ac)
4361 		kmem_cache_free(ext4_ac_cachep, ac);
4362 	if (inquota && ar->len < inquota)
4363 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
4364 	if (!ar->len) {
4365 		if (!ext4_test_inode_state(ar->inode,
4366 					   EXT4_STATE_DELALLOC_RESERVED))
4367 			/* release all the reserved blocks if non delalloc */
4368 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4369 						reserv_clstrs);
4370 	}
4371 
4372 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
4373 
4374 	return block;
4375 }
4376 
4377 /*
4378  * We can merge two free data extents only if the physical blocks
4379  * are contiguous, AND the extents were freed by the same transaction,
4380  * AND the blocks are associated with the same group.
4381  */
4382 static int can_merge(struct ext4_free_data *entry1,
4383 			struct ext4_free_data *entry2)
4384 {
4385 	if ((entry1->efd_tid == entry2->efd_tid) &&
4386 	    (entry1->efd_group == entry2->efd_group) &&
4387 	    ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
4388 		return 1;
4389 	return 0;
4390 }
4391 
4392 static noinline_for_stack int
4393 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4394 		      struct ext4_free_data *new_entry)
4395 {
4396 	ext4_group_t group = e4b->bd_group;
4397 	ext4_grpblk_t cluster;
4398 	struct ext4_free_data *entry;
4399 	struct ext4_group_info *db = e4b->bd_info;
4400 	struct super_block *sb = e4b->bd_sb;
4401 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4402 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
4403 	struct rb_node *parent = NULL, *new_node;
4404 
4405 	BUG_ON(!ext4_handle_valid(handle));
4406 	BUG_ON(e4b->bd_bitmap_page == NULL);
4407 	BUG_ON(e4b->bd_buddy_page == NULL);
4408 
4409 	new_node = &new_entry->efd_node;
4410 	cluster = new_entry->efd_start_cluster;
4411 
4412 	if (!*n) {
4413 		/* first free block extent. We need to
4414 		 * protect the buddy cache from being freed,
4415 		 * otherwise we'll refresh it from
4416 		 * on-disk bitmap and lose not-yet-available
4417 		 * blocks */
4418 		page_cache_get(e4b->bd_buddy_page);
4419 		page_cache_get(e4b->bd_bitmap_page);
4420 	}
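	/* standard rb-tree insertion: descend by starting cluster to find the
	 * link point; an overlap with an existing entry indicates a double
	 * free and is reported instead of being inserted */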
4421 	while (*n) {
4422 		parent = *n;
4423 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
4424 		if (cluster < entry->efd_start_cluster)
4425 			n = &(*n)->rb_left;
4426 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
4427 			n = &(*n)->rb_right;
4428 		else {
4429 			ext4_grp_locked_error(sb, group, 0,
4430 				ext4_group_first_block_no(sb, group) +
4431 				EXT4_C2B(sbi, cluster),
4432 				"Block already on to-be-freed list");
4433 			return 0;
4434 		}
4435 	}
4436 
4437 	rb_link_node(new_node, parent, n);
4438 	rb_insert_color(new_node, &db->bb_free_root);
4439 
4440 	/* Now try to see if the extent can be merged to the left and right */
4441 	node = rb_prev(new_node);
4442 	if (node) {
4443 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4444 		if (can_merge(entry, new_entry) &&
4445 		    ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4446 			new_entry->efd_start_cluster = entry->efd_start_cluster;
4447 			new_entry->efd_count += entry->efd_count;
4448 			rb_erase(node, &(db->bb_free_root));
4449 			kmem_cache_free(ext4_free_data_cachep, entry);
4450 		}
4451 	}
4452 
4453 	node = rb_next(new_node);
4454 	if (node) {
4455 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4456 		if (can_merge(new_entry, entry) &&
4457 		    ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4458 			new_entry->efd_count += entry->efd_count;
4459 			rb_erase(node, &(db->bb_free_root));
4460 			kmem_cache_free(ext4_free_data_cachep, entry);
4461 		}
4462 	}
4463 	/* Add the extent to transaction's private list */
4464 	ext4_journal_callback_add(handle, ext4_free_data_callback,
4465 				  &new_entry->efd_jce);
4466 	return 0;
4467 }
4468 
4469 /**
4470  * ext4_free_blocks() -- Free given blocks and update quota
4471  * @handle:		handle for this transaction
4472  * @inode:		inode
4473  * @block:		start physical block to free
4474  * @count:		number of blocks to free
4475  * @flags:		flags used by ext4_free_blocks
4476  */
4477 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4478 		      struct buffer_head *bh, ext4_fsblk_t block,
4479 		      unsigned long count, int flags)
4480 {
4481 	struct buffer_head *bitmap_bh = NULL;
4482 	struct super_block *sb = inode->i_sb;
4483 	struct ext4_group_desc *gdp;
4484 	unsigned long freed = 0;
4485 	unsigned int overflow;
4486 	ext4_grpblk_t bit;
4487 	struct buffer_head *gd_bh;
4488 	ext4_group_t block_group;
4489 	struct ext4_sb_info *sbi;
4490 	struct ext4_buddy e4b;
4491 	unsigned int count_clusters;
4492 	int err = 0;
4493 	int ret;
4494 
4495 	if (bh) {
4496 		if (block)
4497 			BUG_ON(block != bh->b_blocknr);
4498 		else
4499 			block = bh->b_blocknr;
4500 	}
4501 
4502 	sbi = EXT4_SB(sb);
4503 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4504 	    !ext4_data_block_valid(sbi, block, count)) {
4505 		ext4_error(sb, "Freeing blocks not in datazone - "
4506 			   "block = %llu, count = %lu", block, count);
4507 		goto error_return;
4508 	}
4509 
4510 	ext4_debug("freeing block %llu\n", block);
4511 	trace_ext4_free_blocks(inode, block, count, flags);
4512 
4513 	if (flags & EXT4_FREE_BLOCKS_FORGET) {
4514 		struct buffer_head *tbh = bh;
4515 		int i;
4516 
4517 		BUG_ON(bh && (count > 1));
4518 
4519 		for (i = 0; i < count; i++) {
4520 			if (!bh)
4521 				tbh = sb_find_get_block(inode->i_sb,
4522 							block + i);
4523 			if (unlikely(!tbh))
4524 				continue;
4525 			ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4526 				    inode, tbh, block + i);
4527 		}
4528 	}
4529 
4530 	/*
4531 	 * We need to make sure we don't reuse the freed block until
4532 	 * after the transaction is committed, which we can do by
4533 	 * treating the block as metadata, below.  We make an
4534 	 * exception if the inode is to be written in writeback mode
4535 	 * since writeback mode has weak data consistency guarantees.
4536 	 */
4537 	if (!ext4_should_writeback_data(inode))
4538 		flags |= EXT4_FREE_BLOCKS_METADATA;
4539 
4540 	/*
4541 	 * If the extent to be freed does not begin on a cluster
4542 	 * boundary, we need to deal with partial clusters at the
4543 	 * beginning and end of the extent.  Normally we will free
4544 	 * blocks at the beginning or the end unless we are explicitly
4545 	 * requested to avoid doing so.
4546 	 */
4547 	overflow = block & (sbi->s_cluster_ratio - 1);
4548 	if (overflow) {
4549 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4550 			overflow = sbi->s_cluster_ratio - overflow;
4551 			block += overflow;
4552 			if (count > overflow)
4553 				count -= overflow;
4554 			else
4555 				return;
4556 		} else {
4557 			block -= overflow;
4558 			count += overflow;
4559 		}
4560 	}
4561 	overflow = count & (sbi->s_cluster_ratio - 1);
4562 	if (overflow) {
4563 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4564 			if (count > overflow)
4565 				count -= overflow;
4566 			else
4567 				return;
4568 		} else
4569 			count += sbi->s_cluster_ratio - overflow;
4570 	}
4571 
4572 do_more:
4573 	overflow = 0;
4574 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4575 
4576 	/*
4577 	 * Check to see if we are freeing blocks across a group
4578 	 * boundary.
4579 	 */
4580 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4581 		overflow = EXT4_C2B(sbi, bit) + count -
4582 			EXT4_BLOCKS_PER_GROUP(sb);
4583 		count -= overflow;
4584 	}
4585 	count_clusters = EXT4_NUM_B2C(sbi, count);
4586 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4587 	if (!bitmap_bh) {
4588 		err = -EIO;
4589 		goto error_return;
4590 	}
4591 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4592 	if (!gdp) {
4593 		err = -EIO;
4594 		goto error_return;
4595 	}
4596 
4597 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4598 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4599 	    in_range(block, ext4_inode_table(sb, gdp),
4600 		     EXT4_SB(sb)->s_itb_per_group) ||
4601 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4602 		     EXT4_SB(sb)->s_itb_per_group)) {
4603 
4604 		ext4_error(sb, "Freeing blocks in system zone - "
4605 			   "Block = %llu, count = %lu", block, count);
4606 		/* err = 0. ext4_std_error should be a no op */
4607 		goto error_return;
4608 	}
4609 
4610 	BUFFER_TRACE(bitmap_bh, "getting write access");
4611 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4612 	if (err)
4613 		goto error_return;
4614 
4615 	/*
4616 	 * We are about to modify some metadata.  Call the journal APIs
4617 	 * to unshare ->b_data if a currently-committing transaction is
4618 	 * using it
4619 	 */
4620 	BUFFER_TRACE(gd_bh, "get_write_access");
4621 	err = ext4_journal_get_write_access(handle, gd_bh);
4622 	if (err)
4623 		goto error_return;
4624 #ifdef AGGRESSIVE_CHECK
4625 	{
4626 		int i;
4627 		for (i = 0; i < count_clusters; i++)
4628 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4629 	}
4630 #endif
4631 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
4632 
4633 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4634 	if (err)
4635 		goto error_return;
4636 
4637 	if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
4638 		struct ext4_free_data *new_entry;
4639 		/*
4640 		 * blocks being freed are metadata. these blocks shouldn't
4641 		 * be used until this transaction is committed
4642 		 */
4643 	retry:
4644 		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
4645 		if (!new_entry) {
4646 			/*
4647 			 * We use a retry loop because
4648 			 * ext4_free_blocks() is not allowed to fail.
4649 			 */
4650 			cond_resched();
4651 			congestion_wait(BLK_RW_ASYNC, HZ/50);
4652 			goto retry;
4653 		}
4654 		new_entry->efd_start_cluster = bit;
4655 		new_entry->efd_group = block_group;
4656 		new_entry->efd_count = count_clusters;
4657 		new_entry->efd_tid = handle->h_transaction->t_tid;
4658 
4659 		ext4_lock_group(sb, block_group);
4660 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4661 		ext4_mb_free_metadata(handle, &e4b, new_entry);
4662 	} else {
4663 		/* need to update group_info->bb_free and bitmap
4664 		 * with the group lock held. generate_buddy looks at
4665 		 * them with the group lock held
4666 		 */
4667 		ext4_lock_group(sb, block_group);
4668 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4669 		mb_free_blocks(inode, &e4b, bit, count_clusters);
4670 	}
4671 
4672 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4673 	ext4_free_group_clusters_set(sb, gdp, ret);
4674 	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4675 	ext4_unlock_group(sb, block_group);
4676 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4677 
4678 	if (sbi->s_log_groups_per_flex) {
4679 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4680 		atomic64_add(count_clusters,
4681 			     &sbi->s_flex_groups[flex_group].free_clusters);
4682 	}
4683 
4684 	ext4_mb_unload_buddy(&e4b);
4685 
4686 	freed += count;
4687 
4688 	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4689 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4690 
4691 	/* We dirtied the bitmap block */
4692 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4693 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4694 
4695 	/* And the group descriptor block */
4696 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4697 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4698 	if (!err)
4699 		err = ret;
4700 
4701 	if (overflow && !err) {
4702 		block += count;
4703 		count = overflow;
4704 		put_bh(bitmap_bh);
4705 		goto do_more;
4706 	}
4707 	ext4_mark_super_dirty(sb);
4708 error_return:
4709 	brelse(bitmap_bh);
4710 	ext4_std_error(sb, err);
4711 	return;
4712 }
4713 
4714 /**
4715  * ext4_group_add_blocks() -- Add given blocks to an existing group
4716  * @handle:			handle to this transaction
4717  * @sb:				super block
4718  * @block:			start physical block to add to the block group
4719  * @count:			number of blocks to free
4720  *
4721  * This marks the blocks as free in the bitmap and buddy.
4722  */
4723 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4724 			 ext4_fsblk_t block, unsigned long count)
4725 {
4726 	struct buffer_head *bitmap_bh = NULL;
4727 	struct buffer_head *gd_bh;
4728 	ext4_group_t block_group;
4729 	ext4_grpblk_t bit;
4730 	unsigned int i;
4731 	struct ext4_group_desc *desc;
4732 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4733 	struct ext4_buddy e4b;
4734 	int err = 0, ret, blk_free_count;
4735 	ext4_grpblk_t blocks_freed;
4736 
4737 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4738 
4739 	if (count == 0)
4740 		return 0;
4741 
4742 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4743 	/*
4744 	 * Check to see if we are freeing blocks across a group
4745 	 * boundary.
4746 	 */
4747 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4748 		ext4_warning(sb, "too many blocks added to group %u\n",
4749 			     block_group);
4750 		err = -EINVAL;
4751 		goto error_return;
4752 	}
4753 
4754 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4755 	if (!bitmap_bh) {
4756 		err = -EIO;
4757 		goto error_return;
4758 	}
4759 
4760 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4761 	if (!desc) {
4762 		err = -EIO;
4763 		goto error_return;
4764 	}
4765 
4766 	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4767 	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4768 	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4769 	    in_range(block + count - 1, ext4_inode_table(sb, desc),
4770 		     sbi->s_itb_per_group)) {
4771 		ext4_error(sb, "Adding blocks in system zones - "
4772 			   "Block = %llu, count = %lu",
4773 			   block, count);
4774 		err = -EINVAL;
4775 		goto error_return;
4776 	}
4777 
4778 	BUFFER_TRACE(bitmap_bh, "getting write access");
4779 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4780 	if (err)
4781 		goto error_return;
4782 
4783 	/*
4784 	 * We are about to modify some metadata.  Call the journal APIs
4785 	 * to unshare ->b_data if a currently-committing transaction is
4786 	 * using it
4787 	 */
4788 	BUFFER_TRACE(gd_bh, "get_write_access");
4789 	err = ext4_journal_get_write_access(handle, gd_bh);
4790 	if (err)
4791 		goto error_return;
4792 
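	/* count only the blocks whose bitmap bits are actually set; bits that
	 * are already clear are reported but must not inflate the free-block
	 * counters */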
4793 	for (i = 0, blocks_freed = 0; i < count; i++) {
4794 		BUFFER_TRACE(bitmap_bh, "clear bit");
4795 		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
4796 			ext4_error(sb, "bit already cleared for block %llu",
4797 				   (ext4_fsblk_t)(block + i));
4798 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
4799 		} else {
4800 			blocks_freed++;
4801 		}
4802 	}
4803 
4804 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4805 	if (err)
4806 		goto error_return;
4807 
4808 	/*
4809 	 * need to update group_info->bb_free and bitmap
4810 	 * with the group lock held. generate_buddy looks at
4811 	 * them with the group lock held
4812 	 */
4813 	ext4_lock_group(sb, block_group);
4814 	mb_clear_bits(bitmap_bh->b_data, bit, count);
4815 	mb_free_blocks(NULL, &e4b, bit, count);
4816 	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
4817 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
4818 	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
4819 	ext4_unlock_group(sb, block_group);
4820 	percpu_counter_add(&sbi->s_freeclusters_counter,
4821 			   EXT4_NUM_B2C(sbi, blocks_freed));
4822 
4823 	if (sbi->s_log_groups_per_flex) {
4824 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4825 		atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
4826 			     &sbi->s_flex_groups[flex_group].free_clusters);
4827 	}
4828 
4829 	ext4_mb_unload_buddy(&e4b);
4830 
4831 	/* We dirtied the bitmap block */
4832 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4833 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4834 
4835 	/* And the group descriptor block */
4836 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4837 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4838 	if (!err)
4839 		err = ret;
4840 
4841 error_return:
4842 	brelse(bitmap_bh);
4843 	ext4_std_error(sb, err);
4844 	return err;
4845 }
4846 
4847 /**
4848  * ext4_trim_extent -- function to TRIM one single free extent in the group
4849  * @sb:		super block for the file system
4850  * @start:	starting block of the free extent in the alloc. group
4851  * @count:	number of blocks to TRIM
4852  * @group:	alloc. group we are working with
4853  * @e4b:	ext4 buddy for the group
4854  * @blkdev_flags: flags for the block device
4855  *
4856  * Trim "count" blocks starting at "start" in the "group". To assure that no
4857  * one will allocate those blocks, mark it as used in buddy bitmap. This must
4858  * be called with under the group lock.
4859  */
4860 static void ext4_trim_extent(struct super_block *sb, int start, int count,
4861 			    ext4_group_t group, struct ext4_buddy *e4b,
4862 			    unsigned long blkdev_flags)
4863 {
4864 	struct ext4_free_extent ex;
4865 
4866 	trace_ext4_trim_extent(sb, group, start, count);
4867 
4868 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
4869 
4870 	ex.fe_start = start;
4871 	ex.fe_group = group;
4872 	ex.fe_len = count;
4873 
4874 	/*
4875 	 * Mark blocks used, so no one can reuse them while
4876 	 * being trimmed.
4877 	 */
4878 	mb_mark_used(e4b, &ex);
4879 	ext4_unlock_group(sb, group);
4880 	ext4_issue_discard(sb, group, start, count, blkdev_flags);
4881 	ext4_lock_group(sb, group);
4882 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
4883 }
4884 
4885 /**
4886  * ext4_trim_all_free -- function to trim all free space in alloc. group
4887  * @sb:			super block for file system
4888  * @group:		group to be trimmed
4889  * @start:		first group block to examine
4890  * @max:		last group block to examine
4891  * @minblocks:		minimum extent block count
4892  * @blkdev_flags:	flags for the block device
4893  *
4894  * ext4_trim_all_free walks through the group's buddy bitmap searching for
4895  * free extents. When a free extent of at least minblocks is found, it is
4896  * marked as used in the group buddy bitmap, a TRIM command is issued on the
4897  * extent, and the extent is then freed again in the buddy bitmap. This is
4898  * done until the whole group has been scanned.
4903  */
4904 static ext4_grpblk_t
4905 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
4906 		   ext4_grpblk_t start, ext4_grpblk_t max,
4907 		   ext4_grpblk_t minblocks, unsigned long blkdev_flags)
4908 {
4909 	void *bitmap;
4910 	ext4_grpblk_t next, count = 0, free_count = 0;
4911 	struct ext4_buddy e4b;
4912 	int ret;
4913 
4914 	trace_ext4_trim_all_free(sb, group, start, max);
4915 
4916 	ret = ext4_mb_load_buddy(sb, group, &e4b);
4917 	if (ret) {
4918 		ext4_error(sb, "Error in loading buddy "
4919 				"information for %u", group);
4920 		return ret;
4921 	}
4922 	bitmap = e4b.bd_bitmap;
4923 
4924 	ext4_lock_group(sb, group);
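	/* nothing new to do if the group was already trimmed with a minimum
	 * extent length no larger than the current request */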
4925 	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
4926 	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
4927 		goto out;
4928 
4929 	start = (e4b.bd_info->bb_first_free > start) ?
4930 		e4b.bd_info->bb_first_free : start;
4931 
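	/* walk the bitmap between start and max: every run of zero bits is a
	 * free extent; extents of at least minblocks clusters get trimmed */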
4932 	while (start <= max) {
4933 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
4934 		if (start > max)
4935 			break;
4936 		next = mb_find_next_bit(bitmap, max + 1, start);
4937 
4938 		if ((next - start) >= minblocks) {
4939 			ext4_trim_extent(sb, start,
4940 					 next - start, group, &e4b, blkdev_flags);
4941 			count += next - start;
4942 		}
4943 		free_count += next - start;
4944 		start = next + 1;
4945 
4946 		if (fatal_signal_pending(current)) {
4947 			count = -ERESTARTSYS;
4948 			break;
4949 		}
4950 
4951 		if (need_resched()) {
4952 			ext4_unlock_group(sb, group);
4953 			cond_resched();
4954 			ext4_lock_group(sb, group);
4955 		}
4956 
4957 		if ((e4b.bd_info->bb_free - free_count) < minblocks)
4958 			break;
4959 	}
4960 
4961 	if (!ret)
4962 		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
4963 out:
4964 	ext4_unlock_group(sb, group);
4965 	ext4_mb_unload_buddy(&e4b);
4966 
4967 	ext4_debug("trimmed %d blocks in the group %d\n",
4968 		count, group);
4969 
4970 	return count;
4971 }
4972 
4973 /**
4974  * ext4_trim_fs() -- trim ioctl handle function
4975  * @sb:			superblock for filesystem
4976  * @range:		fstrim_range structure
4977  * @blkdev_flags:	flags for the block device
4978  *
4979  * start:	First Byte to trim
4980  * len:		number of Bytes to trim from start
4981  * minlen:	minimum extent length in Bytes
4982  * ext4_trim_fs goes through all allocation groups containing Bytes from
4983  * start to start+len. For each such group the ext4_trim_all_free function
4984  * is invoked to trim all free space.
4985  */
4986 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range,
4987 			unsigned long blkdev_flags)
4988 {
4989 	struct ext4_group_info *grp;
4990 	ext4_group_t group, first_group, last_group;
4991 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
4992 	uint64_t start, end, minlen, trimmed = 0;
4993 	ext4_fsblk_t first_data_blk =
4994 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
4995 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
4996 	int ret = 0;
4997 
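	/* the fstrim_range fields arrive in bytes; convert start, length and
	 * minimum extent length into block units */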
4998 	start = range->start >> sb->s_blocksize_bits;
4999 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
5000 	minlen = range->minlen >> sb->s_blocksize_bits;
5001 
5002 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
5003 	    start >= max_blks ||
5004 	    range->len < sb->s_blocksize)
5005 		return -EINVAL;
5006 	if (end >= max_blks)
5007 		end = max_blks - 1;
5008 	if (end <= first_data_blk)
5009 		goto out;
5010 	if (start < first_data_blk)
5011 		start = first_data_blk;
5012 
5013 	/* Determine first and last group to examine based on start and end */
5014 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
5015 				     &first_group, &first_cluster);
5016 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
5017 				     &last_group, &last_cluster);
5018 
5019 	/* end now represents the last cluster to discard in this group */
5020 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5021 
5022 	for (group = first_group; group <= last_group; group++) {
5023 		grp = ext4_get_group_info(sb, group);
5024 		/* We only do this if the grp has never been initialized */
5025 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5026 			ret = ext4_mb_init_group(sb, group);
5027 			if (ret)
5028 				break;
5029 		}
5030 
5031 		/*
5032 		 * For all the groups except the last one, last cluster will
5033 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5034 		 * change it for the last group, note that last_cluster is
5035 		 * already computed earlier by ext4_get_group_no_and_offset()
5036 		 */
5037 		if (group == last_group)
5038 			end = last_cluster;
5039 
5040 		if (grp->bb_free >= minlen) {
5041 			cnt = ext4_trim_all_free(sb, group, first_cluster,
5042 						end, minlen, blkdev_flags);
5043 			if (cnt < 0) {
5044 				ret = cnt;
5045 				break;
5046 			}
5047 			trimmed += cnt;
5048 		}
5049 
5050 		/*
5051 		 * For every group except the first one, we are sure
5052 		 * that the first cluster to discard will be cluster #0.
5053 		 */
5054 		first_cluster = 0;
5055 	}
5056 
5057 	if (!ret)
5058 		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5059 
5060 out:
5061 	range->len = trimmed * sb->s_blocksize;
5062 	return ret;
5063 }
5064