1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4 * Written by Alex Tomas <alex@clusterfs.com>
5 */
6
7
8 /*
9 * mballoc.c contains the multiblocks allocation routines
10 */
11
12 #include "ext4_jbd2.h"
13 #include "mballoc.h"
14 #include <linux/log2.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/nospec.h>
18 #include <linux/backing-dev.h>
19 #include <trace/events/ext4.h>
20
21 /*
22 * MUSTDO:
23 * - test ext4_ext_search_left() and ext4_ext_search_right()
24 * - search for metadata in a few groups
25 *
26 * TODO v4:
27 * - normalization should take into account whether file is still open
28 * - discard preallocations if no free space left (policy?)
29 * - don't normalize tails
30 * - quota
31 * - reservation for superuser
32 *
33 * TODO v3:
34 * - bitmap read-ahead (proposed by Oleg Drokin aka green)
35 * - track min/max extents in each group for better group selection
36 * - mb_mark_used() may allocate chunk right after splitting buddy
37 * - tree of groups sorted by number of free blocks
38 * - error handling
39 */
40
41 /*
42 * An allocation request is a request for multiple blocks near the
43 * specified goal (block) value.
44 *
45 * During initialization phase of the allocator we decide to use the
46 * group preallocation or inode preallocation depending on the size of
47 * the file. The size of the file could be the resulting file size we
48 * would have after allocation, or the current file size, whichever
49 * is larger. If the size is less than sbi->s_mb_stream_request we
50 * select to use the group preallocation. The default value of
51 * s_mb_stream_request is 16 blocks. This can also be tuned via
52 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
53 * terms of number of blocks.
54 *
55 * The main motivation for having small files use group preallocation is to
56 * ensure that small files are kept close together on the disk.
57 *
58 * In the first stage, the allocator looks at the inode prealloc list,
59 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
60 * spaces for this particular inode. The inode prealloc space is
61 * represented as:
62 *
63 * pa_lstart -> the logical start block for this prealloc space
64 * pa_pstart -> the physical start block for this prealloc space
65 * pa_len -> length for this prealloc space (in clusters)
66 * pa_free -> free space available in this prealloc space (in clusters)
67 *
68 * The inode preallocation space is selected by looking at the _logical_
69 * start block. Only if the logical file block falls within the range of a
70 * prealloc space do we consume that particular prealloc space. This makes
71 * sure that we have contiguous physical blocks representing the file blocks.
72 *
73 * The important thing to note about the inode prealloc space is that
74 * we don't modify any of the values associated with it except
75 * pa_free.
76 *
77 * If we are not able to find blocks in the inode prealloc space and if we
78 * have the group allocation flag set then we look at the locality group
79 * prealloc space. This is a per-CPU prealloc list, represented as
80 *
81 * ext4_sb_info.s_locality_groups[smp_processor_id()]
82 *
83 * The reason for having a per-CPU locality group is to reduce the contention
84 * between CPUs. It is possible to get scheduled at this point.
85 *
86 * The locality group prealloc space is used by checking whether we have
87 * enough free space (pa_free) within the prealloc space.
88 *
89 * If we can't allocate blocks via the inode prealloc and/or the locality
90 * group prealloc then we look at the buddy cache. The buddy cache is
91 * represented by ext4_sb_info.s_buddy_cache (struct inode) whose file
92 * offset gets mapped to the buddy and bitmap information for the different
93 * groups. The buddy information is attached to the buddy cache inode so
94 * that we can access it through the page cache. The information for
95 * each group is loaded via ext4_mb_load_buddy and consists of the
96 * block bitmap and the buddy information. The information is stored in the
97 * inode as:
98 *
99 * { page }
100 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
101 *
102 *
103 * one block each for bitmap and buddy information. So for each group we
104 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
105 * blocksize) blocks. So it can hold information for groups_per_page
106 * groups, which is blocks_per_page/2.
107 *
108 * The buddy cache inode is not stored on disk. The inode is thrown
109 * away when the filesystem is unmounted.
110 *
111 * We look for the requested number of blocks in the buddy cache. If we
112 * were able to locate that many free blocks we return with additional
113 * information about the rest of the contiguous physical blocks available.
114 *
115 * Before allocating blocks via the buddy cache we normalize the request.
116 * This ensures we ask for more blocks than we actually need. The extra
117 * blocks that we get after allocation are added to the respective prealloc
118 * list. In the case of inode preallocation we follow a list of heuristics
119 * based on file size. This can be found in ext4_mb_normalize_request. If
120 * we are doing a group prealloc we try to normalize the request to
121 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
122 * dependent on the cluster size; for non-bigalloc file systems, it is
123 * 512 blocks. This can be tuned via
124 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
125 * terms of number of blocks. If we have mounted the file system with the -o
126 * stripe=<value> option the group prealloc request is normalized to the
127 * smallest multiple of the stripe value (sbi->s_stripe) which is
128 * greater than the default mb_group_prealloc.
129 *
130 * The regular allocator (using the buddy cache) supports a few tunables.
131 *
132 * /sys/fs/ext4/<partition>/mb_min_to_scan
133 * /sys/fs/ext4/<partition>/mb_max_to_scan
134 * /sys/fs/ext4/<partition>/mb_order2_req
135 *
136 * The regular allocator uses the buddy scan only if the request length is a
137 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
138 * The value of s_mb_order2_reqs can be tuned via
139 * /sys/fs/ext4/<partition>/mb_order2_req. If the request length is equal to
140 * the stripe size (sbi->s_stripe), we try to search for blocks contiguous in
141 * units of the stripe size. This should result in better allocation on RAID
142 * setups. If not, we search within the specific group using the bitmap for the
143 * best extents. The tunables min_to_scan and max_to_scan control the behaviour
144 * here. min_to_scan indicates how long mballoc __must__ look for a best
145 * extent and max_to_scan indicates how long mballoc __can__ look for a
146 * best extent among the found extents. Searching for the blocks starts with
147 * the group specified as the goal value in allocation context via
148 * ac_g_ex. Each group is first checked to determine whether it
149 * can be used for allocation. ext4_mb_good_group explains how the groups are
150 * checked.
151 *
152 * Both preallocation spaces are populated as described above. So the first
153 * request will hit the buddy cache, which will result in the prealloc
154 * space getting filled. The prealloc space is then used for
155 * subsequent requests.
156 */
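
/*
 * Illustrative sketch of the size check described above (hypothetical
 * helper, not used by the allocator; the real decision is made on the
 * allocation context during initialization): files whose size stays
 * below mb_stream_req are steered to the per-CPU locality group
 * preallocation, larger files use per-inode preallocation.
 */
static inline bool mb_sketch_use_group_pa(ext4_lblk_t cur_size,
					  ext4_lblk_t size_after_alloc,
					  unsigned int mb_stream_request)
{
	/* compare the larger of "current size" and "size after allocation"
	 * (both in blocks) against the s_mb_stream_request threshold */
	ext4_lblk_t size = max(cur_size, size_after_alloc);

	return size < mb_stream_request;
}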
157
158 /*
159 * mballoc operates on the following data:
160 * - on-disk bitmap
161 * - in-core buddy (actually includes buddy and bitmap)
162 * - preallocation descriptors (PAs)
163 *
164 * there are two types of preallocations:
165 * - inode
166 * assigned to a specific inode and can be used for this inode only.
167 * it describes part of the inode's space preallocated to specific
168 * physical blocks. any block from that preallocation can be used
169 * independently. the descriptor just tracks the number of blocks left
170 * unused. so, before taking some block from the descriptor, one must
171 * make sure the corresponding logical block isn't allocated yet. this
172 * also means that freeing any block within the descriptor's range
173 * must discard all preallocated blocks.
174 * - locality group
175 * assigned to a specific locality group, which does not translate to a
176 * permanent set of inodes: an inode can join and leave the group. space
177 * from this type of preallocation can be used for any inode. thus
178 * it's consumed from the beginning to the end.
179 *
180 * relation between them can be expressed as:
181 * in-core buddy = on-disk bitmap + preallocation descriptors
182 *
183 * this means the blocks mballoc considers used are:
184 * - allocated blocks (persistent)
185 * - preallocated blocks (non-persistent)
186 *
187 * consistency in mballoc world means that at any time a block is either
188 * free or used in ALL structures. notice: "any time" should not be read
189 * literally -- time is discrete and delimited by locks.
190 *
191 * to keep it simple, we don't use block numbers, instead we count blocks:
192 * how many blocks are marked used/free in the on-disk bitmap, buddy and PA.
193 *
194 * all operations can be expressed as:
195 * - init buddy: buddy = on-disk + PAs
196 * - new PA: buddy += N; PA = N
197 * - use inode PA: on-disk += N; PA -= N
198 * - discard inode PA: buddy -= on-disk - PA; PA = 0
199 * - use locality group PA: on-disk += N; PA -= N
200 * - discard locality group PA: buddy -= PA; PA = 0
201 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
202 * is used in the real operation because we can't know the actual used
203 * bits from the PA, only from the on-disk bitmap
204 *
205 * if we follow this strict logic, then all operations above should be atomic.
206 * given some of them can block, we'd have to use something like semaphores
207 * killing performance on high-end SMP hardware. let's try to relax it using
208 * the following knowledge:
209 * 1) if buddy is referenced, it's already initialized
210 * 2) while block is used in buddy and the buddy is referenced,
211 * nobody can re-allocate that block
212 * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
213 * a bit set and a PA claims the same block, it's OK. IOW, one can set a bit
214 * in the on-disk bitmap if the buddy has the same bit set and/or a PA covers
215 * the corresponding block
216 *
217 * so, now we're building a concurrency table:
218 * - init buddy vs.
219 * - new PA
220 * blocks for PA are allocated in the buddy, buddy must be referenced
221 * until PA is linked to allocation group to avoid concurrent buddy init
222 * - use inode PA
223 * we need to make sure that either the on-disk bitmap or the PA has uptodate
224 * data. given (3), we care that the PA -= N operation doesn't interfere with init
225 * - discard inode PA
226 * the simplest way would be to have buddy initialized by the discard
227 * - use locality group PA
228 * again PA-=N must be serialized with init
229 * - discard locality group PA
230 * the simplest way would be to have buddy initialized by the discard
231 * - new PA vs.
232 * - use inode PA
233 * i_data_sem serializes them
234 * - discard inode PA
235 * discard process must wait until PA isn't used by another process
236 * - use locality group PA
237 * some mutex should serialize them
238 * - discard locality group PA
239 * discard process must wait until PA isn't used by another process
240 * - use inode PA
241 * - use inode PA
242 * i_data_sem or another mutex should serialize them
243 * - discard inode PA
244 * discard process must wait until PA isn't used by another process
245 * - use locality group PA
246 * nothing wrong here -- they're different PAs covering different blocks
247 * - discard locality group PA
248 * discard process must wait until PA isn't used by another process
249 *
250 * now we're ready to draw a few conclusions:
251 * - while a PA is referenced, no discard of it is possible
252 * - a PA is kept referenced until its block is marked in the on-disk bitmap
253 * - PA changes only after on-disk bitmap
254 * - discard must not compete with init. either init is done before
255 * any discard or they're serialized somehow
256 * - buddy init as sum of on-disk bitmap and PAs is done atomically
257 *
258 * a special case is when we've used a PA to emptiness. no need to modify the
259 * buddy in this case, but we still have to care about concurrent init
260 *
261 */
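
/*
 * Worked example of the accounting above (numbers purely illustrative):
 * a new 16-block inode PA marks 16 blocks used in the buddy
 * (buddy += 16, PA = 16).  The inode then writes 10 of them, so the
 * on-disk bitmap gains 10 bits (on-disk += 10, PA -= 10, buddy is
 * unchanged).  Discarding the PA frees the 6 blocks of its range that
 * are not set in the on-disk bitmap (buddy -= 6, PA = 0).  At every
 * step the identity "in-core buddy = on-disk bitmap + PAs" holds.
 */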
262
263 /*
264 * Logic in a few words:
265 *
266 * - allocation:
267 * load group
268 * find blocks
269 * mark bits in on-disk bitmap
270 * release group
271 *
272 * - use preallocation:
273 * find proper PA (per-inode or group)
274 * load group
275 * mark bits in on-disk bitmap
276 * release group
277 * release PA
278 *
279 * - free:
280 * load group
281 * mark bits in on-disk bitmap
282 * release group
283 *
284 * - discard preallocations in group:
285 * mark PAs deleted
286 * move them onto local list
287 * load on-disk bitmap
288 * load group
289 * remove PA from object (inode or locality group)
290 * mark free blocks in-core
291 *
292 * - discard inode's preallocations:
293 */
294
295 /*
296 * Locking rules
297 *
298 * Locks:
299 * - bitlock on a group (group)
300 * - object (inode/locality) (object)
301 * - per-pa lock (pa)
302 *
303 * Paths:
304 * - new pa
305 * object
306 * group
307 *
308 * - find and use pa:
309 * pa
310 *
311 * - release consumed pa:
312 * pa
313 * group
314 * object
315 *
316 * - generate in-core bitmap:
317 * group
318 * pa
319 *
320 * - discard all for given object (inode, locality group):
321 * object
322 * pa
323 * group
324 *
325 * - discard all for given group:
326 * group
327 * pa
328 * group
329 * object
330 *
331 */
332 static struct kmem_cache *ext4_pspace_cachep;
333 static struct kmem_cache *ext4_ac_cachep;
334 static struct kmem_cache *ext4_free_data_cachep;
335
336 /* We create slab caches for groupinfo data structures based on the
337 * superblock block size. There will be one per mounted filesystem for
338 * each unique s_blocksize_bits */
339 #define NR_GRPINFO_CACHES 8
340 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
341
342 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
343 "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
344 "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
345 "ext4_groupinfo_64k", "ext4_groupinfo_128k"
346 };
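
/*
 * For instance, a filesystem with 4 KiB blocks (s_blocksize_bits == 12)
 * uses the "ext4_groupinfo_4k" cache above; the index into this array
 * is derived from s_blocksize_bits when the cache is looked up later in
 * this file.
 */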
347
348 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
349 ext4_group_t group);
350 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
351 ext4_group_t group);
352 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
353
354 /*
355 * The algorithm using this percpu seq counter goes as follows:
356 * 1. We sample the percpu discard_pa_seq counter before trying for block
357 * allocation in ext4_mb_new_blocks().
358 * 2. We increment this percpu discard_pa_seq counter when we either allocate
359 * or free these blocks i.e. while marking those blocks as used/free in
360 * mb_mark_used()/mb_free_blocks().
361 * 3. We also increment this percpu seq counter when we successfully identify
362 * that the bb_prealloc_list is not empty and hence proceed for discarding
363 * of those PAs inside ext4_mb_discard_group_preallocations().
364 *
365 * Now, to make sure that the regular fast path of block allocation is not
366 * affected, as a small optimization we only sample the percpu seq counter
367 * on the local cpu. Only when the block allocation fails and the number of
368 * freed blocks found was 0 do we sample the percpu seq counter for all cpus
369 * using the function ext4_get_discard_pa_seq_sum() below. This happens after
370 * making sure that all the PAs on grp->bb_prealloc_list got freed or the list is empty.
371 */
372 static DEFINE_PER_CPU(u64, discard_pa_seq);
373 static inline u64 ext4_get_discard_pa_seq_sum(void)
374 {
375 int __cpu;
376 u64 __seq = 0;
377
378 for_each_possible_cpu(__cpu)
379 __seq += per_cpu(discard_pa_seq, __cpu);
380 return __seq;
381 }
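
/*
 * Hedged sketch of how the counter above is meant to be consumed
 * (hypothetical helper name; the real retry logic lives in
 * ext4_mb_new_blocks()): the caller samples the counter before
 * allocating and only considers another pass worthwhile if the global
 * sum moved, i.e. somebody discarded PAs or freed/used blocks in the
 * meantime.
 */
static inline bool mb_sketch_worth_retrying(u64 seq_sampled_before)
{
	return ext4_get_discard_pa_seq_sum() != seq_sampled_before;
}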
382
383 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
384 {
385 #if BITS_PER_LONG == 64
386 *bit += ((unsigned long) addr & 7UL) << 3;
387 addr = (void *) ((unsigned long) addr & ~7UL);
388 #elif BITS_PER_LONG == 32
389 *bit += ((unsigned long) addr & 3UL) << 3;
390 addr = (void *) ((unsigned long) addr & ~3UL);
391 #else
392 #error "how many bits you are?!"
393 #endif
394 return addr;
395 }
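
/*
 * Example: on a 64-bit machine, addr = base + 5 with bit = 3 becomes
 * addr = base (rounded down to an 8-byte boundary) and
 * bit = 3 + 5 * 8 = 43, so the wrappers below always hand an aligned
 * address to the ext4_*_bit() primitives.
 */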
396
397 static inline int mb_test_bit(int bit, void *addr)
398 {
399 /*
400 * ext4_test_bit on architectures like powerpc
401 * needs an unsigned long aligned address
402 */
403 addr = mb_correct_addr_and_bit(&bit, addr);
404 return ext4_test_bit(bit, addr);
405 }
406
407 static inline void mb_set_bit(int bit, void *addr)
408 {
409 addr = mb_correct_addr_and_bit(&bit, addr);
410 ext4_set_bit(bit, addr);
411 }
412
413 static inline void mb_clear_bit(int bit, void *addr)
414 {
415 addr = mb_correct_addr_and_bit(&bit, addr);
416 ext4_clear_bit(bit, addr);
417 }
418
419 static inline int mb_test_and_clear_bit(int bit, void *addr)
420 {
421 addr = mb_correct_addr_and_bit(&bit, addr);
422 return ext4_test_and_clear_bit(bit, addr);
423 }
424
425 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
426 {
427 int fix = 0, ret, tmpmax;
428 addr = mb_correct_addr_and_bit(&fix, addr);
429 tmpmax = max + fix;
430 start += fix;
431
432 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
433 if (ret > max)
434 return max;
435 return ret;
436 }
437
438 static inline int mb_find_next_bit(void *addr, int max, int start)
439 {
440 int fix = 0, ret, tmpmax;
441 addr = mb_correct_addr_and_bit(&fix, addr);
442 tmpmax = max + fix;
443 start += fix;
444
445 ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
446 if (ret > max)
447 return max;
448 return ret;
449 }
450
451 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
452 {
453 char *bb;
454
455 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
456 BUG_ON(max == NULL);
457
458 if (order > e4b->bd_blkbits + 1) {
459 *max = 0;
460 return NULL;
461 }
462
463 /* at order 0 we see each particular block */
464 if (order == 0) {
465 *max = 1 << (e4b->bd_blkbits + 3);
466 return e4b->bd_bitmap;
467 }
468
469 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
470 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
471
472 return bb;
473 }
474
475 #ifdef DOUBLE_CHECK
476 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
477 int first, int count)
478 {
479 int i;
480 struct super_block *sb = e4b->bd_sb;
481
482 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
483 return;
484 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
485 for (i = 0; i < count; i++) {
486 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
487 ext4_fsblk_t blocknr;
488
489 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
490 blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
491 ext4_grp_locked_error(sb, e4b->bd_group,
492 inode ? inode->i_ino : 0,
493 blocknr,
494 "freeing block already freed "
495 "(bit %u)",
496 first + i);
497 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
498 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
499 }
500 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
501 }
502 }
503
504 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
505 {
506 int i;
507
508 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
509 return;
510 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
511 for (i = 0; i < count; i++) {
512 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
513 mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
514 }
515 }
516
517 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
518 {
519 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
520 return;
521 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
522 unsigned char *b1, *b2;
523 int i;
524 b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
525 b2 = (unsigned char *) bitmap;
526 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
527 if (b1[i] != b2[i]) {
528 ext4_msg(e4b->bd_sb, KERN_ERR,
529 "corruption in group %u "
530 "at byte %u(%u): %x in copy != %x "
531 "on disk/prealloc",
532 e4b->bd_group, i, i * 8, b1[i], b2[i]);
533 BUG();
534 }
535 }
536 }
537 }
538
539 static void mb_group_bb_bitmap_alloc(struct super_block *sb,
540 struct ext4_group_info *grp, ext4_group_t group)
541 {
542 struct buffer_head *bh;
543
544 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
545 if (!grp->bb_bitmap)
546 return;
547
548 bh = ext4_read_block_bitmap(sb, group);
549 if (IS_ERR_OR_NULL(bh)) {
550 kfree(grp->bb_bitmap);
551 grp->bb_bitmap = NULL;
552 return;
553 }
554
555 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
556 put_bh(bh);
557 }
558
559 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
560 {
561 kfree(grp->bb_bitmap);
562 }
563
564 #else
565 static inline void mb_free_blocks_double(struct inode *inode,
566 struct ext4_buddy *e4b, int first, int count)
567 {
568 return;
569 }
570 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
571 int first, int count)
572 {
573 return;
574 }
575 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
576 {
577 return;
578 }
579
580 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
581 struct ext4_group_info *grp, ext4_group_t group)
582 {
583 return;
584 }
585
586 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
587 {
588 return;
589 }
590 #endif
591
592 #ifdef AGGRESSIVE_CHECK
593
594 #define MB_CHECK_ASSERT(assert) \
595 do { \
596 if (!(assert)) { \
597 printk(KERN_EMERG \
598 "Assertion failure in %s() at %s:%d: \"%s\"\n", \
599 function, file, line, # assert); \
600 BUG(); \
601 } \
602 } while (0)
603
604 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
605 const char *function, int line)
606 {
607 struct super_block *sb = e4b->bd_sb;
608 int order = e4b->bd_blkbits + 1;
609 int max;
610 int max2;
611 int i;
612 int j;
613 int k;
614 int count;
615 struct ext4_group_info *grp;
616 int fragments = 0;
617 int fstart;
618 struct list_head *cur;
619 void *buddy;
620 void *buddy2;
621
622 if (e4b->bd_info->bb_check_counter++ % 10)
623 return 0;
624
625 while (order > 1) {
626 buddy = mb_find_buddy(e4b, order, &max);
627 MB_CHECK_ASSERT(buddy);
628 buddy2 = mb_find_buddy(e4b, order - 1, &max2);
629 MB_CHECK_ASSERT(buddy2);
630 MB_CHECK_ASSERT(buddy != buddy2);
631 MB_CHECK_ASSERT(max * 2 == max2);
632
633 count = 0;
634 for (i = 0; i < max; i++) {
635
636 if (mb_test_bit(i, buddy)) {
637 /* only single bit in buddy2 may be 1 */
638 if (!mb_test_bit(i << 1, buddy2)) {
639 MB_CHECK_ASSERT(
640 mb_test_bit((i<<1)+1, buddy2));
641 } else if (!mb_test_bit((i << 1) + 1, buddy2)) {
642 MB_CHECK_ASSERT(
643 mb_test_bit(i << 1, buddy2));
644 }
645 continue;
646 }
647
648 /* both bits in buddy2 must be 1 */
649 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
650 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
651
652 for (j = 0; j < (1 << order); j++) {
653 k = (i * (1 << order)) + j;
654 MB_CHECK_ASSERT(
655 !mb_test_bit(k, e4b->bd_bitmap));
656 }
657 count++;
658 }
659 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
660 order--;
661 }
662
663 fstart = -1;
664 buddy = mb_find_buddy(e4b, 0, &max);
665 for (i = 0; i < max; i++) {
666 if (!mb_test_bit(i, buddy)) {
667 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
668 if (fstart == -1) {
669 fragments++;
670 fstart = i;
671 }
672 continue;
673 }
674 fstart = -1;
675 /* check used bits only */
676 for (j = 0; j < e4b->bd_blkbits + 1; j++) {
677 buddy2 = mb_find_buddy(e4b, j, &max2);
678 k = i >> j;
679 MB_CHECK_ASSERT(k < max2);
680 MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
681 }
682 }
683 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
684 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
685
686 grp = ext4_get_group_info(sb, e4b->bd_group);
687 if (!grp)
688 return 0;
689 list_for_each(cur, &grp->bb_prealloc_list) {
690 ext4_group_t groupnr;
691 struct ext4_prealloc_space *pa;
692 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
693 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
694 MB_CHECK_ASSERT(groupnr == e4b->bd_group);
695 for (i = 0; i < pa->pa_len; i++)
696 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
697 }
698 return 0;
699 }
700 #undef MB_CHECK_ASSERT
701 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
702 __FILE__, __func__, __LINE__)
703 #else
704 #define mb_check_buddy(e4b)
705 #endif
706
707 /*
708 * Divide the blocks starting at @first with length @len into
709 * smaller chunks with power of 2 block counts.
710 * Clear the bits in the buddy bitmap which the blocks of the chunk(s) cover,
711 * then increase bb_counters[] for the corresponding chunk size.
712 */
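/*
 * Worked example for the helper below: with first = 5 and len = 11 the
 * loop peels off chunks of 1 block (block 5), 2 blocks (blocks 6-7) and
 * 8 blocks (blocks 8-15), bumping bb_counters[0], bb_counters[1] and
 * bb_counters[3] respectively and clearing the matching buddy bit for
 * each multi-block chunk.
 */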
713 static void ext4_mb_mark_free_simple(struct super_block *sb,
714 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
715 struct ext4_group_info *grp)
716 {
717 struct ext4_sb_info *sbi = EXT4_SB(sb);
718 ext4_grpblk_t min;
719 ext4_grpblk_t max;
720 ext4_grpblk_t chunk;
721 unsigned int border;
722
723 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
724
725 border = 2 << sb->s_blocksize_bits;
726
727 while (len > 0) {
728 /* find how many blocks can be covered from this position */
729 max = ffs(first | border) - 1;
730
731 /* find how many blocks of power 2 we need to mark */
732 min = fls(len) - 1;
733
734 if (max < min)
735 min = max;
736 chunk = 1 << min;
737
738 /* mark multiblock chunks only */
739 grp->bb_counters[min]++;
740 if (min > 0)
741 mb_clear_bit(first >> min,
742 buddy + sbi->s_mb_offsets[min]);
743
744 len -= chunk;
745 first += chunk;
746 }
747 }
748
749 /*
750 * Cache the order of the largest free extent we have available in this block
751 * group.
752 */
753 static void
754 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
755 {
756 int i;
757 int bits;
758
759 grp->bb_largest_free_order = -1; /* uninit */
760
761 bits = sb->s_blocksize_bits + 1;
762 for (i = bits; i >= 0; i--) {
763 if (grp->bb_counters[i] > 0) {
764 grp->bb_largest_free_order = i;
765 break;
766 }
767 }
768 }
769
770 static noinline_for_stack
771 void ext4_mb_generate_buddy(struct super_block *sb,
772 void *buddy, void *bitmap, ext4_group_t group,
773 struct ext4_group_info *grp)
774 {
775 struct ext4_sb_info *sbi = EXT4_SB(sb);
776 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
777 ext4_grpblk_t i = 0;
778 ext4_grpblk_t first;
779 ext4_grpblk_t len;
780 unsigned free = 0;
781 unsigned fragments = 0;
782 unsigned long long period = get_cycles();
783
784 /* initialize buddy from bitmap which is the aggregation
785 * of on-disk bitmap and preallocations */
786 i = mb_find_next_zero_bit(bitmap, max, 0);
787 grp->bb_first_free = i;
788 while (i < max) {
789 fragments++;
790 first = i;
791 i = mb_find_next_bit(bitmap, max, i);
792 len = i - first;
793 free += len;
794 if (len > 1)
795 ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
796 else
797 grp->bb_counters[0]++;
798 if (i < max)
799 i = mb_find_next_zero_bit(bitmap, max, i);
800 }
801 grp->bb_fragments = fragments;
802
803 if (free != grp->bb_free) {
804 ext4_grp_locked_error(sb, group, 0, 0,
805 "block bitmap and bg descriptor "
806 "inconsistent: %u vs %u free clusters",
807 free, grp->bb_free);
808 /*
809 * If we intend to continue, we consider group descriptor
810 * corrupt and update bb_free using bitmap value
811 */
812 grp->bb_free = free;
813 ext4_mark_group_bitmap_corrupted(sb, group,
814 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
815 }
816 mb_set_largest_free_order(sb, grp);
817
818 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
819
820 period = get_cycles() - period;
821 spin_lock(&sbi->s_bal_lock);
822 sbi->s_mb_buddies_generated++;
823 sbi->s_mb_generation_time += period;
824 spin_unlock(&sbi->s_bal_lock);
825 }
826
827 static void mb_regenerate_buddy(struct ext4_buddy *e4b)
828 {
829 int count;
830 int order = 1;
831 void *buddy;
832
833 while ((buddy = mb_find_buddy(e4b, order++, &count)))
834 ext4_set_bits(buddy, 0, count);
835
836 e4b->bd_info->bb_fragments = 0;
837 memset(e4b->bd_info->bb_counters, 0,
838 sizeof(*e4b->bd_info->bb_counters) *
839 (e4b->bd_sb->s_blocksize_bits + 2));
840
841 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
842 e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
843 }
844
845 /* The buddy information is attached to the buddy cache inode
846 * for convenience. The information regarding each group
847 * is loaded via ext4_mb_load_buddy. The information involves the
848 * block bitmap and the buddy information. The information is
849 * stored in the inode as
850 *
851 * { page }
852 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
853 *
854 *
855 * one block each for bitmap and buddy information.
856 * So for each group we take up 2 blocks. A page can
857 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
858 * So it can hold information for groups_per_page groups, which
859 * is blocks_per_page/2
860 *
861 * Locking note: This routine takes the block group lock of all groups
862 * for this page; do not hold this lock when calling this routine!
863 */
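/*
 * Worked example of the layout above: with PAGE_SIZE = 4096 and a 1 KiB
 * block size, blocks_per_page = 4 and groups_per_page = 2, so page 0
 * holds group 0's bitmap and buddy followed by group 1's bitmap and
 * buddy.  With a 4 KiB block size, blocks_per_page = 1 and every bitmap
 * or buddy block gets a page of its own.
 */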
864
865 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
866 {
867 ext4_group_t ngroups;
868 int blocksize;
869 int blocks_per_page;
870 int groups_per_page;
871 int err = 0;
872 int i;
873 ext4_group_t first_group, group;
874 int first_block;
875 struct super_block *sb;
876 struct buffer_head *bhs;
877 struct buffer_head **bh = NULL;
878 struct inode *inode;
879 char *data;
880 char *bitmap;
881 struct ext4_group_info *grinfo;
882
883 inode = page->mapping->host;
884 sb = inode->i_sb;
885 ngroups = ext4_get_groups_count(sb);
886 blocksize = i_blocksize(inode);
887 blocks_per_page = PAGE_SIZE / blocksize;
888
889 mb_debug(sb, "init page %lu\n", page->index);
890
891 groups_per_page = blocks_per_page >> 1;
892 if (groups_per_page == 0)
893 groups_per_page = 1;
894
895 /* allocate buffer_heads to read bitmaps */
896 if (groups_per_page > 1) {
897 i = sizeof(struct buffer_head *) * groups_per_page;
898 bh = kzalloc(i, gfp);
899 if (bh == NULL) {
900 err = -ENOMEM;
901 goto out;
902 }
903 } else
904 bh = &bhs;
905
906 first_group = page->index * blocks_per_page / 2;
907
908 /* read all groups the page covers into the cache */
909 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
910 if (group >= ngroups)
911 break;
912
913 grinfo = ext4_get_group_info(sb, group);
914 if (!grinfo)
915 continue;
916 /*
917 * If page is uptodate then we came here after online resize
918 * which added some new uninitialized group info structs, so
919 * we must skip all initialized uptodate buddies on the page,
920 * which may be currently in use by an allocating task.
921 */
922 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
923 bh[i] = NULL;
924 continue;
925 }
926 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
927 if (IS_ERR(bh[i])) {
928 err = PTR_ERR(bh[i]);
929 bh[i] = NULL;
930 goto out;
931 }
932 mb_debug(sb, "read bitmap for group %u\n", group);
933 }
934
935 /* wait for I/O completion */
936 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
937 int err2;
938
939 if (!bh[i])
940 continue;
941 err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
942 if (!err)
943 err = err2;
944 }
945
946 first_block = page->index * blocks_per_page;
947 for (i = 0; i < blocks_per_page; i++) {
948 group = (first_block + i) >> 1;
949 if (group >= ngroups)
950 break;
951
952 if (!bh[group - first_group])
953 /* skip initialized uptodate buddy */
954 continue;
955
956 if (!buffer_verified(bh[group - first_group]))
957 /* Skip faulty bitmaps */
958 continue;
959 err = 0;
960
961 /*
962 * data carries information regarding this
963 * particular group in the format specified
964 * above
965 *
966 */
967 data = page_address(page) + (i * blocksize);
968 bitmap = bh[group - first_group]->b_data;
969
970 /*
971 * We place the buddy block and bitmap block
972 * close together
973 */
974 if ((first_block + i) & 1) {
975 /* this is block of buddy */
976 BUG_ON(incore == NULL);
977 mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
978 group, page->index, i * blocksize);
979 trace_ext4_mb_buddy_bitmap_load(sb, group);
980 grinfo = ext4_get_group_info(sb, group);
981 if (!grinfo) {
982 err = -EFSCORRUPTED;
983 goto out;
984 }
985 grinfo->bb_fragments = 0;
986 memset(grinfo->bb_counters, 0,
987 sizeof(*grinfo->bb_counters) *
988 (sb->s_blocksize_bits+2));
989 /*
990 * incore got set to the group block bitmap below
991 */
992 ext4_lock_group(sb, group);
993 /* init the buddy */
994 memset(data, 0xff, blocksize);
995 ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
996 ext4_unlock_group(sb, group);
997 incore = NULL;
998 } else {
999 /* this is block of bitmap */
1000 BUG_ON(incore != NULL);
1001 mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1002 group, page->index, i * blocksize);
1003 trace_ext4_mb_bitmap_load(sb, group);
1004
1005 /* see comments in ext4_mb_put_pa() */
1006 ext4_lock_group(sb, group);
1007 memcpy(data, bitmap, blocksize);
1008
1009 /* mark all preallocated blks used in in-core bitmap */
1010 ext4_mb_generate_from_pa(sb, data, group);
1011 ext4_mb_generate_from_freelist(sb, data, group);
1012 ext4_unlock_group(sb, group);
1013
1014 /* set incore so that the buddy information can be
1015 * generated using this
1016 */
1017 incore = data;
1018 }
1019 }
1020 SetPageUptodate(page);
1021
1022 out:
1023 if (bh) {
1024 for (i = 0; i < groups_per_page; i++)
1025 brelse(bh[i]);
1026 if (bh != &bhs)
1027 kfree(bh);
1028 }
1029 return err;
1030 }
1031
1032 /*
1033 * Lock the buddy and bitmap pages. This makes sure that another parallel
1034 * init_group on the same buddy page doesn't happen while we hold the buddy page lock.
1035 * Return the locked buddy and bitmap pages in the e4b struct. If the buddy and bitmap
1036 * are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
1037 */
1038 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1039 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1040 {
1041 struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1042 int block, pnum, poff;
1043 int blocks_per_page;
1044 struct page *page;
1045
1046 e4b->bd_buddy_page = NULL;
1047 e4b->bd_bitmap_page = NULL;
1048
1049 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1050 /*
1051 * the buddy cache inode stores the block bitmap
1052 * and buddy information in consecutive blocks.
1053 * So for each group we need two blocks.
1054 */
1055 block = group * 2;
1056 pnum = block / blocks_per_page;
1057 poff = block % blocks_per_page;
1058 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1059 if (!page)
1060 return -ENOMEM;
1061 BUG_ON(page->mapping != inode->i_mapping);
1062 e4b->bd_bitmap_page = page;
1063 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1064
1065 if (blocks_per_page >= 2) {
1066 /* buddy and bitmap are on the same page */
1067 return 0;
1068 }
1069
1070 block++;
1071 pnum = block / blocks_per_page;
1072 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1073 if (!page)
1074 return -ENOMEM;
1075 BUG_ON(page->mapping != inode->i_mapping);
1076 e4b->bd_buddy_page = page;
1077 return 0;
1078 }
1079
1080 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1081 {
1082 if (e4b->bd_bitmap_page) {
1083 unlock_page(e4b->bd_bitmap_page);
1084 put_page(e4b->bd_bitmap_page);
1085 }
1086 if (e4b->bd_buddy_page) {
1087 unlock_page(e4b->bd_buddy_page);
1088 put_page(e4b->bd_buddy_page);
1089 }
1090 }
1091
1092 /*
1093 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1094 * block group lock of all groups for this page; do not hold the BG lock when
1095 * calling this routine!
1096 */
1097 static noinline_for_stack
1098 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1099 {
1100
1101 struct ext4_group_info *this_grp;
1102 struct ext4_buddy e4b;
1103 struct page *page;
1104 int ret = 0;
1105
1106 might_sleep();
1107 mb_debug(sb, "init group %u\n", group);
1108 this_grp = ext4_get_group_info(sb, group);
1109 if (!this_grp)
1110 return -EFSCORRUPTED;
1111
1112 /*
1113 * This ensures that we don't reinit the buddy cache
1114 * page which maps to the group from which we are already
1115 * allocating. If we are looking at the buddy cache we would
1116 * have taken a reference using ext4_mb_load_buddy and that
1117 * would have pinned buddy page to page cache.
1118 * The call to ext4_mb_get_buddy_page_lock will mark the
1119 * page accessed.
1120 */
1121 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1122 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1123 /*
1124 * somebody initialized the group
1125 * return without doing anything
1126 */
1127 goto err;
1128 }
1129
1130 page = e4b.bd_bitmap_page;
1131 ret = ext4_mb_init_cache(page, NULL, gfp);
1132 if (ret)
1133 goto err;
1134 if (!PageUptodate(page)) {
1135 ret = -EIO;
1136 goto err;
1137 }
1138
1139 if (e4b.bd_buddy_page == NULL) {
1140 /*
1141 * If both the bitmap and buddy are in
1142 * the same page we don't need to force
1143 * init the buddy
1144 */
1145 ret = 0;
1146 goto err;
1147 }
1148 /* init buddy cache */
1149 page = e4b.bd_buddy_page;
1150 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1151 if (ret)
1152 goto err;
1153 if (!PageUptodate(page)) {
1154 ret = -EIO;
1155 goto err;
1156 }
1157 err:
1158 ext4_mb_put_buddy_page_lock(&e4b);
1159 return ret;
1160 }
1161
1162 /*
1163 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1164 * block group lock of all groups for this page; do not hold the BG lock when
1165 * calling this routine!
1166 */
1167 static noinline_for_stack int
1168 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1169 struct ext4_buddy *e4b, gfp_t gfp)
1170 {
1171 int blocks_per_page;
1172 int block;
1173 int pnum;
1174 int poff;
1175 struct page *page;
1176 int ret;
1177 struct ext4_group_info *grp;
1178 struct ext4_sb_info *sbi = EXT4_SB(sb);
1179 struct inode *inode = sbi->s_buddy_cache;
1180
1181 might_sleep();
1182 mb_debug(sb, "load group %u\n", group);
1183
1184 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1185 grp = ext4_get_group_info(sb, group);
1186 if (!grp)
1187 return -EFSCORRUPTED;
1188
1189 e4b->bd_blkbits = sb->s_blocksize_bits;
1190 e4b->bd_info = grp;
1191 e4b->bd_sb = sb;
1192 e4b->bd_group = group;
1193 e4b->bd_buddy_page = NULL;
1194 e4b->bd_bitmap_page = NULL;
1195
1196 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1197 /*
1198 * we need full data about the group
1199 * to make a good selection
1200 */
1201 ret = ext4_mb_init_group(sb, group, gfp);
1202 if (ret)
1203 return ret;
1204 }
1205
1206 /*
1207 * the buddy cache inode stores the block bitmap
1208 * and buddy information in consecutive blocks.
1209 * So for each group we need two blocks.
1210 */
1211 block = group * 2;
1212 pnum = block / blocks_per_page;
1213 poff = block % blocks_per_page;
1214
1215 /* we could use find_or_create_page(), but it locks the page,
1216 * which we'd like to avoid in the fast path ... */
1217 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1218 if (page == NULL || !PageUptodate(page)) {
1219 if (page)
1220 /*
1221 * drop the page reference and try
1222 * to get the page with it locked. If we
1223 * are not uptodate that implies
1224 * somebody just created the page but
1225 * has yet to initialize it. So
1226 * wait for it to initialize.
1227 */
1228 put_page(page);
1229 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1230 if (page) {
1231 BUG_ON(page->mapping != inode->i_mapping);
1232 if (!PageUptodate(page)) {
1233 ret = ext4_mb_init_cache(page, NULL, gfp);
1234 if (ret) {
1235 unlock_page(page);
1236 goto err;
1237 }
1238 mb_cmp_bitmaps(e4b, page_address(page) +
1239 (poff * sb->s_blocksize));
1240 }
1241 unlock_page(page);
1242 }
1243 }
1244 if (page == NULL) {
1245 ret = -ENOMEM;
1246 goto err;
1247 }
1248 if (!PageUptodate(page)) {
1249 ret = -EIO;
1250 goto err;
1251 }
1252
1253 /* Pages marked accessed already */
1254 e4b->bd_bitmap_page = page;
1255 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1256
1257 block++;
1258 pnum = block / blocks_per_page;
1259 poff = block % blocks_per_page;
1260
1261 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1262 if (page == NULL || !PageUptodate(page)) {
1263 if (page)
1264 put_page(page);
1265 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1266 if (page) {
1267 BUG_ON(page->mapping != inode->i_mapping);
1268 if (!PageUptodate(page)) {
1269 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1270 gfp);
1271 if (ret) {
1272 unlock_page(page);
1273 goto err;
1274 }
1275 }
1276 unlock_page(page);
1277 }
1278 }
1279 if (page == NULL) {
1280 ret = -ENOMEM;
1281 goto err;
1282 }
1283 if (!PageUptodate(page)) {
1284 ret = -EIO;
1285 goto err;
1286 }
1287
1288 /* Pages marked accessed already */
1289 e4b->bd_buddy_page = page;
1290 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1291
1292 return 0;
1293
1294 err:
1295 if (page)
1296 put_page(page);
1297 if (e4b->bd_bitmap_page)
1298 put_page(e4b->bd_bitmap_page);
1299 if (e4b->bd_buddy_page)
1300 put_page(e4b->bd_buddy_page);
1301 e4b->bd_buddy = NULL;
1302 e4b->bd_bitmap = NULL;
1303 return ret;
1304 }
1305
1306 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1307 struct ext4_buddy *e4b)
1308 {
1309 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1310 }
1311
1312 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1313 {
1314 if (e4b->bd_bitmap_page)
1315 put_page(e4b->bd_bitmap_page);
1316 if (e4b->bd_buddy_page)
1317 put_page(e4b->bd_buddy_page);
1318 }
1319
1320
1321 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1322 {
1323 int order = 1;
1324 int bb_incr = 1 << (e4b->bd_blkbits - 1);
1325 void *bb;
1326
1327 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1328 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1329
1330 bb = e4b->bd_buddy;
1331 while (order <= e4b->bd_blkbits + 1) {
1332 block = block >> 1;
1333 if (!mb_test_bit(block, bb)) {
1334 /* this block is part of buddy of order 'order' */
1335 return order;
1336 }
1337 bb += bb_incr;
1338 bb_incr >>= 1;
1339 order++;
1340 }
1341 return 0;
1342 }
1343
1344 static void mb_clear_bits(void *bm, int cur, int len)
1345 {
1346 __u32 *addr;
1347
1348 len = cur + len;
1349 while (cur < len) {
1350 if ((cur & 31) == 0 && (len - cur) >= 32) {
1351 /* fast path: clear whole word at once */
1352 addr = bm + (cur >> 3);
1353 *addr = 0;
1354 cur += 32;
1355 continue;
1356 }
1357 mb_clear_bit(cur, bm);
1358 cur++;
1359 }
1360 }
1361
1362 /* clear bits in given range
1363 * will return first found zero bit if any, -1 otherwise
1364 */
1365 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1366 {
1367 __u32 *addr;
1368 int zero_bit = -1;
1369
1370 len = cur + len;
1371 while (cur < len) {
1372 if ((cur & 31) == 0 && (len - cur) >= 32) {
1373 /* fast path: clear whole word at once */
1374 addr = bm + (cur >> 3);
1375 if (*addr != (__u32)(-1) && zero_bit == -1)
1376 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1377 *addr = 0;
1378 cur += 32;
1379 continue;
1380 }
1381 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1382 zero_bit = cur;
1383 cur++;
1384 }
1385
1386 return zero_bit;
1387 }
1388
1389 void ext4_set_bits(void *bm, int cur, int len)
1390 {
1391 __u32 *addr;
1392
1393 len = cur + len;
1394 while (cur < len) {
1395 if ((cur & 31) == 0 && (len - cur) >= 32) {
1396 /* fast path: set whole word at once */
1397 addr = bm + (cur >> 3);
1398 *addr = 0xffffffff;
1399 cur += 32;
1400 continue;
1401 }
1402 mb_set_bit(cur, bm);
1403 cur++;
1404 }
1405 }
1406
1407 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1408 {
1409 if (mb_test_bit(*bit + side, bitmap)) {
1410 mb_clear_bit(*bit, bitmap);
1411 (*bit) -= side;
1412 return 1;
1413 }
1414 else {
1415 (*bit) += side;
1416 mb_set_bit(*bit, bitmap);
1417 return -1;
1418 }
1419 }
1420
1421 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1422 {
1423 int max;
1424 int order = 1;
1425 void *buddy = mb_find_buddy(e4b, order, &max);
1426
1427 while (buddy) {
1428 void *buddy2;
1429
1430 /* Bits in range [first; last] are known to be set since
1431 * corresponding blocks were allocated. Bits in range
1432 * (first; last) will stay set because they form buddies on
1433 * upper layer. We just deal with borders if they don't
1434 * align with upper layer and then go up.
1435 * Releasing entire group is all about clearing
1436 * single bit of highest order buddy.
1437 */
1438
1439 /* Example:
1440 * ---------------------------------
1441 * | 1 | 1 | 1 | 1 |
1442 * ---------------------------------
1443 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1444 * ---------------------------------
1445 * 0 1 2 3 4 5 6 7
1446 * \_____________________/
1447 *
1448 * Neither [1] nor [6] is aligned to above layer.
1449 * Left neighbour [0] is free, so mark it busy,
1450 * decrease bb_counters and extend range to
1451 * [0; 6]
1452 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1453 * mark [6] free, increase bb_counters and shrink range to
1454 * [0; 5].
1455 * Then shift range to [0; 2], go up and do the same.
1456 */
1457
1458
1459 if (first & 1)
1460 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1461 if (!(last & 1))
1462 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1463 if (first > last)
1464 break;
1465 order++;
1466
1467 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1468 mb_clear_bits(buddy, first, last - first + 1);
1469 e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1470 break;
1471 }
1472 first >>= 1;
1473 last >>= 1;
1474 buddy = buddy2;
1475 }
1476 }
1477
1478 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1479 int first, int count)
1480 {
1481 int left_is_free = 0;
1482 int right_is_free = 0;
1483 int block;
1484 int last = first + count - 1;
1485 struct super_block *sb = e4b->bd_sb;
1486
1487 if (WARN_ON(count == 0))
1488 return;
1489 BUG_ON(last >= (sb->s_blocksize << 3));
1490 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1491 /* Don't bother if the block group is corrupt. */
1492 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1493 return;
1494
1495 mb_check_buddy(e4b);
1496 mb_free_blocks_double(inode, e4b, first, count);
1497
1498 this_cpu_inc(discard_pa_seq);
1499 e4b->bd_info->bb_free += count;
1500 if (first < e4b->bd_info->bb_first_free)
1501 e4b->bd_info->bb_first_free = first;
1502
1503 /* access memory sequentially: check left neighbour,
1504 * clear range and then check right neighbour
1505 */
1506 if (first != 0)
1507 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1508 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1509 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1510 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1511
1512 if (unlikely(block != -1)) {
1513 struct ext4_sb_info *sbi = EXT4_SB(sb);
1514 ext4_fsblk_t blocknr;
1515
1516 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1517 blocknr += EXT4_C2B(sbi, block);
1518 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1519 ext4_grp_locked_error(sb, e4b->bd_group,
1520 inode ? inode->i_ino : 0,
1521 blocknr,
1522 "freeing already freed block (bit %u); block bitmap corrupt.",
1523 block);
1524 ext4_mark_group_bitmap_corrupted(
1525 sb, e4b->bd_group,
1526 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1527 } else {
1528 mb_regenerate_buddy(e4b);
1529 }
1530 goto done;
1531 }
1532
1533 /* let's maintain fragments counter */
1534 if (left_is_free && right_is_free)
1535 e4b->bd_info->bb_fragments--;
1536 else if (!left_is_free && !right_is_free)
1537 e4b->bd_info->bb_fragments++;
1538
1539 /* buddy[0] == bd_bitmap is a special case, so handle
1540 * it right away and let mb_buddy_mark_free stay free of
1541 * zero order checks.
1542 * Check if neighbours are to be coalesced,
1543 * adjust bitmap bb_counters and borders appropriately.
1544 */
1545 if (first & 1) {
1546 first += !left_is_free;
1547 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1548 }
1549 if (!(last & 1)) {
1550 last -= !right_is_free;
1551 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1552 }
1553
1554 if (first <= last)
1555 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1556
1557 done:
1558 mb_set_largest_free_order(sb, e4b->bd_info);
1559 mb_check_buddy(e4b);
1560 }
1561
1562 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1563 int needed, struct ext4_free_extent *ex)
1564 {
1565 int next = block;
1566 int max, order;
1567 void *buddy;
1568
1569 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1570 BUG_ON(ex == NULL);
1571
1572 buddy = mb_find_buddy(e4b, 0, &max);
1573 BUG_ON(buddy == NULL);
1574 BUG_ON(block >= max);
1575 if (mb_test_bit(block, buddy)) {
1576 ex->fe_len = 0;
1577 ex->fe_start = 0;
1578 ex->fe_group = 0;
1579 return 0;
1580 }
1581
1582 /* find actual order */
1583 order = mb_find_order_for_block(e4b, block);
1584 block = block >> order;
1585
1586 ex->fe_len = 1 << order;
1587 ex->fe_start = block << order;
1588 ex->fe_group = e4b->bd_group;
1589
1590 /* calc difference from given start */
1591 next = next - ex->fe_start;
1592 ex->fe_len -= next;
1593 ex->fe_start += next;
1594
1595 while (needed > ex->fe_len &&
1596 mb_find_buddy(e4b, order, &max)) {
1597
1598 if (block + 1 >= max)
1599 break;
1600
1601 next = (block + 1) * (1 << order);
1602 if (mb_test_bit(next, e4b->bd_bitmap))
1603 break;
1604
1605 order = mb_find_order_for_block(e4b, next);
1606
1607 block = next >> order;
1608 ex->fe_len += 1 << order;
1609 }
1610
1611 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1612 /* Should never happen! (but apparently sometimes does?!?) */
1613 WARN_ON(1);
1614 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1615 "corruption or bug in mb_find_extent "
1616 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1617 block, order, needed, ex->fe_group, ex->fe_start,
1618 ex->fe_len, ex->fe_logical);
1619 ex->fe_len = 0;
1620 ex->fe_start = 0;
1621 ex->fe_group = 0;
1622 }
1623 return ex->fe_len;
1624 }
1625
1626 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1627 {
1628 int ord;
1629 int mlen = 0;
1630 int max = 0;
1631 int cur;
1632 int start = ex->fe_start;
1633 int len = ex->fe_len;
1634 unsigned ret = 0;
1635 int len0 = len;
1636 void *buddy;
1637
1638 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1639 BUG_ON(e4b->bd_group != ex->fe_group);
1640 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1641 mb_check_buddy(e4b);
1642 mb_mark_used_double(e4b, start, len);
1643
1644 this_cpu_inc(discard_pa_seq);
1645 e4b->bd_info->bb_free -= len;
1646 if (e4b->bd_info->bb_first_free == start)
1647 e4b->bd_info->bb_first_free += len;
1648
1649 /* let's maintain fragments counter */
1650 if (start != 0)
1651 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1652 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1653 max = !mb_test_bit(start + len, e4b->bd_bitmap);
1654 if (mlen && max)
1655 e4b->bd_info->bb_fragments++;
1656 else if (!mlen && !max)
1657 e4b->bd_info->bb_fragments--;
1658
1659 /* let's maintain buddy itself */
1660 while (len) {
1661 ord = mb_find_order_for_block(e4b, start);
1662
1663 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1664 /* the whole chunk may be allocated at once! */
1665 mlen = 1 << ord;
1666 buddy = mb_find_buddy(e4b, ord, &max);
1667 BUG_ON((start >> ord) >= max);
1668 mb_set_bit(start >> ord, buddy);
1669 e4b->bd_info->bb_counters[ord]--;
1670 start += mlen;
1671 len -= mlen;
1672 BUG_ON(len < 0);
1673 continue;
1674 }
1675
1676 /* store for history */
1677 if (ret == 0)
1678 ret = len | (ord << 16);
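	/*
	 * Note: ret packs the tail length that remained when the first
	 * buddy split happened in its low 16 bits and the order of that
	 * split in the bits above; ext4_mb_use_best_found() later
	 * unpacks it into ac_tail and ac_buddy.
	 */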
1679
1680 /* we have to split large buddy */
1681 BUG_ON(ord <= 0);
1682 buddy = mb_find_buddy(e4b, ord, &max);
1683 mb_set_bit(start >> ord, buddy);
1684 e4b->bd_info->bb_counters[ord]--;
1685
1686 ord--;
1687 cur = (start >> ord) & ~1U;
1688 buddy = mb_find_buddy(e4b, ord, &max);
1689 mb_clear_bit(cur, buddy);
1690 mb_clear_bit(cur + 1, buddy);
1691 e4b->bd_info->bb_counters[ord]++;
1692 e4b->bd_info->bb_counters[ord]++;
1693 }
1694 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1695
1696 ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1697 mb_check_buddy(e4b);
1698
1699 return ret;
1700 }
1701
1702 /*
1703 * Must be called under group lock!
1704 */
1705 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1706 struct ext4_buddy *e4b)
1707 {
1708 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1709 int ret;
1710
1711 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1712 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1713
1714 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1715 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1716 ret = mb_mark_used(e4b, &ac->ac_b_ex);
1717
1718 /* preallocation can change ac_b_ex, thus we store actually
1719 * allocated blocks for history */
1720 ac->ac_f_ex = ac->ac_b_ex;
1721
1722 ac->ac_status = AC_STATUS_FOUND;
1723 ac->ac_tail = ret & 0xffff;
1724 ac->ac_buddy = ret >> 16;
1725
1726 /*
1727 * take the page reference. We want the page to be pinned
1728 * so that we don't get an ext4_mb_init_cache() call for this
1729 * group until we update the bitmap. That would mean we
1730 * could double allocate blocks. The reference is dropped
1731 * in ext4_mb_release_context
1732 */
1733 ac->ac_bitmap_page = e4b->bd_bitmap_page;
1734 get_page(ac->ac_bitmap_page);
1735 ac->ac_buddy_page = e4b->bd_buddy_page;
1736 get_page(ac->ac_buddy_page);
1737 /* store last allocated for subsequent stream allocation */
1738 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1739 spin_lock(&sbi->s_md_lock);
1740 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1741 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1742 spin_unlock(&sbi->s_md_lock);
1743 }
1744 /*
1745 * As we've just preallocated more space than
1746 * user requested originally, we store allocated
1747 * space in a special descriptor.
1748 */
1749 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
1750 ext4_mb_new_preallocation(ac);
1751
1752 }
1753
1754 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1755 struct ext4_buddy *e4b,
1756 int finish_group)
1757 {
1758 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1759 struct ext4_free_extent *bex = &ac->ac_b_ex;
1760 struct ext4_free_extent *gex = &ac->ac_g_ex;
1761 struct ext4_free_extent ex;
1762 int max;
1763
1764 if (ac->ac_status == AC_STATUS_FOUND)
1765 return;
1766 /*
1767 * We don't want to scan for a whole year
1768 */
1769 if (ac->ac_found > sbi->s_mb_max_to_scan &&
1770 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1771 ac->ac_status = AC_STATUS_BREAK;
1772 return;
1773 }
1774
1775 /*
1776 * Haven't found good chunk so far, let's continue
1777 */
1778 if (bex->fe_len < gex->fe_len)
1779 return;
1780
1781 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1782 && bex->fe_group == e4b->bd_group) {
1783 /* recheck chunk's availability - we don't know
1784 * when it was found (within this lock-unlock
1785 * period or not) */
1786 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
1787 if (max >= gex->fe_len) {
1788 ext4_mb_use_best_found(ac, e4b);
1789 return;
1790 }
1791 }
1792 }
1793
1794 /*
1795 * The routine checks whether the found extent is good enough. If it is,
1796 * the extent is marked used and a flag is set in the context to stop
1797 * scanning. Otherwise, the extent is compared with the previously found
1798 * extent and, if the new one is better, it is stored in the context.
1799 * Later, the best found extent will be used if mballoc can't find a
1800 * good enough extent.
1801 *
1802 * FIXME: real allocation policy is to be designed yet!
1803 */
1804 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1805 struct ext4_free_extent *ex,
1806 struct ext4_buddy *e4b)
1807 {
1808 struct ext4_free_extent *bex = &ac->ac_b_ex;
1809 struct ext4_free_extent *gex = &ac->ac_g_ex;
1810
1811 BUG_ON(ex->fe_len <= 0);
1812 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1813 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1814 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1815
1816 ac->ac_found++;
1817
1818 /*
1819 * The special case - take what you catch first
1820 */
1821 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1822 *bex = *ex;
1823 ext4_mb_use_best_found(ac, e4b);
1824 return;
1825 }
1826
1827 /*
1828 	 * Let's check whether the chunk is good enough
1829 */
1830 if (ex->fe_len == gex->fe_len) {
1831 *bex = *ex;
1832 ext4_mb_use_best_found(ac, e4b);
1833 return;
1834 }
1835
1836 /*
1837 * If this is first found extent, just store it in the context
1838 */
1839 if (bex->fe_len == 0) {
1840 *bex = *ex;
1841 return;
1842 }
1843
1844 /*
1845 * If new found extent is better, store it in the context
1846 */
1847 if (bex->fe_len < gex->fe_len) {
1848 /* if the request isn't satisfied, any found extent
1849 * larger than previous best one is better */
1850 if (ex->fe_len > bex->fe_len)
1851 *bex = *ex;
1852 } else if (ex->fe_len > gex->fe_len) {
1853 /* if the request is satisfied, then we try to find
1854 		 * an extent that still satisfies the request but is
1855 		 * smaller than the previous one */
1856 if (ex->fe_len < bex->fe_len)
1857 *bex = *ex;
1858 }
1859
1860 ext4_mb_check_limits(ac, e4b, 0);
1861 }
1862
1863 static noinline_for_stack
1864 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1865 struct ext4_buddy *e4b)
1866 {
1867 struct ext4_free_extent ex = ac->ac_b_ex;
1868 ext4_group_t group = ex.fe_group;
1869 int max;
1870 int err;
1871
1872 BUG_ON(ex.fe_len <= 0);
1873 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1874 if (err)
1875 return err;
1876
1877 ext4_lock_group(ac->ac_sb, group);
1878 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
1879
1880 if (max > 0) {
1881 ac->ac_b_ex = ex;
1882 ext4_mb_use_best_found(ac, e4b);
1883 }
1884
1885 ext4_unlock_group(ac->ac_sb, group);
1886 ext4_mb_unload_buddy(e4b);
1887
1888 return 0;
1889 }
1890
1891 static noinline_for_stack
1892 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1893 struct ext4_buddy *e4b)
1894 {
1895 ext4_group_t group = ac->ac_g_ex.fe_group;
1896 int max;
1897 int err;
1898 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1899 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1900 struct ext4_free_extent ex;
1901
1902 if (!grp)
1903 return -EFSCORRUPTED;
1904 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1905 return 0;
1906 if (grp->bb_free == 0)
1907 return 0;
1908
1909 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1910 if (err)
1911 return err;
1912
1913 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
1914 ext4_mb_unload_buddy(e4b);
1915 return 0;
1916 }
1917
1918 ext4_lock_group(ac->ac_sb, group);
1919 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
1920 ac->ac_g_ex.fe_len, &ex);
1921 ex.fe_logical = 0xDEADFA11; /* debug value */
1922
1923 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1924 ext4_fsblk_t start;
1925
1926 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1927 ex.fe_start;
1928 /* use do_div to get remainder (would be 64-bit modulo) */
1929 if (do_div(start, sbi->s_stripe) == 0) {
1930 ac->ac_found++;
1931 ac->ac_b_ex = ex;
1932 ext4_mb_use_best_found(ac, e4b);
1933 }
1934 } else if (max >= ac->ac_g_ex.fe_len) {
1935 BUG_ON(ex.fe_len <= 0);
1936 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1937 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1938 ac->ac_found++;
1939 ac->ac_b_ex = ex;
1940 ext4_mb_use_best_found(ac, e4b);
1941 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1942 		/* Sometimes the caller may want to merge even a small
1943 		 * number of blocks into an existing extent */
1944 BUG_ON(ex.fe_len <= 0);
1945 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1946 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1947 ac->ac_found++;
1948 ac->ac_b_ex = ex;
1949 ext4_mb_use_best_found(ac, e4b);
1950 }
1951 ext4_unlock_group(ac->ac_sb, group);
1952 ext4_mb_unload_buddy(e4b);
1953
1954 return 0;
1955 }
1956
1957 /*
1958 * The routine scans buddy structures (not the bitmap!) from the given
1959 * order up to the maximum order and tries to find a chunk big enough to satisfy the request
1960 */
1961 static noinline_for_stack
1962 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1963 struct ext4_buddy *e4b)
1964 {
1965 struct super_block *sb = ac->ac_sb;
1966 struct ext4_group_info *grp = e4b->bd_info;
1967 void *buddy;
1968 int i;
1969 int k;
1970 int max;
1971
1972 BUG_ON(ac->ac_2order <= 0);
1973 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1974 if (grp->bb_counters[i] == 0)
1975 continue;
1976
1977 buddy = mb_find_buddy(e4b, i, &max);
1978 BUG_ON(buddy == NULL);
1979
1980 k = mb_find_next_zero_bit(buddy, max, 0);
1981 if (k >= max) {
1982 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
1983 "%d free clusters of order %d. But found 0",
1984 grp->bb_counters[i], i);
1985 ext4_mark_group_bitmap_corrupted(ac->ac_sb,
1986 e4b->bd_group,
1987 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1988 break;
1989 }
1990 ac->ac_found++;
1991
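		/*
		 * Bit k in the order-i buddy covers clusters
		 * [k << i, (k + 1) << i) of the group, so the free bit found
		 * above maps directly to a best-found extent of 1 << i
		 * clusters starting at cluster k << i.
		 */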
1992 ac->ac_b_ex.fe_len = 1 << i;
1993 ac->ac_b_ex.fe_start = k << i;
1994 ac->ac_b_ex.fe_group = e4b->bd_group;
1995
1996 ext4_mb_use_best_found(ac, e4b);
1997
1998 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
1999
2000 if (EXT4_SB(sb)->s_mb_stats)
2001 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2002
2003 break;
2004 }
2005 }
2006
2007 /*
2008 * The routine scans the group and measures all found extents.
2009 * To optimize scanning, the number of free blocks in the group
2010 * (bb_free) is used as an upper limit so the scan can stop early.
2011 */
2012 static noinline_for_stack
2013 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2014 struct ext4_buddy *e4b)
2015 {
2016 struct super_block *sb = ac->ac_sb;
2017 void *bitmap = e4b->bd_bitmap;
2018 struct ext4_free_extent ex;
2019 int i;
2020 int free;
2021
2022 free = e4b->bd_info->bb_free;
2023 if (WARN_ON(free <= 0))
2024 return;
2025
2026 i = e4b->bd_info->bb_first_free;
2027
2028 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2029 i = mb_find_next_zero_bit(bitmap,
2030 EXT4_CLUSTERS_PER_GROUP(sb), i);
2031 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2032 /*
2033 			 * If the bitmap is corrupt, we won't find any
2034 			 * free blocks even though the group info says
2035 			 * there are free blocks
2036 */
2037 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2038 "%d free clusters as per "
2039 "group info. But bitmap says 0",
2040 free);
2041 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2042 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2043 break;
2044 }
2045
2046 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2047 if (WARN_ON(ex.fe_len <= 0))
2048 break;
2049 if (free < ex.fe_len) {
2050 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2051 "%d free clusters as per "
2052 "group info. But got %d blocks",
2053 free, ex.fe_len);
2054 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2055 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2056 /*
2057 * The number of free blocks differs. This mostly
2058 			 * indicates that the bitmap is corrupt. So exit
2059 * without claiming the space.
2060 */
2061 break;
2062 }
2063 ex.fe_logical = 0xDEADC0DE; /* debug value */
2064 ext4_mb_measure_extent(ac, &ex, e4b);
2065
2066 i += ex.fe_len;
2067 free -= ex.fe_len;
2068 }
2069
2070 ext4_mb_check_limits(ac, e4b, 1);
2071 }
2072
2073 /*
2074 * This is a special case for storage like RAID5: we try to find
2075 * stripe-aligned chunks for requests that are a multiple of the stripe size
2076 */
2077 static noinline_for_stack
2078 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2079 struct ext4_buddy *e4b)
2080 {
2081 struct super_block *sb = ac->ac_sb;
2082 struct ext4_sb_info *sbi = EXT4_SB(sb);
2083 void *bitmap = e4b->bd_bitmap;
2084 struct ext4_free_extent ex;
2085 ext4_fsblk_t first_group_block;
2086 ext4_fsblk_t a;
2087 ext4_grpblk_t i;
2088 int max;
2089
2090 BUG_ON(sbi->s_stripe == 0);
2091
2092 /* find first stripe-aligned block in group */
2093 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2094
2095 a = first_group_block + sbi->s_stripe - 1;
2096 do_div(a, sbi->s_stripe);
2097 i = (a * sbi->s_stripe) - first_group_block;
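	/*
	 * Illustrative example (assumed numbers): with s_stripe == 16 and a
	 * group whose first block is 1000, a = (1000 + 15) / 16 = 63, so
	 * i = 63 * 16 - 1000 = 8, i.e. the offset of the first block inside
	 * the group whose disk address is a multiple of the stripe size.
	 * The loop below then steps through the group one stripe at a time.
	 */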
2098
2099 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2100 if (!mb_test_bit(i, bitmap)) {
2101 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2102 if (max >= sbi->s_stripe) {
2103 ac->ac_found++;
2104 ex.fe_logical = 0xDEADF00D; /* debug value */
2105 ac->ac_b_ex = ex;
2106 ext4_mb_use_best_found(ac, e4b);
2107 break;
2108 }
2109 }
2110 i += sbi->s_stripe;
2111 }
2112 }
2113
2114 /*
2115 * This is also called BEFORE we load the buddy bitmap.
2116 * Returns true if the group is suitable for the allocation,
2117 * false otherwise.
2118 */
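/*
 * Rough meaning of the cr levels checked below: cr 0 only accepts a group
 * that can satisfy the request from a single buddy chunk of the needed
 * order, cr 1 requires the average free fragment to be large enough,
 * cr 2 merely requires enough free clusters in total, and cr 3 accepts
 * any group that still has free space.
 */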
2119 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2120 ext4_group_t group, int cr)
2121 {
2122 ext4_grpblk_t free, fragments;
2123 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2124 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2125
2126 BUG_ON(cr < 0 || cr >= 4);
2127
2128 	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2129 return false;
2130
2131 free = grp->bb_free;
2132 if (free == 0)
2133 return false;
2134
2135 fragments = grp->bb_fragments;
2136 if (fragments == 0)
2137 return false;
2138
2139 switch (cr) {
2140 case 0:
2141 BUG_ON(ac->ac_2order == 0);
2142
2143 /* Avoid using the first bg of a flexgroup for data files */
2144 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2145 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2146 ((group % flex_size) == 0))
2147 return false;
2148
2149 if (free < ac->ac_g_ex.fe_len)
2150 return false;
2151
2152 if (ac->ac_2order > ac->ac_sb->s_blocksize_bits+1)
2153 return true;
2154
2155 if (grp->bb_largest_free_order < ac->ac_2order)
2156 return false;
2157
2158 return true;
2159 case 1:
2160 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2161 return true;
2162 break;
2163 case 2:
2164 if (free >= ac->ac_g_ex.fe_len)
2165 return true;
2166 break;
2167 case 3:
2168 return true;
2169 default:
2170 BUG();
2171 }
2172
2173 return false;
2174 }
2175
2176 /*
2177 * This could return a negative error code if something goes wrong
2178 * during ext4_mb_init_group(). This should not be called with
2179 * ext4_lock_group() held.
2180 */
2181 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2182 ext4_group_t group, int cr)
2183 {
2184 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2185 struct super_block *sb = ac->ac_sb;
2186 struct ext4_sb_info *sbi = EXT4_SB(sb);
2187 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2188 ext4_grpblk_t free;
2189 int ret = 0;
2190
2191 if (!grp)
2192 return -EFSCORRUPTED;
2193 if (should_lock)
2194 ext4_lock_group(sb, group);
2195 free = grp->bb_free;
2196 if (free == 0)
2197 goto out;
2198 if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2199 goto out;
2200 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2201 goto out;
2202 if (should_lock)
2203 ext4_unlock_group(sb, group);
2204
2205 /* We only do this if the grp has never been initialized */
2206 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2207 struct ext4_group_desc *gdp =
2208 ext4_get_group_desc(sb, group, NULL);
2209 int ret;
2210
2211 /* cr=0/1 is a very optimistic search to find large
2212 * good chunks almost for free. If buddy data is not
2213 * ready, then this optimization makes no sense. But
2214 * we never skip the first block group in a flex_bg,
2215 * since this gets used for metadata block allocation,
2216 * and we want to make sure we locate metadata blocks
2217 * in the first block group in the flex_bg if possible.
2218 */
2219 if (cr < 2 &&
2220 (!sbi->s_log_groups_per_flex ||
2221 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2222 !(ext4_has_group_desc_csum(sb) &&
2223 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2224 return 0;
2225 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2226 if (ret)
2227 return ret;
2228 }
2229
2230 if (should_lock)
2231 ext4_lock_group(sb, group);
2232 ret = ext4_mb_good_group(ac, group, cr);
2233 out:
2234 if (should_lock)
2235 ext4_unlock_group(sb, group);
2236 return ret;
2237 }
2238
2239 /*
2240 * Start prefetching @nr block bitmaps starting at @group.
2241 * Return the next group which needs to be prefetched.
2242 */
2243 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2244 unsigned int nr, int *cnt)
2245 {
2246 ext4_group_t ngroups = ext4_get_groups_count(sb);
2247 struct buffer_head *bh;
2248 struct blk_plug plug;
2249
2250 blk_start_plug(&plug);
2251 while (nr-- > 0) {
2252 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2253 NULL);
2254 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2255
2256 /*
2257 * Prefetch block groups with free blocks; but don't
2258 * bother if it is marked uninitialized on disk, since
2259 * it won't require I/O to read. Also only try to
2260 		 * prefetch once, so we avoid the getblk() call, which can
2261 * be expensive.
2262 */
2263 if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2264 EXT4_MB_GRP_NEED_INIT(grp) &&
2265 ext4_free_group_clusters(sb, gdp) > 0 &&
2266 !(ext4_has_group_desc_csum(sb) &&
2267 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2268 bh = ext4_read_block_bitmap_nowait(sb, group, true);
2269 if (bh && !IS_ERR(bh)) {
2270 if (!buffer_uptodate(bh) && cnt)
2271 (*cnt)++;
2272 brelse(bh);
2273 }
2274 }
2275 if (++group >= ngroups)
2276 group = 0;
2277 }
2278 blk_finish_plug(&plug);
2279 return group;
2280 }
2281
2282 /*
2283 * Prefetching reads the block bitmap into the buffer cache; but we
2284 * need to make sure that the buddy bitmap in the page cache has been
2285 * initialized. Note that ext4_mb_init_group() will block if the I/O
2286 * is not yet completed, or indeed if the I/O was not initiated by
2287 * ext4_mb_prefetch at all.
2288 *
2289 * TODO: We should actually kick off the buddy bitmap setup in a work
2290 * queue when the buffer I/O is completed, so that we don't block
2291 * waiting for the block allocation bitmap read to finish when
2292 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2293 */
2294 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2295 unsigned int nr)
2296 {
2297 	while (nr-- > 0) {
2298 		struct ext4_group_desc *gdp;
2299 		struct ext4_group_info *grp;
2300 
2301 		if (!group)
2302 			group = ext4_get_groups_count(sb);
2303 		group--;
2304 		gdp = ext4_get_group_desc(sb, group, NULL);
2305 		grp = ext4_get_group_info(sb, group);
2306
2307 if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2308 ext4_free_group_clusters(sb, gdp) > 0 &&
2309 !(ext4_has_group_desc_csum(sb) &&
2310 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2311 if (ext4_mb_init_group(sb, group, GFP_NOFS))
2312 break;
2313 }
2314 }
2315 }
2316
2317 static noinline_for_stack int
2318 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2319 {
2320 ext4_group_t prefetch_grp = 0, ngroups, group, i;
2321 int cr = -1;
2322 int err = 0, first_err = 0;
2323 unsigned int nr = 0, prefetch_ios = 0;
2324 struct ext4_sb_info *sbi;
2325 struct super_block *sb;
2326 struct ext4_buddy e4b;
2327 int lost;
2328
2329 sb = ac->ac_sb;
2330 sbi = EXT4_SB(sb);
2331 ngroups = ext4_get_groups_count(sb);
2332 /* non-extent files are limited to low blocks/groups */
2333 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2334 ngroups = sbi->s_blockfile_groups;
2335
2336 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2337
2338 /* first, try the goal */
2339 err = ext4_mb_find_by_goal(ac, &e4b);
2340 if (err || ac->ac_status == AC_STATUS_FOUND)
2341 goto out;
2342
2343 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2344 goto out;
2345
2346 /*
2347 	 * ac->ac_2order is set only if the fe_len is a power of 2.
2348 	 * If ac->ac_2order is set, we also set the criteria to 0 so that we
2349 	 * try exact allocation using the buddy data.
2350 */
2351 i = fls(ac->ac_g_ex.fe_len);
2352 ac->ac_2order = 0;
2353 /*
2354 * We search using buddy data only if the order of the request
2355 	 * is greater than or equal to sbi->s_mb_order2_reqs.
2356 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2357 	 * We also support searching for power-of-two requests only for
2358 	 * requests up to the maximum buddy size we have constructed.
2359 */
2360 if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
2361 /*
2362 		 * This should tell if fe_len is exactly a power of 2
2363 */
2364 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2365 ac->ac_2order = array_index_nospec(i - 1,
2366 sb->s_blocksize_bits + 2);
2367 }
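	/*
	 * Worked example (illustrative): for a goal length of 8 clusters,
	 * fls(8) == 4 and (8 & ~(1 << 3)) == 0, so ac_2order becomes 3 and
	 * the cr == 0 pass below can look for a free order-3 buddy directly.
	 */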
2368
2369 /* if stream allocation is enabled, use global goal */
2370 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2371 /* TBD: may be hot point */
2372 spin_lock(&sbi->s_md_lock);
2373 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2374 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2375 spin_unlock(&sbi->s_md_lock);
2376 }
2377
2378 	/* Let's just scan groups to find more or less suitable blocks */
2379 cr = ac->ac_2order ? 0 : 1;
2380 /*
2381 * cr == 0 try to get exact allocation,
2382 * cr == 3 try to get anything
2383 */
2384 repeat:
2385 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2386 ac->ac_criteria = cr;
2387 /*
2388 		 * search for the right group, starting
2389 		 * from the goal value specified
2390 */
2391 group = ac->ac_g_ex.fe_group;
2392 prefetch_grp = group;
2393
2394 for (i = 0; i < ngroups; group++, i++) {
2395 int ret = 0;
2396 cond_resched();
2397 /*
2398 * Artificially restricted ngroups for non-extent
2399 * files makes group > ngroups possible on first loop.
2400 */
2401 if (group >= ngroups)
2402 group = 0;
2403
2404 /*
2405 * Batch reads of the block allocation bitmaps
2406 * to get multiple READs in flight; limit
2407 * prefetching at cr=0/1, otherwise mballoc can
2408 * spend a lot of time loading imperfect groups
2409 */
2410 if ((prefetch_grp == group) &&
2411 (cr > 1 ||
2412 prefetch_ios < sbi->s_mb_prefetch_limit)) {
2413 unsigned int curr_ios = prefetch_ios;
2414
2415 nr = sbi->s_mb_prefetch;
2416 if (ext4_has_feature_flex_bg(sb)) {
2417 nr = 1 << sbi->s_log_groups_per_flex;
2418 nr -= group & (nr - 1);
2419 nr = min(nr, sbi->s_mb_prefetch);
2420 }
2421 prefetch_grp = ext4_mb_prefetch(sb, group,
2422 nr, &prefetch_ios);
2423 if (prefetch_ios == curr_ios)
2424 nr = 0;
2425 }
2426
2427 /* This now checks without needing the buddy page */
2428 ret = ext4_mb_good_group_nolock(ac, group, cr);
2429 if (ret <= 0) {
2430 if (!first_err)
2431 first_err = ret;
2432 continue;
2433 }
2434
2435 err = ext4_mb_load_buddy(sb, group, &e4b);
2436 if (err)
2437 goto out;
2438
2439 ext4_lock_group(sb, group);
2440
2441 /*
2442 * We need to check again after locking the
2443 * block group
2444 */
2445 ret = ext4_mb_good_group(ac, group, cr);
2446 if (ret == 0) {
2447 ext4_unlock_group(sb, group);
2448 ext4_mb_unload_buddy(&e4b);
2449 continue;
2450 }
2451
2452 ac->ac_groups_scanned++;
2453 if (cr == 0)
2454 ext4_mb_simple_scan_group(ac, &e4b);
2455 else if (cr == 1 && sbi->s_stripe &&
2456 !(ac->ac_g_ex.fe_len % sbi->s_stripe))
2457 ext4_mb_scan_aligned(ac, &e4b);
2458 else
2459 ext4_mb_complex_scan_group(ac, &e4b);
2460
2461 ext4_unlock_group(sb, group);
2462 ext4_mb_unload_buddy(&e4b);
2463
2464 if (ac->ac_status != AC_STATUS_CONTINUE)
2465 break;
2466 }
2467 }
2468
2469 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2470 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2471 /*
2472 * We've been searching too long. Let's try to allocate
2473 * the best chunk we've found so far
2474 */
2475 ext4_mb_try_best_found(ac, &e4b);
2476 if (ac->ac_status != AC_STATUS_FOUND) {
2477 /*
2478 			 * Someone luckier has already allocated it.
2479 			 * The only thing we can do is just take the first
2480 * found block(s)
2481 */
2482 lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2483 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2484 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2485 ac->ac_b_ex.fe_len, lost);
2486
2487 ac->ac_b_ex.fe_group = 0;
2488 ac->ac_b_ex.fe_start = 0;
2489 ac->ac_b_ex.fe_len = 0;
2490 ac->ac_status = AC_STATUS_CONTINUE;
2491 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2492 cr = 3;
2493 goto repeat;
2494 }
2495 }
2496 out:
2497 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2498 err = first_err;
2499
2500 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2501 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2502 ac->ac_flags, cr, err);
2503
2504 if (nr)
2505 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2506
2507 return err;
2508 }
2509
2510 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2511 {
2512 struct super_block *sb = PDE_DATA(file_inode(seq->file));
2513 ext4_group_t group;
2514
2515 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2516 return NULL;
2517 group = *pos + 1;
2518 return (void *) ((unsigned long) group);
2519 }
2520
2521 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2522 {
2523 struct super_block *sb = PDE_DATA(file_inode(seq->file));
2524 ext4_group_t group;
2525
2526 ++*pos;
2527 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2528 return NULL;
2529 group = *pos + 1;
2530 return (void *) ((unsigned long) group);
2531 }
2532
2533 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2534 {
2535 struct super_block *sb = PDE_DATA(file_inode(seq->file));
2536 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2537 int i;
2538 int err, buddy_loaded = 0;
2539 struct ext4_buddy e4b;
2540 struct ext4_group_info *grinfo;
2541 unsigned char blocksize_bits = min_t(unsigned char,
2542 sb->s_blocksize_bits,
2543 EXT4_MAX_BLOCK_LOG_SIZE);
2544 struct sg {
2545 struct ext4_group_info info;
2546 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2547 } sg;
2548
2549 group--;
2550 if (group == 0)
2551 seq_puts(seq, "#group: free frags first ["
2552 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
2553 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
2554
2555 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2556 sizeof(struct ext4_group_info);
2557
2558 grinfo = ext4_get_group_info(sb, group);
2559 if (!grinfo)
2560 return 0;
2561 /* Load the group info in memory only if not already loaded. */
2562 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2563 err = ext4_mb_load_buddy(sb, group, &e4b);
2564 if (err) {
2565 seq_printf(seq, "#%-5u: I/O error\n", group);
2566 return 0;
2567 }
2568 buddy_loaded = 1;
2569 }
2570
2571 memcpy(&sg, grinfo, i);
2572
2573 if (buddy_loaded)
2574 ext4_mb_unload_buddy(&e4b);
2575
2576 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2577 sg.info.bb_fragments, sg.info.bb_first_free);
2578 for (i = 0; i <= 13; i++)
2579 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2580 sg.info.bb_counters[i] : 0);
2581 seq_puts(seq, " ]\n");
2582
2583 return 0;
2584 }
2585
2586 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2587 {
2588 }
2589
2590 const struct seq_operations ext4_mb_seq_groups_ops = {
2591 .start = ext4_mb_seq_groups_start,
2592 .next = ext4_mb_seq_groups_next,
2593 .stop = ext4_mb_seq_groups_stop,
2594 .show = ext4_mb_seq_groups_show,
2595 };
2596
2597 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2598 {
2599 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2600 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2601
2602 BUG_ON(!cachep);
2603 return cachep;
2604 }
2605
2606 /*
2607 * Allocate the top-level s_group_info array for the specified number
2608 * of groups
2609 */
2610 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2611 {
2612 struct ext4_sb_info *sbi = EXT4_SB(sb);
2613 unsigned size;
2614 struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
2615
2616 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2617 EXT4_DESC_PER_BLOCK_BITS(sb);
2618 if (size <= sbi->s_group_info_size)
2619 return 0;
2620
2621 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2622 new_groupinfo = kvzalloc(size, GFP_KERNEL);
2623 if (!new_groupinfo) {
2624 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2625 return -ENOMEM;
2626 }
2627 rcu_read_lock();
2628 old_groupinfo = rcu_dereference(sbi->s_group_info);
2629 if (old_groupinfo)
2630 memcpy(new_groupinfo, old_groupinfo,
2631 sbi->s_group_info_size * sizeof(*sbi->s_group_info));
2632 rcu_read_unlock();
2633 rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
2634 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2635 if (old_groupinfo)
2636 ext4_kvfree_array_rcu(old_groupinfo);
2637 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
2638 sbi->s_group_info_size);
2639 return 0;
2640 }
2641
2642 /* Create and initialize ext4_group_info data for the given group. */
2643 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2644 struct ext4_group_desc *desc)
2645 {
2646 int i;
2647 int metalen = 0;
2648 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
2649 struct ext4_sb_info *sbi = EXT4_SB(sb);
2650 struct ext4_group_info **meta_group_info;
2651 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2652
2653 /*
2654 	 * First check whether this group is the first one in a descriptor block.
2655 	 * If so, we have to allocate a new table of pointers
2656 * to ext4_group_info structures
2657 */
2658 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2659 metalen = sizeof(*meta_group_info) <<
2660 EXT4_DESC_PER_BLOCK_BITS(sb);
2661 meta_group_info = kmalloc(metalen, GFP_NOFS);
2662 if (meta_group_info == NULL) {
2663 ext4_msg(sb, KERN_ERR, "can't allocate mem "
2664 "for a buddy group");
2665 goto exit_meta_group_info;
2666 }
2667 rcu_read_lock();
2668 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
2669 rcu_read_unlock();
2670 }
2671
2672 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
2673 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2674
2675 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
2676 if (meta_group_info[i] == NULL) {
2677 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2678 goto exit_group_info;
2679 }
2680 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2681 &(meta_group_info[i]->bb_state));
2682
2683 /*
2684 * initialize bb_free to be able to skip
2685 * empty groups without initialization
2686 */
2687 if (ext4_has_group_desc_csum(sb) &&
2688 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
2689 meta_group_info[i]->bb_free =
2690 ext4_free_clusters_after_init(sb, group, desc);
2691 } else {
2692 meta_group_info[i]->bb_free =
2693 ext4_free_group_clusters(sb, desc);
2694 }
2695
2696 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2697 init_rwsem(&meta_group_info[i]->alloc_sem);
2698 meta_group_info[i]->bb_free_root = RB_ROOT;
2699 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
2700
2701 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
2702 return 0;
2703
2704 exit_group_info:
2705 /* If a meta_group_info table has been allocated, release it now */
2706 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2707 struct ext4_group_info ***group_info;
2708
2709 rcu_read_lock();
2710 group_info = rcu_dereference(sbi->s_group_info);
2711 kfree(group_info[idx]);
2712 group_info[idx] = NULL;
2713 rcu_read_unlock();
2714 }
2715 exit_meta_group_info:
2716 return -ENOMEM;
2717 } /* ext4_mb_add_groupinfo */
2718
2719 static int ext4_mb_init_backend(struct super_block *sb)
2720 {
2721 ext4_group_t ngroups = ext4_get_groups_count(sb);
2722 ext4_group_t i;
2723 struct ext4_sb_info *sbi = EXT4_SB(sb);
2724 int err;
2725 struct ext4_group_desc *desc;
2726 struct ext4_group_info ***group_info;
2727 struct kmem_cache *cachep;
2728
2729 err = ext4_mb_alloc_groupinfo(sb, ngroups);
2730 if (err)
2731 return err;
2732
2733 sbi->s_buddy_cache = new_inode(sb);
2734 if (sbi->s_buddy_cache == NULL) {
2735 ext4_msg(sb, KERN_ERR, "can't get new inode");
2736 goto err_freesgi;
2737 }
2738 	/* To avoid potentially colliding with a valid on-disk inode number,
2739 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
2740 * not in the inode hash, so it should never be found by iget(), but
2741 * this will avoid confusion if it ever shows up during debugging. */
2742 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2743 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2744 for (i = 0; i < ngroups; i++) {
2745 cond_resched();
2746 desc = ext4_get_group_desc(sb, i, NULL);
2747 if (desc == NULL) {
2748 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2749 goto err_freebuddy;
2750 }
2751 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2752 goto err_freebuddy;
2753 }
2754
2755 if (ext4_has_feature_flex_bg(sb)) {
2756 		/* A single flex group is supposed to be read by a single IO.
2757 		 * s_mb_prefetch is an unsigned integer, so the shift below must
2758 		 * stay below 32; reject s_log_groups_per_flex >= 32.
2759 */
2760 if (sbi->s_es->s_log_groups_per_flex >= 32) {
2761 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
2762 goto err_freebuddy;
2763 }
2764 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
2765 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
2766 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
2767 } else {
2768 sbi->s_mb_prefetch = 32;
2769 }
2770 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
2771 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
2772 	/* Now decide how many real IOs to prefetch within a single allocation at
2773 	 * cr=0. Since cr=0 is a CPU-related optimization, we shouldn't try to
2774 	 * load too many groups; at some point we should start using what
2775 	 * we've already got in memory.
2776 	 * With an average random access time of 5 ms, it'd take a second to read
2777 	 * 200 groups (times N with flex_bg), so let's make this limit 4.
2778 */
2779 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
2780 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
2781 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
2782
2783 return 0;
2784
2785 err_freebuddy:
2786 cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2787 while (i-- > 0) {
2788 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
2789
2790 if (grp)
2791 kmem_cache_free(cachep, grp);
2792 }
2793 i = sbi->s_group_info_size;
2794 rcu_read_lock();
2795 group_info = rcu_dereference(sbi->s_group_info);
2796 while (i-- > 0)
2797 kfree(group_info[i]);
2798 rcu_read_unlock();
2799 iput(sbi->s_buddy_cache);
2800 err_freesgi:
2801 rcu_read_lock();
2802 kvfree(rcu_dereference(sbi->s_group_info));
2803 rcu_read_unlock();
2804 return -ENOMEM;
2805 }
2806
2807 static void ext4_groupinfo_destroy_slabs(void)
2808 {
2809 int i;
2810
2811 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2812 kmem_cache_destroy(ext4_groupinfo_caches[i]);
2813 ext4_groupinfo_caches[i] = NULL;
2814 }
2815 }
2816
2817 static int ext4_groupinfo_create_slab(size_t size)
2818 {
2819 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2820 int slab_size;
2821 int blocksize_bits = order_base_2(size);
2822 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2823 struct kmem_cache *cachep;
2824
2825 if (cache_index >= NR_GRPINFO_CACHES)
2826 return -EINVAL;
2827
2828 if (unlikely(cache_index < 0))
2829 cache_index = 0;
2830
2831 mutex_lock(&ext4_grpinfo_slab_create_mutex);
2832 if (ext4_groupinfo_caches[cache_index]) {
2833 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2834 return 0; /* Already created */
2835 }
2836
2837 slab_size = offsetof(struct ext4_group_info,
2838 bb_counters[blocksize_bits + 2]);
2839
2840 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2841 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2842 NULL);
2843
2844 ext4_groupinfo_caches[cache_index] = cachep;
2845
2846 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2847 if (!cachep) {
2848 printk(KERN_EMERG
2849 "EXT4-fs: no memory for groupinfo slab cache\n");
2850 return -ENOMEM;
2851 }
2852
2853 return 0;
2854 }
2855
2856 int ext4_mb_init(struct super_block *sb)
2857 {
2858 struct ext4_sb_info *sbi = EXT4_SB(sb);
2859 unsigned i, j;
2860 unsigned offset, offset_incr;
2861 unsigned max;
2862 int ret;
2863
2864 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2865
2866 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2867 if (sbi->s_mb_offsets == NULL) {
2868 ret = -ENOMEM;
2869 goto out;
2870 }
2871
2872 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2873 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2874 if (sbi->s_mb_maxs == NULL) {
2875 ret = -ENOMEM;
2876 goto out;
2877 }
2878
2879 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2880 if (ret < 0)
2881 goto out;
2882
2883 /* order 0 is regular bitmap */
2884 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2885 sbi->s_mb_offsets[0] = 0;
2886
2887 i = 1;
2888 offset = 0;
2889 offset_incr = 1 << (sb->s_blocksize_bits - 1);
2890 max = sb->s_blocksize << 2;
2891 do {
2892 sbi->s_mb_offsets[i] = offset;
2893 sbi->s_mb_maxs[i] = max;
2894 offset += offset_incr;
2895 offset_incr = offset_incr >> 1;
2896 max = max >> 1;
2897 i++;
2898 } while (i <= sb->s_blocksize_bits + 1);
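	/*
	 * Example layout (illustrative, 4k blocks, s_blocksize_bits == 12):
	 * s_mb_maxs[0] = 32768 bits of plain bitmap; the buddy block then
	 * holds 16384 order-1 bits at byte offset 0, 8192 order-2 bits at
	 * offset 2048, 4096 order-3 bits at offset 3072, and so on, down to
	 * 4 bits for order 13.
	 */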
2899
2900 spin_lock_init(&sbi->s_md_lock);
2901 spin_lock_init(&sbi->s_bal_lock);
2902 sbi->s_mb_free_pending = 0;
2903 INIT_LIST_HEAD(&sbi->s_freed_data_list);
2904
2905 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2906 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2907 sbi->s_mb_stats = MB_DEFAULT_STATS;
2908 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2909 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2910 sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
2911 /*
2912 * The default group preallocation is 512, which for 4k block
2913 * sizes translates to 2 megabytes. However for bigalloc file
2914 	 * systems, this is probably too big (i.e., if the cluster size
2915 * is 1 megabyte, then group preallocation size becomes half a
2916 * gigabyte!). As a default, we will keep a two megabyte
2917 	 * group prealloc size for cluster sizes up to 64k, and after
2918 * that, we will force a minimum group preallocation size of
2919 * 32 clusters. This translates to 8 megs when the cluster
2920 * size is 256k, and 32 megs when the cluster size is 1 meg,
2921 * which seems reasonable as a default.
2922 */
2923 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2924 sbi->s_cluster_bits, 32);
2925 /*
2926 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2927 * to the lowest multiple of s_stripe which is bigger than
2928 * the s_mb_group_prealloc as determined above. We want
2929 * the preallocation size to be an exact multiple of the
2930 * RAID stripe size so that preallocations don't fragment
2931 * the stripes.
2932 */
2933 if (sbi->s_stripe > 1) {
2934 sbi->s_mb_group_prealloc = roundup(
2935 sbi->s_mb_group_prealloc, sbi->s_stripe);
2936 }
2937
2938 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2939 if (sbi->s_locality_groups == NULL) {
2940 ret = -ENOMEM;
2941 goto out;
2942 }
2943 for_each_possible_cpu(i) {
2944 struct ext4_locality_group *lg;
2945 lg = per_cpu_ptr(sbi->s_locality_groups, i);
2946 mutex_init(&lg->lg_mutex);
2947 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2948 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2949 spin_lock_init(&lg->lg_prealloc_lock);
2950 }
2951
2952 /* init file for buddy data */
2953 ret = ext4_mb_init_backend(sb);
2954 if (ret != 0)
2955 goto out_free_locality_groups;
2956
2957 return 0;
2958
2959 out_free_locality_groups:
2960 free_percpu(sbi->s_locality_groups);
2961 sbi->s_locality_groups = NULL;
2962 out:
2963 kfree(sbi->s_mb_offsets);
2964 sbi->s_mb_offsets = NULL;
2965 kfree(sbi->s_mb_maxs);
2966 sbi->s_mb_maxs = NULL;
2967 return ret;
2968 }
2969
2970 /* must be called with the ext4 group lock held */
2971 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2972 {
2973 struct ext4_prealloc_space *pa;
2974 struct list_head *cur, *tmp;
2975 int count = 0;
2976
2977 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2978 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2979 list_del(&pa->pa_group_list);
2980 count++;
2981 kmem_cache_free(ext4_pspace_cachep, pa);
2982 }
2983 return count;
2984 }
2985
2986 int ext4_mb_release(struct super_block *sb)
2987 {
2988 ext4_group_t ngroups = ext4_get_groups_count(sb);
2989 ext4_group_t i;
2990 int num_meta_group_infos;
2991 struct ext4_group_info *grinfo, ***group_info;
2992 struct ext4_sb_info *sbi = EXT4_SB(sb);
2993 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2994 int count;
2995
2996 if (sbi->s_group_info) {
2997 for (i = 0; i < ngroups; i++) {
2998 cond_resched();
2999 grinfo = ext4_get_group_info(sb, i);
3000 if (!grinfo)
3001 continue;
3002 mb_group_bb_bitmap_free(grinfo);
3003 ext4_lock_group(sb, i);
3004 count = ext4_mb_cleanup_pa(grinfo);
3005 if (count)
3006 mb_debug(sb, "mballoc: %d PAs left\n",
3007 count);
3008 ext4_unlock_group(sb, i);
3009 kmem_cache_free(cachep, grinfo);
3010 }
3011 num_meta_group_infos = (ngroups +
3012 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3013 EXT4_DESC_PER_BLOCK_BITS(sb);
3014 rcu_read_lock();
3015 group_info = rcu_dereference(sbi->s_group_info);
3016 for (i = 0; i < num_meta_group_infos; i++)
3017 kfree(group_info[i]);
3018 kvfree(group_info);
3019 rcu_read_unlock();
3020 }
3021 kfree(sbi->s_mb_offsets);
3022 kfree(sbi->s_mb_maxs);
3023 iput(sbi->s_buddy_cache);
3024 if (sbi->s_mb_stats) {
3025 ext4_msg(sb, KERN_INFO,
3026 "mballoc: %u blocks %u reqs (%u success)",
3027 atomic_read(&sbi->s_bal_allocated),
3028 atomic_read(&sbi->s_bal_reqs),
3029 atomic_read(&sbi->s_bal_success));
3030 ext4_msg(sb, KERN_INFO,
3031 "mballoc: %u extents scanned, %u goal hits, "
3032 "%u 2^N hits, %u breaks, %u lost",
3033 atomic_read(&sbi->s_bal_ex_scanned),
3034 atomic_read(&sbi->s_bal_goals),
3035 atomic_read(&sbi->s_bal_2orders),
3036 atomic_read(&sbi->s_bal_breaks),
3037 atomic_read(&sbi->s_mb_lost_chunks));
3038 ext4_msg(sb, KERN_INFO,
3039 "mballoc: %lu generated and it took %Lu",
3040 sbi->s_mb_buddies_generated,
3041 sbi->s_mb_generation_time);
3042 ext4_msg(sb, KERN_INFO,
3043 "mballoc: %u preallocated, %u discarded",
3044 atomic_read(&sbi->s_mb_preallocated),
3045 atomic_read(&sbi->s_mb_discarded));
3046 }
3047
3048 free_percpu(sbi->s_locality_groups);
3049
3050 return 0;
3051 }
3052
3053 static inline int ext4_issue_discard(struct super_block *sb,
3054 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3055 struct bio **biop)
3056 {
3057 ext4_fsblk_t discard_block;
3058
3059 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3060 ext4_group_first_block_no(sb, block_group));
3061 count = EXT4_C2B(EXT4_SB(sb), count);
3062 trace_ext4_discard_blocks(sb,
3063 (unsigned long long) discard_block, count);
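	/*
	 * __blkdev_issue_discard() works in 512-byte sectors, hence the
	 * shift by (s_blocksize_bits - 9) below to convert filesystem blocks
	 * to sectors.
	 */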
3064 if (biop) {
3065 return __blkdev_issue_discard(sb->s_bdev,
3066 (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3067 (sector_t)count << (sb->s_blocksize_bits - 9),
3068 GFP_NOFS, 0, biop);
3069 } else
3070 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3071 }
3072
3073 static void ext4_free_data_in_buddy(struct super_block *sb,
3074 struct ext4_free_data *entry)
3075 {
3076 struct ext4_buddy e4b;
3077 struct ext4_group_info *db;
3078 int err, count = 0, count2 = 0;
3079
3080 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3081 entry->efd_count, entry->efd_group, entry);
3082
3083 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3084 /* we expect to find existing buddy because it's pinned */
3085 BUG_ON(err != 0);
3086
3087 spin_lock(&EXT4_SB(sb)->s_md_lock);
3088 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3089 spin_unlock(&EXT4_SB(sb)->s_md_lock);
3090
3091 db = e4b.bd_info;
3092 /* there are blocks to put in buddy to make them really free */
3093 count += entry->efd_count;
3094 count2++;
3095 ext4_lock_group(sb, entry->efd_group);
3096 /* Take it out of per group rb tree */
3097 rb_erase(&entry->efd_node, &(db->bb_free_root));
3098 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3099
3100 /*
3101 * Clear the trimmed flag for the group so that the next
3102 * ext4_trim_fs can trim it.
3103 * If the volume is mounted with -o discard, online discard
3104 * is supported and the free blocks will be trimmed online.
3105 */
3106 if (!test_opt(sb, DISCARD))
3107 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3108
3109 if (!db->bb_free_root.rb_node) {
3110 		/* No more items in the per-group rb tree;
3111 * balance refcounts from ext4_mb_free_metadata()
3112 */
3113 put_page(e4b.bd_buddy_page);
3114 put_page(e4b.bd_bitmap_page);
3115 }
3116 ext4_unlock_group(sb, entry->efd_group);
3117 kmem_cache_free(ext4_free_data_cachep, entry);
3118 ext4_mb_unload_buddy(&e4b);
3119
3120 mb_debug(sb, "freed %d blocks in %d structures\n", count,
3121 count2);
3122 }
3123
3124 /*
3125 * This function is called by the jbd2 layer once the commit has finished,
3126 * so we know we can free the blocks that were released with that commit.
3127 */
3128 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3129 {
3130 struct ext4_sb_info *sbi = EXT4_SB(sb);
3131 struct ext4_free_data *entry, *tmp;
3132 struct bio *discard_bio = NULL;
3133 struct list_head freed_data_list;
3134 struct list_head *cut_pos = NULL;
3135 int err;
3136
3137 INIT_LIST_HEAD(&freed_data_list);
3138
3139 spin_lock(&sbi->s_md_lock);
3140 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3141 if (entry->efd_tid != commit_tid)
3142 break;
3143 cut_pos = &entry->efd_list;
3144 }
3145 if (cut_pos)
3146 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3147 cut_pos);
3148 spin_unlock(&sbi->s_md_lock);
3149
3150 if (test_opt(sb, DISCARD)) {
3151 list_for_each_entry(entry, &freed_data_list, efd_list) {
3152 err = ext4_issue_discard(sb, entry->efd_group,
3153 entry->efd_start_cluster,
3154 entry->efd_count,
3155 &discard_bio);
3156 if (err && err != -EOPNOTSUPP) {
3157 ext4_msg(sb, KERN_WARNING, "discard request in"
3158 " group:%d block:%d count:%d failed"
3159 " with %d", entry->efd_group,
3160 entry->efd_start_cluster,
3161 entry->efd_count, err);
3162 } else if (err == -EOPNOTSUPP)
3163 break;
3164 }
3165
3166 if (discard_bio) {
3167 submit_bio_wait(discard_bio);
3168 bio_put(discard_bio);
3169 }
3170 }
3171
3172 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3173 ext4_free_data_in_buddy(sb, entry);
3174 }
3175
3176 int __init ext4_init_mballoc(void)
3177 {
3178 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3179 SLAB_RECLAIM_ACCOUNT);
3180 if (ext4_pspace_cachep == NULL)
3181 goto out;
3182
3183 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3184 SLAB_RECLAIM_ACCOUNT);
3185 if (ext4_ac_cachep == NULL)
3186 goto out_pa_free;
3187
3188 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3189 SLAB_RECLAIM_ACCOUNT);
3190 if (ext4_free_data_cachep == NULL)
3191 goto out_ac_free;
3192
3193 return 0;
3194
3195 out_ac_free:
3196 kmem_cache_destroy(ext4_ac_cachep);
3197 out_pa_free:
3198 kmem_cache_destroy(ext4_pspace_cachep);
3199 out:
3200 return -ENOMEM;
3201 }
3202
3203 void ext4_exit_mballoc(void)
3204 {
3205 /*
3206 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3207 * before destroying the slab cache.
3208 */
3209 rcu_barrier();
3210 kmem_cache_destroy(ext4_pspace_cachep);
3211 kmem_cache_destroy(ext4_ac_cachep);
3212 kmem_cache_destroy(ext4_free_data_cachep);
3213 ext4_groupinfo_destroy_slabs();
3214 }
3215
3216
3217 /*
3218 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3219 * Returns 0 on success or an error code otherwise
3220 */
3221 static noinline_for_stack int
3222 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3223 handle_t *handle, unsigned int reserv_clstrs)
3224 {
3225 struct buffer_head *bitmap_bh = NULL;
3226 struct ext4_group_desc *gdp;
3227 struct buffer_head *gdp_bh;
3228 struct ext4_sb_info *sbi;
3229 struct super_block *sb;
3230 ext4_fsblk_t block;
3231 int err, len;
3232
3233 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3234 BUG_ON(ac->ac_b_ex.fe_len <= 0);
3235
3236 sb = ac->ac_sb;
3237 sbi = EXT4_SB(sb);
3238
3239 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3240 if (IS_ERR(bitmap_bh)) {
3241 err = PTR_ERR(bitmap_bh);
3242 bitmap_bh = NULL;
3243 goto out_err;
3244 }
3245
3246 BUFFER_TRACE(bitmap_bh, "getting write access");
3247 err = ext4_journal_get_write_access(handle, bitmap_bh);
3248 if (err)
3249 goto out_err;
3250
3251 err = -EIO;
3252 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3253 if (!gdp)
3254 goto out_err;
3255
3256 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3257 ext4_free_group_clusters(sb, gdp));
3258
3259 BUFFER_TRACE(gdp_bh, "get_write_access");
3260 err = ext4_journal_get_write_access(handle, gdp_bh);
3261 if (err)
3262 goto out_err;
3263
3264 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3265
3266 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3267 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3268 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3269 "fs metadata", block, block+len);
3270 		/* The file system is mounted not to panic on error, so
3271 		 * fix the bitmap and return EFSCORRUPTED.
3272 * We leak some of the blocks here.
3273 */
3274 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3275 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3276 ac->ac_b_ex.fe_len);
3277 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3278 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3279 if (!err)
3280 err = -EFSCORRUPTED;
3281 goto out_err;
3282 }
3283
3284 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3285 #ifdef AGGRESSIVE_CHECK
3286 {
3287 int i;
3288 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3289 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3290 bitmap_bh->b_data));
3291 }
3292 }
3293 #endif
3294 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3295 ac->ac_b_ex.fe_len);
3296 if (ext4_has_group_desc_csum(sb) &&
3297 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3298 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3299 ext4_free_group_clusters_set(sb, gdp,
3300 ext4_free_clusters_after_init(sb,
3301 ac->ac_b_ex.fe_group, gdp));
3302 }
3303 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3304 ext4_free_group_clusters_set(sb, gdp, len);
3305 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3306 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3307
3308 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3309 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3310 /*
3311 * Now reduce the dirty block count also. Should not go negative
3312 */
3313 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3314 /* release all the reserved blocks if non delalloc */
3315 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3316 reserv_clstrs);
3317
3318 if (sbi->s_log_groups_per_flex) {
3319 ext4_group_t flex_group = ext4_flex_group(sbi,
3320 ac->ac_b_ex.fe_group);
3321 atomic64_sub(ac->ac_b_ex.fe_len,
3322 &sbi_array_rcu_deref(sbi, s_flex_groups,
3323 flex_group)->free_clusters);
3324 }
3325
3326 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3327 if (err)
3328 goto out_err;
3329 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3330
3331 out_err:
3332 brelse(bitmap_bh);
3333 return err;
3334 }
3335
3336 /*
3337 * Idempotent helper for Ext4 fast commit replay path to set the state of
3338 * blocks in bitmaps and update counters.
3339 */
3340 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3341 int len, int state)
3342 {
3343 struct buffer_head *bitmap_bh = NULL;
3344 struct ext4_group_desc *gdp;
3345 struct buffer_head *gdp_bh;
3346 struct ext4_sb_info *sbi = EXT4_SB(sb);
3347 ext4_group_t group;
3348 ext4_grpblk_t blkoff;
3349 int i, err;
3350 int already;
3351 unsigned int clen, clen_changed, thisgrp_len;
3352
3353 while (len > 0) {
3354 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3355
3356 /*
3357 * Check to see if we are freeing blocks across a group
3358 * boundary.
3359 		 * In case of flex_bg, (block, len) may span more than one
3360 		 * group. In that case we need to get the corresponding group
3361 		 * metadata to work with, which is why the surrounding while
3362 		 * loop handles one group per iteration.
3363 */
3364 thisgrp_len = min_t(unsigned int, (unsigned int)len,
3365 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3366 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
3367
3368 bitmap_bh = ext4_read_block_bitmap(sb, group);
3369 if (IS_ERR(bitmap_bh)) {
3370 err = PTR_ERR(bitmap_bh);
3371 bitmap_bh = NULL;
3372 break;
3373 }
3374
3375 err = -EIO;
3376 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3377 if (!gdp)
3378 break;
3379
3380 ext4_lock_group(sb, group);
3381 already = 0;
3382 for (i = 0; i < clen; i++)
3383 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3384 !state)
3385 already++;
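		/*
		 * 'already' counts bits that are already in the desired state,
		 * so only clen_changed clusters actually change and the
		 * free-cluster accounting below is adjusted by that amount.
		 */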
3386
3387 clen_changed = clen - already;
3388 if (state)
3389 ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
3390 else
3391 mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
3392 if (ext4_has_group_desc_csum(sb) &&
3393 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3394 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3395 ext4_free_group_clusters_set(sb, gdp,
3396 ext4_free_clusters_after_init(sb, group, gdp));
3397 }
3398 if (state)
3399 clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
3400 else
3401 clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
3402
3403 ext4_free_group_clusters_set(sb, gdp, clen);
3404 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
3405 ext4_group_desc_csum_set(sb, group, gdp);
3406
3407 ext4_unlock_group(sb, group);
3408
3409 if (sbi->s_log_groups_per_flex) {
3410 ext4_group_t flex_group = ext4_flex_group(sbi, group);
3411 struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3412 s_flex_groups, flex_group);
3413
3414 if (state)
3415 atomic64_sub(clen_changed, &fg->free_clusters);
3416 else
3417 atomic64_add(clen_changed, &fg->free_clusters);
3418
3419 }
3420
3421 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
3422 if (err)
3423 break;
3424 sync_dirty_buffer(bitmap_bh);
3425 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
3426 sync_dirty_buffer(gdp_bh);
3427 if (err)
3428 break;
3429
3430 block += thisgrp_len;
3431 len -= thisgrp_len;
3432 brelse(bitmap_bh);
3433 BUG_ON(len < 0);
3434 }
3435
3436 if (err)
3437 brelse(bitmap_bh);
3438 }
3439
3440 /*
3441 * Here we normalize the request for a locality group.
3442 * Group requests are normalized to s_mb_group_prealloc, which follows
3443 * s_stripe if a stripe size is set via mount option.
3444 * s_mb_group_prealloc can be configured via
3445 * /sys/fs/ext4/<partition>/mb_group_prealloc
3446 *
3447 * XXX: should we try to preallocate more than the group has now?
3448 */
3449 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3450 {
3451 struct super_block *sb = ac->ac_sb;
3452 struct ext4_locality_group *lg = ac->ac_lg;
3453
3454 BUG_ON(lg == NULL);
3455 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3456 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
3457 }
3458
3459 /*
3460 * Normalization means making request better in terms of
3461 * size and alignment
3462 */
3463 static noinline_for_stack void
3464 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3465 struct ext4_allocation_request *ar)
3466 {
3467 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3468 int bsbits, max;
3469 ext4_lblk_t end;
3470 loff_t size, start_off;
3471 loff_t orig_size __maybe_unused;
3472 ext4_lblk_t start;
3473 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3474 struct ext4_prealloc_space *pa;
3475
3476 /* do normalize only data requests, metadata requests
3477 do not need preallocation */
3478 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3479 return;
3480
3481 	/* sometimes the caller may want exact blocks */
3482 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3483 return;
3484
3485 /* caller may indicate that preallocation isn't
3486 * required (it's a tail, for example) */
3487 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3488 return;
3489
3490 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3491 ext4_mb_normalize_group_request(ac);
3492 return ;
3493 }
3494
3495 bsbits = ac->ac_sb->s_blocksize_bits;
3496
3497 /* first, let's learn actual file size
3498 * given current request is allocated */
3499 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3500 size = size << bsbits;
3501 if (size < i_size_read(ac->ac_inode))
3502 size = i_size_read(ac->ac_inode);
3503 orig_size = size;
3504
3505 /* max size of free chunks */
3506 max = 2 << bsbits;
3507
3508 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \
3509 (req <= (size) || max <= (chunk_size))
3510
3511 /* first, try to predict filesize */
3512 /* XXX: should this table be tunable? */
3513 start_off = 0;
3514 if (size <= 16 * 1024) {
3515 size = 16 * 1024;
3516 } else if (size <= 32 * 1024) {
3517 size = 32 * 1024;
3518 } else if (size <= 64 * 1024) {
3519 size = 64 * 1024;
3520 } else if (size <= 128 * 1024) {
3521 size = 128 * 1024;
3522 } else if (size <= 256 * 1024) {
3523 size = 256 * 1024;
3524 } else if (size <= 512 * 1024) {
3525 size = 512 * 1024;
3526 } else if (size <= 1024 * 1024) {
3527 size = 1024 * 1024;
3528 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3529 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3530 (21 - bsbits)) << 21;
3531 size = 2 * 1024 * 1024;
3532 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3533 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3534 (22 - bsbits)) << 22;
3535 size = 4 * 1024 * 1024;
3536 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3537 (8<<20)>>bsbits, max, 8 * 1024)) {
3538 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3539 (23 - bsbits)) << 23;
3540 size = 8 * 1024 * 1024;
3541 } else {
3542 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
3543 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
3544 ac->ac_o_ex.fe_len) << bsbits;
3545 }
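/*
 * For instance, with 4k blocks a write that would leave the file at
 * 20KB falls into the "size <= 32 * 1024" bucket above, so the goal
 * size becomes 32KB (8 blocks once shifted by bsbits below) starting
 * at logical block 0 for such a small file.
 */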
3546 size = size >> bsbits;
3547 start = start_off >> bsbits;
3548
3549 /*
3550 * For tiny groups (smaller than 8MB) the chosen allocation
3551 * alignment may be larger than group size. Make sure the
3552 * alignment does not move allocation to a different group which
3553 * makes mballoc fail assertions later.
3554 */
3555 start = max(start, rounddown(ac->ac_o_ex.fe_logical,
3556 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
3557
3558 /* don't cover already allocated blocks in selected range */
3559 if (ar->pleft && start <= ar->lleft) {
3560 size -= ar->lleft + 1 - start;
3561 start = ar->lleft + 1;
3562 }
3563 if (ar->pright && start + size - 1 >= ar->lright)
3564 size -= start + size - ar->lright;
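/*
 * At this point [start, start + size) could still include blocks the
 * caller already has mapped next to the request: the two checks above
 * pull start up past lleft and clip size so the window stays below
 * lright.
 */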
3565
3566 /*
3567 * Trim allocation request for filesystems with artificially small
3568 * groups.
3569 */
3570 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
3571 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
3572
3573 end = start + size;
3574
3575 /* check we don't cross already preallocated blocks */
3576 rcu_read_lock();
3577 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3578 ext4_lblk_t pa_end;
3579
3580 if (pa->pa_deleted)
3581 continue;
3582 spin_lock(&pa->pa_lock);
3583 if (pa->pa_deleted) {
3584 spin_unlock(&pa->pa_lock);
3585 continue;
3586 }
3587
3588 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3589 pa->pa_len);
3590
3591 /* PA must not overlap original request */
3592 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3593 ac->ac_o_ex.fe_logical < pa->pa_lstart));
3594
3595 /* skip PAs this normalized request doesn't overlap with */
3596 if (pa->pa_lstart >= end || pa_end <= start) {
3597 spin_unlock(&pa->pa_lock);
3598 continue;
3599 }
3600 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3601
3602 /* adjust start or end to be adjacent to this pa */
3603 if (pa_end <= ac->ac_o_ex.fe_logical) {
3604 BUG_ON(pa_end < start);
3605 start = pa_end;
3606 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3607 BUG_ON(pa->pa_lstart > end);
3608 end = pa->pa_lstart;
3609 }
3610 spin_unlock(&pa->pa_lock);
3611 }
3612 rcu_read_unlock();
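/*
 * For example, a window of [0, 256) overlapping an existing PA that
 * covers [192, 256) is cut back to end at 192, because that PA lies
 * entirely above the original block; a PA entirely below the original
 * block would instead push start up to its end.
 */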
3613 size = end - start;
3614
3615 /* XXX: extra loop to check we really don't overlap preallocations */
3616 rcu_read_lock();
3617 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3618 ext4_lblk_t pa_end;
3619
3620 spin_lock(&pa->pa_lock);
3621 if (pa->pa_deleted == 0) {
3622 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3623 pa->pa_len);
3624 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3625 }
3626 spin_unlock(&pa->pa_lock);
3627 }
3628 rcu_read_unlock();
3629
3630 if (start + size <= ac->ac_o_ex.fe_logical &&
3631 start > ac->ac_o_ex.fe_logical) {
3632 ext4_msg(ac->ac_sb, KERN_ERR,
3633 "start %lu, size %lu, fe_logical %lu",
3634 (unsigned long) start, (unsigned long) size,
3635 (unsigned long) ac->ac_o_ex.fe_logical);
3636 BUG();
3637 }
3638 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3639
3640 /* now prepare goal request */
3641
3642 /* XXX: is it better to align blocks WRT logical
3643 * placement or satisfy a big request as is */
3644 ac->ac_g_ex.fe_logical = start;
3645 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3646
3647 /* define goal start in order to merge */
3648 if (ar->pright && (ar->lright == (start + size))) {
3649 /* merge to the right */
3650 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3651 &ac->ac_f_ex.fe_group,
3652 &ac->ac_f_ex.fe_start);
3653 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3654 }
3655 if (ar->pleft && (ar->lleft + 1 == start)) {
3656 /* merge to the left */
3657 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3658 &ac->ac_f_ex.fe_group,
3659 &ac->ac_f_ex.fe_start);
3660 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3661 }
3662
3663 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
3664 orig_size, start);
3665 }
3666
3667 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3668 {
3669 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3670
3671 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3672 atomic_inc(&sbi->s_bal_reqs);
3673 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3674 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3675 atomic_inc(&sbi->s_bal_success);
3676 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3677 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3678 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3679 atomic_inc(&sbi->s_bal_goals);
3680 if (ac->ac_found > sbi->s_mb_max_to_scan)
3681 atomic_inc(&sbi->s_bal_breaks);
3682 }
3683
3684 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3685 trace_ext4_mballoc_alloc(ac);
3686 else
3687 trace_ext4_mballoc_prealloc(ac);
3688 }
3689
3690 /*
3691 * Called on failure; free up any blocks from the inode PA for this
3692 * context. We don't need this for MB_GROUP_PA because we only change
3693 * pa_free in ext4_mb_release_context(), but on failure, we've already
3694 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3695 */
3696 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3697 {
3698 struct ext4_prealloc_space *pa = ac->ac_pa;
3699 struct ext4_buddy e4b;
3700 int err;
3701
3702 if (pa == NULL) {
3703 if (ac->ac_f_ex.fe_len == 0)
3704 return;
3705 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
3706 if (err) {
3707 /*
3708 * This should never happen since we pin the
3709 * pages in the ext4_allocation_context so
3710 * ext4_mb_load_buddy() should never fail.
3711 */
3712 WARN(1, "mb_load_buddy failed (%d)", err);
3713 return;
3714 }
3715 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3716 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
3717 ac->ac_f_ex.fe_len);
3718 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3719 ext4_mb_unload_buddy(&e4b);
3720 return;
3721 }
3722 if (pa->pa_type == MB_INODE_PA)
3723 pa->pa_free += ac->ac_b_ex.fe_len;
3724 }
3725
3726 /*
3727 * use blocks preallocated to inode
3728 */
3729 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3730 struct ext4_prealloc_space *pa)
3731 {
3732 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3733 ext4_fsblk_t start;
3734 ext4_fsblk_t end;
3735 int len;
3736
3737 /* found preallocated blocks, use them */
3738 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3739 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3740 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3741 len = EXT4_NUM_B2C(sbi, end - start);
3742 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3743 &ac->ac_b_ex.fe_start);
3744 ac->ac_b_ex.fe_len = len;
3745 ac->ac_status = AC_STATUS_FOUND;
3746 ac->ac_pa = pa;
3747
3748 BUG_ON(start < pa->pa_pstart);
3749 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3750 BUG_ON(pa->pa_free < len);
3751 pa->pa_free -= len;
3752
3753 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
3754 }
3755
3756 /*
3757 * use blocks preallocated to locality group
3758 */
3759 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3760 struct ext4_prealloc_space *pa)
3761 {
3762 unsigned int len = ac->ac_o_ex.fe_len;
3763
3764 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3765 &ac->ac_b_ex.fe_group,
3766 &ac->ac_b_ex.fe_start);
3767 ac->ac_b_ex.fe_len = len;
3768 ac->ac_status = AC_STATUS_FOUND;
3769 ac->ac_pa = pa;
3770
3771 /* we don't correct pa_pstart or pa_plen here to avoid
3772 * a possible race when the group is being loaded concurrently;
3773 * instead we correct the pa later, after blocks are marked
3774 * in the on-disk bitmap -- see ext4_mb_release_context().
3775 * Other CPUs are prevented from allocating from this pa by lg_mutex
3776 */
3777 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
3778 pa->pa_lstart-len, len, pa);
3779 }
3780
3781 /*
3782 * Return the prealloc space that has the minimal distance
3783 * from the goal block. @cpa is the prealloc
3784 * space with the currently known minimal distance
3785 * from the goal block.
3786 */
3787 static struct ext4_prealloc_space *
3788 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3789 struct ext4_prealloc_space *pa,
3790 struct ext4_prealloc_space *cpa)
3791 {
3792 ext4_fsblk_t cur_distance, new_distance;
3793
3794 if (cpa == NULL) {
3795 atomic_inc(&pa->pa_count);
3796 return pa;
3797 }
3798 cur_distance = abs(goal_block - cpa->pa_pstart);
3799 new_distance = abs(goal_block - pa->pa_pstart);
3800
3801 if (cur_distance <= new_distance)
3802 return cpa;
3803
3804 /* drop the previous reference */
3805 atomic_dec(&cpa->pa_count);
3806 atomic_inc(&pa->pa_count);
3807 return pa;
3808 }
3809
3810 /*
3811 * search goal blocks in preallocated space
3812 */
3813 static noinline_for_stack bool
3814 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3815 {
3816 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3817 int order, i;
3818 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3819 struct ext4_locality_group *lg;
3820 struct ext4_prealloc_space *pa, *cpa = NULL;
3821 ext4_fsblk_t goal_block;
3822
3823 /* only data can be preallocated */
3824 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3825 return false;
3826
3827 /* first, try per-file preallocation */
3828 rcu_read_lock();
3829 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3830
3831 /* all fields in this condition don't change,
3832 * so we can skip locking for them */
3833 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3834 ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3835 EXT4_C2B(sbi, pa->pa_len)))
3836 continue;
3837
3838 /* non-extent files can't have physical blocks past 2^32 */
3839 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3840 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3841 EXT4_MAX_BLOCK_FILE_PHYS))
3842 continue;
3843
3844 /* found preallocated blocks, use them */
3845 spin_lock(&pa->pa_lock);
3846 if (pa->pa_deleted == 0 && pa->pa_free) {
3847 atomic_inc(&pa->pa_count);
3848 ext4_mb_use_inode_pa(ac, pa);
3849 spin_unlock(&pa->pa_lock);
3850 ac->ac_criteria = 10;
3851 rcu_read_unlock();
3852 return true;
3853 }
3854 spin_unlock(&pa->pa_lock);
3855 }
3856 rcu_read_unlock();
3857
3858 /* can we use group allocation? */
3859 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3860 return false;
3861
3862 /* inode may have no locality group for some reason */
3863 lg = ac->ac_lg;
3864 if (lg == NULL)
3865 return false;
3866 order = fls(ac->ac_o_ex.fe_len) - 1;
3867 if (order > PREALLOC_TB_SIZE - 1)
3868 /* The max size of hash table is PREALLOC_TB_SIZE */
3869 order = PREALLOC_TB_SIZE - 1;
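/*
 * Example: a 13-cluster request gives order = fls(13) - 1 = 3, so the
 * scan below starts at lg_prealloc_list[3] and moves on to the
 * higher-order lists.
 */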
3870
3871 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3872 /*
3873 * search for the prealloc space that has the
3874 * minimal distance from the goal block.
3875 */
3876 for (i = order; i < PREALLOC_TB_SIZE; i++) {
3877 rcu_read_lock();
3878 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3879 pa_inode_list) {
3880 spin_lock(&pa->pa_lock);
3881 if (pa->pa_deleted == 0 &&
3882 pa->pa_free >= ac->ac_o_ex.fe_len) {
3883
3884 cpa = ext4_mb_check_group_pa(goal_block,
3885 pa, cpa);
3886 }
3887 spin_unlock(&pa->pa_lock);
3888 }
3889 rcu_read_unlock();
3890 }
3891 if (cpa) {
3892 ext4_mb_use_group_pa(ac, cpa);
3893 ac->ac_criteria = 20;
3894 return true;
3895 }
3896 return false;
3897 }
3898
3899 /*
3900 * the function goes through all blocks freed in the group
3901 * but not yet committed and marks them used in the in-core bitmap.
3902 * The buddy must be generated from this bitmap.
3903 * Needs to be called with the ext4 group lock held
3904 */
3905 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3906 ext4_group_t group)
3907 {
3908 struct rb_node *n;
3909 struct ext4_group_info *grp;
3910 struct ext4_free_data *entry;
3911
3912 grp = ext4_get_group_info(sb, group);
3913 if (!grp)
3914 return;
3915 n = rb_first(&(grp->bb_free_root));
3916
3917 while (n) {
3918 entry = rb_entry(n, struct ext4_free_data, efd_node);
3919 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3920 n = rb_next(n);
3921 }
3922 return;
3923 }
3924
3925 /*
3926 * the function goes through all preallocations in this group and marks them
3927 * used in the in-core bitmap. The buddy must be generated from this bitmap.
3928 * Needs to be called with the ext4 group lock held
3929 */
3930 static noinline_for_stack
3931 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3932 ext4_group_t group)
3933 {
3934 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3935 struct ext4_prealloc_space *pa;
3936 struct list_head *cur;
3937 ext4_group_t groupnr;
3938 ext4_grpblk_t start;
3939 int preallocated = 0;
3940 int len;
3941
3942 if (!grp)
3943 return;
3944
3945 /* every form of preallocation discard first loads the group,
3946 * so the only competing code is preallocation use.
3947 * we don't need any locking here.
3948 * notice we do NOT ignore preallocations with pa_deleted;
3949 * otherwise we could leave used blocks available for
3950 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3951 * is dropping the preallocation
3952 */
3953 list_for_each(cur, &grp->bb_prealloc_list) {
3954 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3955 spin_lock(&pa->pa_lock);
3956 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3957 &groupnr, &start);
3958 len = pa->pa_len;
3959 spin_unlock(&pa->pa_lock);
3960 if (unlikely(len == 0))
3961 continue;
3962 BUG_ON(groupnr != group);
3963 ext4_set_bits(bitmap, start, len);
3964 preallocated += len;
3965 }
3966 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
3967 }
3968
3969 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
3970 struct ext4_prealloc_space *pa)
3971 {
3972 struct ext4_inode_info *ei;
3973
3974 if (pa->pa_deleted) {
3975 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
3976 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
3977 pa->pa_len);
3978 return;
3979 }
3980
3981 pa->pa_deleted = 1;
3982
3983 if (pa->pa_type == MB_INODE_PA) {
3984 ei = EXT4_I(pa->pa_inode);
3985 atomic_dec(&ei->i_prealloc_active);
3986 }
3987 }
3988
3989 static void ext4_mb_pa_callback(struct rcu_head *head)
3990 {
3991 struct ext4_prealloc_space *pa;
3992 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3993
3994 BUG_ON(atomic_read(&pa->pa_count));
3995 BUG_ON(pa->pa_deleted == 0);
3996 kmem_cache_free(ext4_pspace_cachep, pa);
3997 }
3998
3999 /*
4000 * drops a reference to preallocated space descriptor
4001 * if this was the last reference and the space is consumed
4002 */
4003 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4004 struct super_block *sb, struct ext4_prealloc_space *pa)
4005 {
4006 ext4_group_t grp;
4007 ext4_fsblk_t grp_blk;
4008
4009 /* in this short window concurrent discard can set pa_deleted */
4010 spin_lock(&pa->pa_lock);
4011 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4012 spin_unlock(&pa->pa_lock);
4013 return;
4014 }
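/*
 * That was the last reference and the PA is fully consumed
 * (pa_free == 0), so it can be torn down below unless a racing
 * discard has already marked it deleted.
 */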
4015
4016 if (pa->pa_deleted == 1) {
4017 spin_unlock(&pa->pa_lock);
4018 return;
4019 }
4020
4021 ext4_mb_mark_pa_deleted(sb, pa);
4022 spin_unlock(&pa->pa_lock);
4023
4024 grp_blk = pa->pa_pstart;
4025 /*
4026 * If doing group-based preallocation, pa_pstart may be in the
4027 * next group when pa is used up
4028 */
4029 if (pa->pa_type == MB_GROUP_PA)
4030 grp_blk--;
4031
4032 grp = ext4_get_group_number(sb, grp_blk);
4033
4034 /*
4035 * possible race:
4036 *
4037 * P1 (buddy init) P2 (regular allocation)
4038 * find block B in PA
4039 * copy on-disk bitmap to buddy
4040 * mark B in on-disk bitmap
4041 * drop PA from group
4042 * mark all PAs in buddy
4043 *
4044 * thus, P1 initializes buddy with B available. to prevent this
4045 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4046 * against that pair
4047 */
4048 ext4_lock_group(sb, grp);
4049 list_del(&pa->pa_group_list);
4050 ext4_unlock_group(sb, grp);
4051
4052 spin_lock(pa->pa_obj_lock);
4053 list_del_rcu(&pa->pa_inode_list);
4054 spin_unlock(pa->pa_obj_lock);
4055
4056 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4057 }
4058
4059 /*
4060 * creates new preallocated space for given inode
4061 */
4062 static noinline_for_stack void
4063 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4064 {
4065 struct super_block *sb = ac->ac_sb;
4066 struct ext4_sb_info *sbi = EXT4_SB(sb);
4067 struct ext4_prealloc_space *pa;
4068 struct ext4_group_info *grp;
4069 struct ext4_inode_info *ei;
4070
4071 /* preallocate only when found space is larger than requested */
4072 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4073 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4074 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4075 BUG_ON(ac->ac_pa == NULL);
4076
4077 pa = ac->ac_pa;
4078
4079 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4080 int winl;
4081 int wins;
4082 int win;
4083 int offs;
4084
4085 /* we can't allocate as much as normalizer wants.
4086 * so, found space must get proper lstart
4087 * to cover original request */
4088 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4089 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4090
4091 /* we're limited by the original request in that
4092 * the logical block must be covered anyway;
4093 * winl is the window we can move our chunk within */
4094 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
4095
4096 /* also, we should cover whole original request */
4097 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
4098
4099 /* the smallest one defines real window */
4100 win = min(winl, wins);
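/*
 * Example: if the normalized goal started 8 blocks before the original
 * logical block (winl = 8) but only 3 blocks more than requested were
 * found (wins = 3), the preallocation start can be pulled back by at
 * most 3 blocks and still cover the original request.
 */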
4101
4102 offs = ac->ac_o_ex.fe_logical %
4103 EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4104 if (offs && offs < win)
4105 win = offs;
4106
4107 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
4108 EXT4_NUM_B2C(sbi, win);
4109 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4110 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4111 }
4112
4113 /* preallocation can change ac_b_ex, thus we store actually
4114 * allocated blocks for history */
4115 ac->ac_f_ex = ac->ac_b_ex;
4116
4117 pa->pa_lstart = ac->ac_b_ex.fe_logical;
4118 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4119 pa->pa_len = ac->ac_b_ex.fe_len;
4120 pa->pa_free = pa->pa_len;
4121 spin_lock_init(&pa->pa_lock);
4122 INIT_LIST_HEAD(&pa->pa_inode_list);
4123 INIT_LIST_HEAD(&pa->pa_group_list);
4124 pa->pa_deleted = 0;
4125 pa->pa_type = MB_INODE_PA;
4126
4127 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4128 pa->pa_len, pa->pa_lstart);
4129 trace_ext4_mb_new_inode_pa(ac, pa);
4130
4131 ext4_mb_use_inode_pa(ac, pa);
4132 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4133
4134 ei = EXT4_I(ac->ac_inode);
4135 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4136 if (!grp)
4137 return;
4138
4139 pa->pa_obj_lock = &ei->i_prealloc_lock;
4140 pa->pa_inode = ac->ac_inode;
4141
4142 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4143
4144 spin_lock(pa->pa_obj_lock);
4145 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4146 spin_unlock(pa->pa_obj_lock);
4147 atomic_inc(&ei->i_prealloc_active);
4148 }
4149
4150 /*
4151 * creates new preallocated space for the locality group this inode belongs to
4152 */
4153 static noinline_for_stack void
4154 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4155 {
4156 struct super_block *sb = ac->ac_sb;
4157 struct ext4_locality_group *lg;
4158 struct ext4_prealloc_space *pa;
4159 struct ext4_group_info *grp;
4160
4161 /* preallocate only when found space is larger than requested */
4162 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4163 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4164 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4165 BUG_ON(ac->ac_pa == NULL);
4166
4167 pa = ac->ac_pa;
4168
4169 /* preallocation can change ac_b_ex, thus we store actually
4170 * allocated blocks for history */
4171 ac->ac_f_ex = ac->ac_b_ex;
4172
4173 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4174 pa->pa_lstart = pa->pa_pstart;
4175 pa->pa_len = ac->ac_b_ex.fe_len;
4176 pa->pa_free = pa->pa_len;
4177 spin_lock_init(&pa->pa_lock);
4178 INIT_LIST_HEAD(&pa->pa_inode_list);
4179 INIT_LIST_HEAD(&pa->pa_group_list);
4180 pa->pa_deleted = 0;
4181 pa->pa_type = MB_GROUP_PA;
4182
4183 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4184 pa->pa_len, pa->pa_lstart);
4185 trace_ext4_mb_new_group_pa(ac, pa);
4186
4187 ext4_mb_use_group_pa(ac, pa);
4188 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4189
4190 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4191 if (!grp)
4192 return;
4193 lg = ac->ac_lg;
4194 BUG_ON(lg == NULL);
4195
4196 pa->pa_obj_lock = &lg->lg_prealloc_lock;
4197 pa->pa_inode = NULL;
4198
4199 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4200
4201 /*
4202 * We will later add the new pa to the right bucket
4203 * after updating the pa_free in ext4_mb_release_context
4204 */
4205 }
4206
4207 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4208 {
4209 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4210 ext4_mb_new_group_pa(ac);
4211 else
4212 ext4_mb_new_inode_pa(ac);
4213 }
4214
4215 /*
4216 * finds all unused blocks in on-disk bitmap, frees them in
4217 * in-core bitmap and buddy.
4218 * @pa must be unlinked from inode and group lists, so that
4219 * nobody else can find/use it.
4220 * the caller MUST hold group/inode locks.
4221 * TODO: optimize the case when there are no in-core structures yet
4222 */
4223 static noinline_for_stack int
4224 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4225 struct ext4_prealloc_space *pa)
4226 {
4227 struct super_block *sb = e4b->bd_sb;
4228 struct ext4_sb_info *sbi = EXT4_SB(sb);
4229 unsigned int end;
4230 unsigned int next;
4231 ext4_group_t group;
4232 ext4_grpblk_t bit;
4233 unsigned long long grp_blk_start;
4234 int free = 0;
4235
4236 BUG_ON(pa->pa_deleted == 0);
4237 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4238 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4239 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4240 end = bit + pa->pa_len;
4241
4242 while (bit < end) {
4243 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4244 if (bit >= end)
4245 break;
4246 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4247 mb_debug(sb, "free preallocated %u/%u in group %u\n",
4248 (unsigned) ext4_group_first_block_no(sb, group) + bit,
4249 (unsigned) next - bit, (unsigned) group);
4250 free += next - bit;
4251
4252 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4253 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4254 EXT4_C2B(sbi, bit)),
4255 next - bit);
4256 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4257 bit = next + 1;
4258 }
4259 if (free != pa->pa_free) {
4260 ext4_msg(e4b->bd_sb, KERN_CRIT,
4261 "pa %p: logic %lu, phys. %lu, len %d",
4262 pa, (unsigned long) pa->pa_lstart,
4263 (unsigned long) pa->pa_pstart,
4264 pa->pa_len);
4265 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4266 free, pa->pa_free);
4267 /*
4268 * pa is already deleted so we use the value obtained
4269 * from the bitmap and continue.
4270 */
4271 }
4272 atomic_add(free, &sbi->s_mb_discarded);
4273
4274 return 0;
4275 }
4276
4277 static noinline_for_stack int
4278 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4279 struct ext4_prealloc_space *pa)
4280 {
4281 struct super_block *sb = e4b->bd_sb;
4282 ext4_group_t group;
4283 ext4_grpblk_t bit;
4284
4285 trace_ext4_mb_release_group_pa(sb, pa);
4286 BUG_ON(pa->pa_deleted == 0);
4287 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4288 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4289 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4290 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4291 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4292
4293 return 0;
4294 }
4295
4296 /*
4297 * releases all preallocations in given group
4298 *
4299 * first, we need to decide discard policy:
4300 * - when do we discard
4301 * 1) ENOSPC
4302 * - how many do we discard
4303 * 1) how many requested
4304 */
4305 static noinline_for_stack int
4306 ext4_mb_discard_group_preallocations(struct super_block *sb,
4307 ext4_group_t group, int *busy)
4308 {
4309 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4310 struct buffer_head *bitmap_bh = NULL;
4311 struct ext4_prealloc_space *pa, *tmp;
4312 struct list_head list;
4313 struct ext4_buddy e4b;
4314 int err;
4315 int free = 0;
4316
4317 if (!grp)
4318 return 0;
4319 mb_debug(sb, "discard preallocation for group %u\n", group);
4320 if (list_empty(&grp->bb_prealloc_list))
4321 goto out_dbg;
4322
4323 bitmap_bh = ext4_read_block_bitmap(sb, group);
4324 if (IS_ERR(bitmap_bh)) {
4325 err = PTR_ERR(bitmap_bh);
4326 ext4_error_err(sb, -err,
4327 "Error %d reading block bitmap for %u",
4328 err, group);
4329 goto out_dbg;
4330 }
4331
4332 err = ext4_mb_load_buddy(sb, group, &e4b);
4333 if (err) {
4334 ext4_warning(sb, "Error %d loading buddy information for %u",
4335 err, group);
4336 put_bh(bitmap_bh);
4337 goto out_dbg;
4338 }
4339
4340 INIT_LIST_HEAD(&list);
4341 ext4_lock_group(sb, group);
4342 list_for_each_entry_safe(pa, tmp,
4343 &grp->bb_prealloc_list, pa_group_list) {
4344 spin_lock(&pa->pa_lock);
4345 if (atomic_read(&pa->pa_count)) {
4346 spin_unlock(&pa->pa_lock);
4347 *busy = 1;
4348 continue;
4349 }
4350 if (pa->pa_deleted) {
4351 spin_unlock(&pa->pa_lock);
4352 continue;
4353 }
4354
4355 /* seems this one can be freed ... */
4356 ext4_mb_mark_pa_deleted(sb, pa);
4357
4358 if (!free)
4359 this_cpu_inc(discard_pa_seq);
4360
4361 /* we can trust pa_free ... */
4362 free += pa->pa_free;
4363
4364 spin_unlock(&pa->pa_lock);
4365
4366 list_del(&pa->pa_group_list);
4367 list_add(&pa->u.pa_tmp_list, &list);
4368 }
4369
4370 /* now free all selected PAs */
4371 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4372
4373 /* remove from object (inode or locality group) */
4374 spin_lock(pa->pa_obj_lock);
4375 list_del_rcu(&pa->pa_inode_list);
4376 spin_unlock(pa->pa_obj_lock);
4377
4378 if (pa->pa_type == MB_GROUP_PA)
4379 ext4_mb_release_group_pa(&e4b, pa);
4380 else
4381 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4382
4383 list_del(&pa->u.pa_tmp_list);
4384 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4385 }
4386
4387 ext4_unlock_group(sb, group);
4388 ext4_mb_unload_buddy(&e4b);
4389 put_bh(bitmap_bh);
4390 out_dbg:
4391 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
4392 free, group, grp->bb_free);
4393 return free;
4394 }
4395
4396 /*
4397 * releases all non-used preallocated blocks for given inode
4398 *
4399 * It's important to discard preallocations under i_data_sem
4400 * We don't want another block to be served from the prealloc
4401 * space when we are discarding the inode prealloc space.
4402 *
4403 * FIXME!! Make sure it is valid at all the call sites
4404 */
4405 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
4406 {
4407 struct ext4_inode_info *ei = EXT4_I(inode);
4408 struct super_block *sb = inode->i_sb;
4409 struct buffer_head *bitmap_bh = NULL;
4410 struct ext4_prealloc_space *pa, *tmp;
4411 ext4_group_t group = 0;
4412 struct list_head list;
4413 struct ext4_buddy e4b;
4414 int err;
4415
4416 if (!S_ISREG(inode->i_mode)) {
4417 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4418 return;
4419 }
4420
4421 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
4422 return;
4423
4424 mb_debug(sb, "discard preallocation for inode %lu\n",
4425 inode->i_ino);
4426 trace_ext4_discard_preallocations(inode,
4427 atomic_read(&ei->i_prealloc_active), needed);
4428
4429 INIT_LIST_HEAD(&list);
4430
4431 if (needed == 0)
4432 needed = UINT_MAX;
4433
4434 repeat:
4435 /* first, collect all pa's in the inode */
4436 spin_lock(&ei->i_prealloc_lock);
4437 while (!list_empty(&ei->i_prealloc_list) && needed) {
4438 pa = list_entry(ei->i_prealloc_list.prev,
4439 struct ext4_prealloc_space, pa_inode_list);
4440 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4441 spin_lock(&pa->pa_lock);
4442 if (atomic_read(&pa->pa_count)) {
4443 /* this shouldn't happen often - nobody should
4444 * use preallocation while we're discarding it */
4445 spin_unlock(&pa->pa_lock);
4446 spin_unlock(&ei->i_prealloc_lock);
4447 ext4_msg(sb, KERN_ERR,
4448 "uh-oh! used pa while discarding");
4449 WARN_ON(1);
4450 schedule_timeout_uninterruptible(HZ);
4451 goto repeat;
4452
4453 }
4454 if (pa->pa_deleted == 0) {
4455 ext4_mb_mark_pa_deleted(sb, pa);
4456 spin_unlock(&pa->pa_lock);
4457 list_del_rcu(&pa->pa_inode_list);
4458 list_add(&pa->u.pa_tmp_list, &list);
4459 needed--;
4460 continue;
4461 }
4462
4463 /* someone is deleting pa right now */
4464 spin_unlock(&pa->pa_lock);
4465 spin_unlock(&ei->i_prealloc_lock);
4466
4467 /* we have to wait here because pa_deleted
4468 * doesn't mean the pa is already unlinked from
4469 * the list. As we might be called from
4470 * ->clear_inode(), the inode will get freed
4471 * and a concurrent thread which is unlinking
4472 * the pa from the inode's list may access already
4473 * freed memory, bad-bad-bad */
4474
4475 /* XXX: if this happens too often, we can
4476 * add a flag to force wait only in case
4477 * of ->clear_inode(), but not in case of
4478 * regular truncate */
4479 schedule_timeout_uninterruptible(HZ);
4480 goto repeat;
4481 }
4482 spin_unlock(&ei->i_prealloc_lock);
4483
4484 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4485 BUG_ON(pa->pa_type != MB_INODE_PA);
4486 group = ext4_get_group_number(sb, pa->pa_pstart);
4487
4488 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4489 GFP_NOFS|__GFP_NOFAIL);
4490 if (err) {
4491 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
4492 err, group);
4493 continue;
4494 }
4495
4496 bitmap_bh = ext4_read_block_bitmap(sb, group);
4497 if (IS_ERR(bitmap_bh)) {
4498 err = PTR_ERR(bitmap_bh);
4499 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
4500 err, group);
4501 ext4_mb_unload_buddy(&e4b);
4502 continue;
4503 }
4504
4505 ext4_lock_group(sb, group);
4506 list_del(&pa->pa_group_list);
4507 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4508 ext4_unlock_group(sb, group);
4509
4510 ext4_mb_unload_buddy(&e4b);
4511 put_bh(bitmap_bh);
4512
4513 list_del(&pa->u.pa_tmp_list);
4514 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4515 }
4516 }
4517
4518 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
4519 {
4520 struct ext4_prealloc_space *pa;
4521
4522 BUG_ON(ext4_pspace_cachep == NULL);
4523 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
4524 if (!pa)
4525 return -ENOMEM;
4526 atomic_set(&pa->pa_count, 1);
4527 ac->ac_pa = pa;
4528 return 0;
4529 }
4530
4531 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
4532 {
4533 struct ext4_prealloc_space *pa = ac->ac_pa;
4534
4535 BUG_ON(!pa);
4536 ac->ac_pa = NULL;
4537 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
4538 kmem_cache_free(ext4_pspace_cachep, pa);
4539 }
4540
4541 #ifdef CONFIG_EXT4_DEBUG
4542 static inline void ext4_mb_show_pa(struct super_block *sb)
4543 {
4544 ext4_group_t i, ngroups;
4545
4546 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
4547 return;
4548
4549 ngroups = ext4_get_groups_count(sb);
4550 mb_debug(sb, "groups: ");
4551 for (i = 0; i < ngroups; i++) {
4552 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4553 struct ext4_prealloc_space *pa;
4554 ext4_grpblk_t start;
4555 struct list_head *cur;
4556
4557 if (!grp)
4558 continue;
4559 ext4_lock_group(sb, i);
4560 list_for_each(cur, &grp->bb_prealloc_list) {
4561 pa = list_entry(cur, struct ext4_prealloc_space,
4562 pa_group_list);
4563 spin_lock(&pa->pa_lock);
4564 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4565 NULL, &start);
4566 spin_unlock(&pa->pa_lock);
4567 mb_debug(sb, "PA:%u:%d:%d\n", i, start,
4568 pa->pa_len);
4569 }
4570 ext4_unlock_group(sb, i);
4571 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
4572 grp->bb_fragments);
4573 }
4574 }
4575
4576 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4577 {
4578 struct super_block *sb = ac->ac_sb;
4579
4580 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
4581 return;
4582
4583 mb_debug(sb, "Can't allocate:"
4584 " Allocation context details:");
4585 mb_debug(sb, "status %u flags 0x%x",
4586 ac->ac_status, ac->ac_flags);
4587 mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
4588 "goal %lu/%lu/%lu@%lu, "
4589 "best %lu/%lu/%lu@%lu cr %d",
4590 (unsigned long)ac->ac_o_ex.fe_group,
4591 (unsigned long)ac->ac_o_ex.fe_start,
4592 (unsigned long)ac->ac_o_ex.fe_len,
4593 (unsigned long)ac->ac_o_ex.fe_logical,
4594 (unsigned long)ac->ac_g_ex.fe_group,
4595 (unsigned long)ac->ac_g_ex.fe_start,
4596 (unsigned long)ac->ac_g_ex.fe_len,
4597 (unsigned long)ac->ac_g_ex.fe_logical,
4598 (unsigned long)ac->ac_b_ex.fe_group,
4599 (unsigned long)ac->ac_b_ex.fe_start,
4600 (unsigned long)ac->ac_b_ex.fe_len,
4601 (unsigned long)ac->ac_b_ex.fe_logical,
4602 (int)ac->ac_criteria);
4603 mb_debug(sb, "%u found", ac->ac_found);
4604 ext4_mb_show_pa(sb);
4605 }
4606 #else
4607 static inline void ext4_mb_show_pa(struct super_block *sb)
4608 {
4609 return;
4610 }
4611 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4612 {
4613 ext4_mb_show_pa(ac->ac_sb);
4614 return;
4615 }
4616 #endif
4617
4618 /*
4619 * We use locality group preallocation for small files. The size of the
4620 * file is determined by the current size or the resulting size after
4621 * allocation, whichever is larger
4622 *
4623 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4624 */
4625 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4626 {
4627 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4628 int bsbits = ac->ac_sb->s_blocksize_bits;
4629 loff_t size, isize;
4630
4631 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4632 return;
4633
4634 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4635 return;
4636
4637 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4638 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4639 >> bsbits;
4640
4641 if ((size == isize) && !ext4_fs_is_busy(sbi) &&
4642 !inode_is_open_for_write(ac->ac_inode)) {
4643 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4644 return;
4645 }
4646
4647 if (sbi->s_mb_group_prealloc <= 0) {
4648 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4649 return;
4650 }
4651
4652 /* don't use group allocation for large files */
4653 size = max(size, isize);
4654 if (size > sbi->s_mb_stream_request) {
4655 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4656 return;
4657 }
4658
4659 BUG_ON(ac->ac_lg != NULL);
4660 /*
4661 * locality group prealloc space is per-CPU. The reason for having
4662 * a per-CPU locality group is to reduce the contention between block
4663 * requests from multiple CPUs.
4664 */
4665 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
4666
4667 /* we're going to use group allocation */
4668 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4669
4670 /* serialize all allocations in the group */
4671 mutex_lock(&ac->ac_lg->lg_mutex);
4672 }
4673
4674 static noinline_for_stack int
4675 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4676 struct ext4_allocation_request *ar)
4677 {
4678 struct super_block *sb = ar->inode->i_sb;
4679 struct ext4_sb_info *sbi = EXT4_SB(sb);
4680 struct ext4_super_block *es = sbi->s_es;
4681 ext4_group_t group;
4682 unsigned int len;
4683 ext4_fsblk_t goal;
4684 ext4_grpblk_t block;
4685
4686 /* we can't allocate > group size */
4687 len = ar->len;
4688
4689 /* just a dirty hack to filter too big requests */
4690 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4691 len = EXT4_CLUSTERS_PER_GROUP(sb);
4692
4693 /* start searching from the goal */
4694 goal = ar->goal;
4695 if (goal < le32_to_cpu(es->s_first_data_block) ||
4696 goal >= ext4_blocks_count(es))
4697 goal = le32_to_cpu(es->s_first_data_block);
4698 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4699
4700 /* set up allocation goals */
4701 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4702 ac->ac_status = AC_STATUS_CONTINUE;
4703 ac->ac_sb = sb;
4704 ac->ac_inode = ar->inode;
4705 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4706 ac->ac_o_ex.fe_group = group;
4707 ac->ac_o_ex.fe_start = block;
4708 ac->ac_o_ex.fe_len = len;
4709 ac->ac_g_ex = ac->ac_o_ex;
4710 ac->ac_flags = ar->flags;
4711
4712 /* we have to define context: we'll work with a file or
4713 * locality group. this is a policy, actually */
4714 ext4_mb_group_or_file(ac);
4715
4716 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
4717 "left: %u/%u, right %u/%u to %swritable\n",
4718 (unsigned) ar->len, (unsigned) ar->logical,
4719 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4720 (unsigned) ar->lleft, (unsigned) ar->pleft,
4721 (unsigned) ar->lright, (unsigned) ar->pright,
4722 inode_is_open_for_write(ar->inode) ? "" : "non-");
4723 return 0;
4724
4725 }
4726
4727 static noinline_for_stack void
4728 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4729 struct ext4_locality_group *lg,
4730 int order, int total_entries)
4731 {
4732 ext4_group_t group = 0;
4733 struct ext4_buddy e4b;
4734 struct list_head discard_list;
4735 struct ext4_prealloc_space *pa, *tmp;
4736
4737 mb_debug(sb, "discard locality group preallocation\n");
4738
4739 INIT_LIST_HEAD(&discard_list);
4740
4741 spin_lock(&lg->lg_prealloc_lock);
4742 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4743 pa_inode_list,
4744 lockdep_is_held(&lg->lg_prealloc_lock)) {
4745 spin_lock(&pa->pa_lock);
4746 if (atomic_read(&pa->pa_count)) {
4747 /*
4748 * This is the pa that we just used
4749 * for block allocation. So don't
4750 * free that
4751 */
4752 spin_unlock(&pa->pa_lock);
4753 continue;
4754 }
4755 if (pa->pa_deleted) {
4756 spin_unlock(&pa->pa_lock);
4757 continue;
4758 }
4759 /* only lg prealloc space */
4760 BUG_ON(pa->pa_type != MB_GROUP_PA);
4761
4762 /* seems this one can be freed ... */
4763 ext4_mb_mark_pa_deleted(sb, pa);
4764 spin_unlock(&pa->pa_lock);
4765
4766 list_del_rcu(&pa->pa_inode_list);
4767 list_add(&pa->u.pa_tmp_list, &discard_list);
4768
4769 total_entries--;
4770 if (total_entries <= 5) {
4771 /*
4772 * we want to keep only 5 entries
4773 * while allowing the list to grow to 8. This
4774 * makes sure we don't call discard
4775 * again soon for this list.
4776 */
4777 break;
4778 }
4779 }
4780 spin_unlock(&lg->lg_prealloc_lock);
4781
4782 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4783 int err;
4784
4785 group = ext4_get_group_number(sb, pa->pa_pstart);
4786 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4787 GFP_NOFS|__GFP_NOFAIL);
4788 if (err) {
4789 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
4790 err, group);
4791 continue;
4792 }
4793 ext4_lock_group(sb, group);
4794 list_del(&pa->pa_group_list);
4795 ext4_mb_release_group_pa(&e4b, pa);
4796 ext4_unlock_group(sb, group);
4797
4798 ext4_mb_unload_buddy(&e4b);
4799 list_del(&pa->u.pa_tmp_list);
4800 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4801 }
4802 }
4803
4804 /*
4805 * We have incremented pa_count. So it cannot be freed at this
4806 * point. Also we hold lg_mutex. So no parallel allocation is
4807 * possible from this lg. That means pa_free cannot be updated.
4808 *
4809 * A parallel ext4_mb_discard_group_preallocations is possible,
4810 * which can cause the lg_prealloc_list to be updated.
4811 */
4812
4813 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4814 {
4815 int order, added = 0, lg_prealloc_count = 1;
4816 struct super_block *sb = ac->ac_sb;
4817 struct ext4_locality_group *lg = ac->ac_lg;
4818 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4819
4820 order = fls(pa->pa_free) - 1;
4821 if (order > PREALLOC_TB_SIZE - 1)
4822 /* The max size of hash table is PREALLOC_TB_SIZE */
4823 order = PREALLOC_TB_SIZE - 1;
4824 /* Add the prealloc space to lg */
4825 spin_lock(&lg->lg_prealloc_lock);
4826 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4827 pa_inode_list,
4828 lockdep_is_held(&lg->lg_prealloc_lock)) {
4829 spin_lock(&tmp_pa->pa_lock);
4830 if (tmp_pa->pa_deleted) {
4831 spin_unlock(&tmp_pa->pa_lock);
4832 continue;
4833 }
4834 if (!added && pa->pa_free < tmp_pa->pa_free) {
4835 /* Add to the tail of the previous entry */
4836 list_add_tail_rcu(&pa->pa_inode_list,
4837 &tmp_pa->pa_inode_list);
4838 added = 1;
4839 /*
4840 * we want to count the total
4841 * number of entries in the list
4842 */
4843 }
4844 spin_unlock(&tmp_pa->pa_lock);
4845 lg_prealloc_count++;
4846 }
4847 if (!added)
4848 list_add_tail_rcu(&pa->pa_inode_list,
4849 &lg->lg_prealloc_list[order]);
4850 spin_unlock(&lg->lg_prealloc_lock);
4851
4852 /* Now trim the list to be not more than 8 elements */
4853 if (lg_prealloc_count > 8) {
4854 ext4_mb_discard_lg_preallocations(sb, lg,
4855 order, lg_prealloc_count);
4856 return;
4857 }
4858 return;
4859 }
4860
4861 /*
4862 * if the per-inode prealloc list is too long, trim some PAs
4863 */
4864 static void ext4_mb_trim_inode_pa(struct inode *inode)
4865 {
4866 struct ext4_inode_info *ei = EXT4_I(inode);
4867 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4868 int count, delta;
4869
4870 count = atomic_read(&ei->i_prealloc_active);
4871 delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
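/*
 * For example, with a limit of 32 active PAs, delta is 32/4 + 1 = 9,
 * so trimming only starts once more than 41 PAs are active, and then
 * the (count - 32) least recently used PAs are discarded.
 */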
4872 if (count > sbi->s_mb_max_inode_prealloc + delta) {
4873 count -= sbi->s_mb_max_inode_prealloc;
4874 ext4_discard_preallocations(inode, count);
4875 }
4876 }
4877
4878 /*
4879 * release all resources we used in allocation
4880 */
4881 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4882 {
4883 struct inode *inode = ac->ac_inode;
4884 struct ext4_inode_info *ei = EXT4_I(inode);
4885 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4886 struct ext4_prealloc_space *pa = ac->ac_pa;
4887 if (pa) {
4888 if (pa->pa_type == MB_GROUP_PA) {
4889 /* see comment in ext4_mb_use_group_pa() */
4890 spin_lock(&pa->pa_lock);
4891 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4892 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4893 pa->pa_free -= ac->ac_b_ex.fe_len;
4894 pa->pa_len -= ac->ac_b_ex.fe_len;
4895 spin_unlock(&pa->pa_lock);
4896
4897 /*
4898 * We want to add the pa to the right bucket.
4899 * Remove it from the list and while adding
4900 * make sure the list to which we are adding
4901 * doesn't grow big.
4902 */
4903 if (likely(pa->pa_free)) {
4904 spin_lock(pa->pa_obj_lock);
4905 list_del_rcu(&pa->pa_inode_list);
4906 spin_unlock(pa->pa_obj_lock);
4907 ext4_mb_add_n_trim(ac);
4908 }
4909 }
4910
4911 if (pa->pa_type == MB_INODE_PA) {
4912 /*
4913 * treat the per-inode prealloc list as an LRU list, then try
4914 * to trim the least recently used PAs.
4915 */
4916 spin_lock(pa->pa_obj_lock);
4917 list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
4918 spin_unlock(pa->pa_obj_lock);
4919 }
4920
4921 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4922 }
4923 if (ac->ac_bitmap_page)
4924 put_page(ac->ac_bitmap_page);
4925 if (ac->ac_buddy_page)
4926 put_page(ac->ac_buddy_page);
4927 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4928 mutex_unlock(&ac->ac_lg->lg_mutex);
4929 ext4_mb_collect_stats(ac);
4930 ext4_mb_trim_inode_pa(inode);
4931 return 0;
4932 }
4933
4934 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4935 {
4936 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4937 int ret;
4938 int freed = 0, busy = 0;
4939 int retry = 0;
4940
4941 trace_ext4_mb_discard_preallocations(sb, needed);
4942
4943 if (needed == 0)
4944 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
4945 repeat:
4946 for (i = 0; i < ngroups && needed > 0; i++) {
4947 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
4948 freed += ret;
4949 needed -= ret;
4950 cond_resched();
4951 }
4952
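/*
 * If some PAs were skipped because they were still referenced (busy)
 * and we still need space, retry a couple of times so they get a
 * chance to be released.
 */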
4953 if (needed > 0 && busy && ++retry < 3) {
4954 busy = 0;
4955 goto repeat;
4956 }
4957
4958 return freed;
4959 }
4960
4961 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
4962 struct ext4_allocation_context *ac, u64 *seq)
4963 {
4964 int freed;
4965 u64 seq_retry = 0;
4966 bool ret = false;
4967
4968 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4969 if (freed) {
4970 ret = true;
4971 goto out_dbg;
4972 }
4973 seq_retry = ext4_get_discard_pa_seq_sum();
4974 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
4975 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
4976 *seq = seq_retry;
4977 ret = true;
4978 }
4979
4980 out_dbg:
4981 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
4982 return ret;
4983 }
4984
4985 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
4986 struct ext4_allocation_request *ar, int *errp);
4987
4988 /*
4989 * Main entry point into mballoc to allocate blocks;
4990 * it tries to use preallocation first, then falls back
4991 * to usual allocation
4992 */
4993 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4994 struct ext4_allocation_request *ar, int *errp)
4995 {
4996 struct ext4_allocation_context *ac = NULL;
4997 struct ext4_sb_info *sbi;
4998 struct super_block *sb;
4999 ext4_fsblk_t block = 0;
5000 unsigned int inquota = 0;
5001 unsigned int reserv_clstrs = 0;
5002 int retries = 0;
5003 u64 seq;
5004
5005 might_sleep();
5006 sb = ar->inode->i_sb;
5007 sbi = EXT4_SB(sb);
5008
5009 trace_ext4_request_blocks(ar);
5010 if (sbi->s_mount_state & EXT4_FC_REPLAY)
5011 return ext4_mb_new_blocks_simple(handle, ar, errp);
5012
5013 /* Allow to use superuser reservation for quota file */
5014 if (ext4_is_quota_file(ar->inode))
5015 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5016
5017 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5018 /* Without delayed allocation we need to verify
5019 * there are enough free blocks to do block allocation
5020 * and verify the allocation doesn't exceed the quota limits.
5021 */
5022 while (ar->len &&
5023 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5024
5025 /* let others free the space */
5026 cond_resched();
5027 ar->len = ar->len >> 1;
5028 }
5029 if (!ar->len) {
5030 ext4_mb_show_pa(sb);
5031 *errp = -ENOSPC;
5032 return 0;
5033 }
5034 reserv_clstrs = ar->len;
5035 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5036 dquot_alloc_block_nofail(ar->inode,
5037 EXT4_C2B(sbi, ar->len));
5038 } else {
5039 while (ar->len &&
5040 dquot_alloc_block(ar->inode,
5041 EXT4_C2B(sbi, ar->len))) {
5042
5043 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5044 ar->len--;
5045 }
5046 }
5047 inquota = ar->len;
5048 if (ar->len == 0) {
5049 *errp = -EDQUOT;
5050 goto out;
5051 }
5052 }
5053
5054 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5055 if (!ac) {
5056 ar->len = 0;
5057 *errp = -ENOMEM;
5058 goto out;
5059 }
5060
5061 *errp = ext4_mb_initialize_context(ac, ar);
5062 if (*errp) {
5063 ar->len = 0;
5064 goto out;
5065 }
5066
5067 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
5068 seq = this_cpu_read(discard_pa_seq);
5069 if (!ext4_mb_use_preallocated(ac)) {
5070 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5071 ext4_mb_normalize_request(ac, ar);
5072
5073 *errp = ext4_mb_pa_alloc(ac);
5074 if (*errp)
5075 goto errout;
5076 repeat:
5077 /* allocate space in core */
5078 *errp = ext4_mb_regular_allocator(ac);
5079 /*
5080 * The pa allocated above is added to grp->bb_prealloc_list only
5081 * when we were able to allocate some blocks, i.e. when
5082 * ac->ac_status == AC_STATUS_FOUND.
5083 * An error from above means ac->ac_status != AC_STATUS_FOUND,
5084 * so we have to free this pa here.
5085 */
5086 if (*errp) {
5087 ext4_mb_pa_free(ac);
5088 ext4_discard_allocated_blocks(ac);
5089 goto errout;
5090 }
5091 if (ac->ac_status == AC_STATUS_FOUND &&
5092 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5093 ext4_mb_pa_free(ac);
5094 }
5095 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5096 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5097 if (*errp) {
5098 ext4_discard_allocated_blocks(ac);
5099 goto errout;
5100 } else {
5101 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5102 ar->len = ac->ac_b_ex.fe_len;
5103 }
5104 } else {
5105 if (++retries < 3 &&
5106 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5107 goto repeat;
5108 /*
5109 * If block allocation fails then the pa allocated above
5110 * needs to be freed here itself.
5111 */
5112 ext4_mb_pa_free(ac);
5113 *errp = -ENOSPC;
5114 }
5115
5116 errout:
5117 if (*errp) {
5118 ac->ac_b_ex.fe_len = 0;
5119 ar->len = 0;
5120 ext4_mb_show_ac(ac);
5121 }
5122 ext4_mb_release_context(ac);
5123 out:
5124 if (ac)
5125 kmem_cache_free(ext4_ac_cachep, ac);
5126 if (inquota && ar->len < inquota)
5127 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5128 if (!ar->len) {
5129 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5130 /* release all the reserved blocks if non delalloc */
5131 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5132 reserv_clstrs);
5133 }
5134
5135 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5136
5137 return block;
5138 }
5139
5140 /*
5141 * We can merge two free data extents only if the physical blocks
5142 * are contiguous, AND the extents were freed by the same transaction,
5143 * AND the blocks are associated with the same group.
5144 */
5145 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5146 struct ext4_free_data *entry,
5147 struct ext4_free_data *new_entry,
5148 struct rb_root *entry_rb_root)
5149 {
5150 if ((entry->efd_tid != new_entry->efd_tid) ||
5151 (entry->efd_group != new_entry->efd_group))
5152 return;
5153 if (entry->efd_start_cluster + entry->efd_count ==
5154 new_entry->efd_start_cluster) {
5155 new_entry->efd_start_cluster = entry->efd_start_cluster;
5156 new_entry->efd_count += entry->efd_count;
5157 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5158 entry->efd_start_cluster) {
5159 new_entry->efd_count += entry->efd_count;
5160 } else
5161 return;
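/*
 * One of the branches above absorbed @entry into @new_entry (for
 * example, an entry covering clusters [100, 110) followed by a new
 * extent starting at cluster 110 in the same transaction becomes one
 * extent starting at 100), so unlink the absorbed entry from the
 * committed-data list and the rb-tree and free it.
 */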
5162 spin_lock(&sbi->s_md_lock);
5163 list_del(&entry->efd_list);
5164 spin_unlock(&sbi->s_md_lock);
5165 rb_erase(&entry->efd_node, entry_rb_root);
5166 kmem_cache_free(ext4_free_data_cachep, entry);
5167 }
5168
5169 static noinline_for_stack int
5170 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5171 struct ext4_free_data *new_entry)
5172 {
5173 ext4_group_t group = e4b->bd_group;
5174 ext4_grpblk_t cluster;
5175 ext4_grpblk_t clusters = new_entry->efd_count;
5176 struct ext4_free_data *entry;
5177 struct ext4_group_info *db = e4b->bd_info;
5178 struct super_block *sb = e4b->bd_sb;
5179 struct ext4_sb_info *sbi = EXT4_SB(sb);
5180 struct rb_node **n = &db->bb_free_root.rb_node, *node;
5181 struct rb_node *parent = NULL, *new_node;
5182
5183 BUG_ON(!ext4_handle_valid(handle));
5184 BUG_ON(e4b->bd_bitmap_page == NULL);
5185 BUG_ON(e4b->bd_buddy_page == NULL);
5186
5187 new_node = &new_entry->efd_node;
5188 cluster = new_entry->efd_start_cluster;
5189
5190 if (!*n) {
5191 /* first free block extent. We need to
5192 * protect the buddy cache from being freed,
5193 * otherwise we'll refresh it from the
5194 * on-disk bitmap and lose not-yet-available
5195 * blocks */
5196 get_page(e4b->bd_buddy_page);
5197 get_page(e4b->bd_bitmap_page);
5198 }
5199 while (*n) {
5200 parent = *n;
5201 entry = rb_entry(parent, struct ext4_free_data, efd_node);
5202 if (cluster < entry->efd_start_cluster)
5203 n = &(*n)->rb_left;
5204 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5205 n = &(*n)->rb_right;
5206 else {
5207 ext4_grp_locked_error(sb, group, 0,
5208 ext4_group_first_block_no(sb, group) +
5209 EXT4_C2B(sbi, cluster),
5210 "Block already on to-be-freed list");
5211 kmem_cache_free(ext4_free_data_cachep, new_entry);
5212 return 0;
5213 }
5214 }
5215
5216 rb_link_node(new_node, parent, n);
5217 rb_insert_color(new_node, &db->bb_free_root);
5218
5219 /* Now try to see if the extent can be merged to the left and right */
5220 node = rb_prev(new_node);
5221 if (node) {
5222 entry = rb_entry(node, struct ext4_free_data, efd_node);
5223 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5224 &(db->bb_free_root));
5225 }
5226
5227 node = rb_next(new_node);
5228 if (node) {
5229 entry = rb_entry(node, struct ext4_free_data, efd_node);
5230 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5231 &(db->bb_free_root));
5232 }
5233
5234 spin_lock(&sbi->s_md_lock);
5235 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5236 sbi->s_mb_free_pending += clusters;
5237 spin_unlock(&sbi->s_md_lock);
5238 return 0;
5239 }
5240
5241 /*
5242 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5243 * linearly starting at the goal block and also excludes the blocks which
5244 * are going to be in use after fast commit replay.
5245 */
5246 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5247 struct ext4_allocation_request *ar, int *errp)
5248 {
5249 struct buffer_head *bitmap_bh;
5250 struct super_block *sb = ar->inode->i_sb;
5251 ext4_group_t group;
5252 ext4_grpblk_t blkoff;
5253 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5254 ext4_grpblk_t i = 0;
5255 ext4_fsblk_t goal, block;
5256 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5257
5258 goal = ar->goal;
5259 if (goal < le32_to_cpu(es->s_first_data_block) ||
5260 goal >= ext4_blocks_count(es))
5261 goal = le32_to_cpu(es->s_first_data_block);
5262
5263 ar->len = 0;
5264 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5265 for (; group < ext4_get_groups_count(sb); group++) {
5266 bitmap_bh = ext4_read_block_bitmap(sb, group);
5267 if (IS_ERR(bitmap_bh)) {
5268 *errp = PTR_ERR(bitmap_bh);
5269 pr_warn("Failed to read block bitmap\n");
5270 return 0;
5271 }
5272
5273 ext4_get_group_no_and_offset(sb,
5274 max(ext4_group_first_block_no(sb, group), goal),
5275 NULL, &blkoff);
5276 while (1) {
5277 i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5278 blkoff);
5279 if (i >= max)
5280 break;
5281 if (ext4_fc_replay_check_excluded(sb,
5282 ext4_group_first_block_no(sb, group) + i)) {
5283 blkoff = i + 1;
5284 } else
5285 break;
5286 }
5287 brelse(bitmap_bh);
5288 if (i < max)
5289 break;
5290 }
5291
5292 if (group >= ext4_get_groups_count(sb) || i >= max) {
5293 *errp = -ENOSPC;
5294 return 0;
5295 }
5296
5297 block = ext4_group_first_block_no(sb, group) + i;
5298 ext4_mb_mark_bb(sb, block, 1, 1);
5299 ar->len = 1;
5300
5301 return block;
5302 }
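/*
 * Illustrative user-space sketch (not part of mballoc.c) of the scan in
 * ext4_mb_new_blocks_simple() above: find the next zero bit in the block
 * bitmap and skip bits that are excluded by fast commit replay. The byte-wise
 * bitmap, find_next_zero() and is_excluded() below are hypothetical helpers;
 * unlike mb_find_next_zero_bit(), no little-endian bit ordering is modeled.
 */
#include <stdbool.h>
#include <stdio.h>

static long find_next_zero(const unsigned char *bm, long nbits, long start)
{
	long i;

	for (i = start; i < nbits; i++)
		if (!(bm[i / 8] & (1u << (i % 8))))
			return i;
	return nbits;			/* no free bit left in this group */
}

static bool is_excluded(long bit)
{
	return bit == 9;		/* pretend bit 9 will be in use after replay */
}

int main(void)
{
	unsigned char bitmap[2] = { 0xff, 0x01 };	/* bits 0-8 in use */
	long nbits = 16, i = 0;

	while (1) {
		i = find_next_zero(bitmap, nbits, i);
		if (i >= nbits)
			break;			/* group exhausted, try the next one */
		if (!is_excluded(i))
			break;			/* found a usable block */
		i = i + 1;			/* skip the excluded block */
	}
	printf("allocated bit %ld\n", i);	/* expected: 10 */
	return 0;
}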
5303
5304 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5305 unsigned long count)
5306 {
5307 struct buffer_head *bitmap_bh;
5308 struct super_block *sb = inode->i_sb;
5309 struct ext4_group_desc *gdp;
5310 struct buffer_head *gdp_bh;
5311 ext4_group_t group;
5312 ext4_grpblk_t blkoff;
5313 int already_freed = 0, err, i;
5314
5315 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5316 bitmap_bh = ext4_read_block_bitmap(sb, group);
5317 if (IS_ERR(bitmap_bh)) {
5318 err = PTR_ERR(bitmap_bh);
5319 pr_warn("Failed to read block bitmap\n");
5320 return;
5321 }
5322 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5323 	if (!gdp) {
		brelse(bitmap_bh);	/* don't leak the bitmap buffer on early return */
5324 		return;
	}
5325
5326 for (i = 0; i < count; i++) {
5327 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5328 already_freed++;
5329 }
5330 mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5331 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5332 	if (err) {
		brelse(bitmap_bh);	/* don't leak the bitmap buffer on early return */
5333 		return;
	}
5334 ext4_free_group_clusters_set(
5335 sb, gdp, ext4_free_group_clusters(sb, gdp) +
5336 count - already_freed);
5337 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5338 ext4_group_desc_csum_set(sb, group, gdp);
5339 ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5340 sync_dirty_buffer(bitmap_bh);
5341 sync_dirty_buffer(gdp_bh);
5342 brelse(bitmap_bh);
5343 }
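/*
 * Illustrative user-space sketch (not part of mballoc.c) of the block ->
 * (group, offset) split done by ext4_get_group_no_and_offset() in the two
 * helpers above, assuming the cluster size equals the block size (no
 * bigalloc). The geometry constants in main() are made-up example values,
 * not read from a real superblock.
 */
#include <stdint.h>
#include <stdio.h>

static void block_to_group(uint64_t block, uint64_t first_data_block,
			   uint32_t blocks_per_group,
			   uint32_t *group, uint32_t *offset)
{
	uint64_t rel = block - first_data_block;	/* relative to first data block */

	*group = (uint32_t)(rel / blocks_per_group);	/* which block group */
	*offset = (uint32_t)(rel % blocks_per_group);	/* bit index inside it */
}

int main(void)
{
	uint32_t group, offset;

	/* 1 KiB blocks: first data block is 1, 8192 blocks per group */
	block_to_group(10000, 1, 8192, &group, &offset);
	printf("group=%u offset=%u\n", (unsigned)group, (unsigned)offset);
	/* expected: group=1 offset=1807 */
	return 0;
}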
5344
5345 /**
5346 * ext4_free_blocks() -- Free given blocks and update quota
5347 * @handle: handle for this transaction
5348 * @inode: inode
5349 * @bh: optional buffer of the block to be freed
5350 * @block: starting physical block to be freed
5351 * @count: number of blocks to be freed
5352 * @flags: flags used by ext4_free_blocks
5353 */
5354 void ext4_free_blocks(handle_t *handle, struct inode *inode,
5355 struct buffer_head *bh, ext4_fsblk_t block,
5356 unsigned long count, int flags)
5357 {
5358 struct buffer_head *bitmap_bh = NULL;
5359 struct super_block *sb = inode->i_sb;
5360 struct ext4_group_desc *gdp;
5361 struct ext4_group_info *grp;
5362 unsigned int overflow;
5363 ext4_grpblk_t bit;
5364 struct buffer_head *gd_bh;
5365 ext4_group_t block_group;
5366 struct ext4_sb_info *sbi;
5367 struct ext4_buddy e4b;
5368 unsigned int count_clusters;
5369 int err = 0;
5370 int ret;
5371
5372 sbi = EXT4_SB(sb);
5373
5374 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
5375 ext4_free_blocks_simple(inode, block, count);
5376 return;
5377 }
5378
5379 might_sleep();
5380 if (bh) {
5381 if (block)
5382 BUG_ON(block != bh->b_blocknr);
5383 else
5384 block = bh->b_blocknr;
5385 }
5386
5387 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5388 !ext4_inode_block_valid(inode, block, count)) {
5389 ext4_error(sb, "Freeing blocks not in datazone - "
5390 "block = %llu, count = %lu", block, count);
5391 goto error_return;
5392 }
5393
5394 ext4_debug("freeing block %llu\n", block);
5395 trace_ext4_free_blocks(inode, block, count, flags);
5396
5397 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
5398 BUG_ON(count > 1);
5399
5400 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
5401 inode, bh, block);
5402 }
5403
5404 /*
5405 * If the extent to be freed does not begin on a cluster
5406 * boundary, we need to deal with partial clusters at the
5407 * beginning and end of the extent. Normally we will free
5408 * blocks at the beginning or the end unless we are explicitly
5409 * requested to avoid doing so.
5410 */
5411 overflow = EXT4_PBLK_COFF(sbi, block);
5412 if (overflow) {
5413 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
5414 overflow = sbi->s_cluster_ratio - overflow;
5415 block += overflow;
5416 if (count > overflow)
5417 count -= overflow;
5418 else
5419 return;
5420 } else {
5421 block -= overflow;
5422 count += overflow;
5423 }
5424 }
5425 overflow = EXT4_LBLK_COFF(sbi, count);
5426 if (overflow) {
5427 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
5428 if (count > overflow)
5429 count -= overflow;
5430 else
5431 return;
5432 } else
5433 count += sbi->s_cluster_ratio - overflow;
5434 }
5435
5436 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
5437 int i;
5438 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
5439
5440 for (i = 0; i < count; i++) {
5441 cond_resched();
5442 if (is_metadata)
5443 bh = sb_find_get_block(inode->i_sb, block + i);
5444 ext4_forget(handle, is_metadata, inode, bh, block + i);
5445 }
5446 }
5447
5448 do_more:
5449 overflow = 0;
5450 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5451
5452 grp = ext4_get_group_info(sb, block_group);
5453 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
5454 return;
5455
5456 /*
5457 * Check to see if we are freeing blocks across a group
5458 * boundary.
5459 */
5460 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5461 overflow = EXT4_C2B(sbi, bit) + count -
5462 EXT4_BLOCKS_PER_GROUP(sb);
5463 count -= overflow;
5464 }
5465 count_clusters = EXT4_NUM_B2C(sbi, count);
5466 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5467 if (IS_ERR(bitmap_bh)) {
5468 err = PTR_ERR(bitmap_bh);
5469 bitmap_bh = NULL;
5470 goto error_return;
5471 }
5472 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
5473 if (!gdp) {
5474 err = -EIO;
5475 goto error_return;
5476 }
5477
5478 if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
5479 in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
5480 in_range(block, ext4_inode_table(sb, gdp),
5481 sbi->s_itb_per_group) ||
5482 in_range(block + count - 1, ext4_inode_table(sb, gdp),
5483 sbi->s_itb_per_group)) {
5484
5485 ext4_error(sb, "Freeing blocks in system zone - "
5486 "Block = %llu, count = %lu", block, count);
5487 /* err = 0. ext4_std_error should be a no op */
5488 goto error_return;
5489 }
5490
5491 BUFFER_TRACE(bitmap_bh, "getting write access");
5492 err = ext4_journal_get_write_access(handle, bitmap_bh);
5493 if (err)
5494 goto error_return;
5495
5496 /*
5497 * We are about to modify some metadata. Call the journal APIs
5498 * to unshare ->b_data if a currently-committing transaction is
5499 * using it
5500 */
5501 BUFFER_TRACE(gd_bh, "get_write_access");
5502 err = ext4_journal_get_write_access(handle, gd_bh);
5503 if (err)
5504 goto error_return;
5505 #ifdef AGGRESSIVE_CHECK
5506 {
5507 int i;
5508 for (i = 0; i < count_clusters; i++)
5509 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
5510 }
5511 #endif
5512 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
5513
5514 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
5515 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
5516 GFP_NOFS|__GFP_NOFAIL);
5517 if (err)
5518 goto error_return;
5519
5520 /*
5521 * We need to make sure we don't reuse the freed block until after the
5522 * transaction is committed. We make an exception if the inode is to be
5523 * written in writeback mode since writeback mode has weak data
5524 * consistency guarantees.
5525 */
5526 if (ext4_handle_valid(handle) &&
5527 ((flags & EXT4_FREE_BLOCKS_METADATA) ||
5528 !ext4_should_writeback_data(inode))) {
5529 struct ext4_free_data *new_entry;
5530 /*
5531 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
5532 * to fail.
5533 */
5534 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
5535 GFP_NOFS|__GFP_NOFAIL);
5536 new_entry->efd_start_cluster = bit;
5537 new_entry->efd_group = block_group;
5538 new_entry->efd_count = count_clusters;
5539 new_entry->efd_tid = handle->h_transaction->t_tid;
5540
5541 ext4_lock_group(sb, block_group);
5542 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
5543 ext4_mb_free_metadata(handle, &e4b, new_entry);
5544 } else {
5545 		/* We need to update group_info->bb_free and the bitmap
5546 		 * with the group lock held; ext4_mb_generate_buddy() looks
5547 		 * at them with the group lock held.
5548 		 */
5549 if (test_opt(sb, DISCARD)) {
5550 err = ext4_issue_discard(sb, block_group, bit, count,
5551 NULL);
5552 if (err && err != -EOPNOTSUPP)
5553 ext4_msg(sb, KERN_WARNING, "discard request in"
5554 " group:%d block:%d count:%lu failed"
5555 " with %d", block_group, bit, count,
5556 err);
5557 } else
5558 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
5559
5560 ext4_lock_group(sb, block_group);
5561 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
5562 mb_free_blocks(inode, &e4b, bit, count_clusters);
5563 }
5564
5565 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
5566 ext4_free_group_clusters_set(sb, gdp, ret);
5567 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
5568 ext4_group_desc_csum_set(sb, block_group, gdp);
5569 ext4_unlock_group(sb, block_group);
5570
5571 if (sbi->s_log_groups_per_flex) {
5572 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5573 atomic64_add(count_clusters,
5574 &sbi_array_rcu_deref(sbi, s_flex_groups,
5575 flex_group)->free_clusters);
5576 }
5577
5578 /*
5579 * on a bigalloc file system, defer the s_freeclusters_counter
5580 * update to the caller (ext4_remove_space and friends) so they
5581 * can determine if a cluster freed here should be rereserved
5582 */
5583 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
5584 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
5585 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
5586 percpu_counter_add(&sbi->s_freeclusters_counter,
5587 count_clusters);
5588 }
5589
5590 ext4_mb_unload_buddy(&e4b);
5591
5592 /* We dirtied the bitmap block */
5593 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5594 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5595
5596 /* And the group descriptor block */
5597 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5598 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5599 if (!err)
5600 err = ret;
5601
5602 if (overflow && !err) {
5603 block += count;
5604 count = overflow;
5605 put_bh(bitmap_bh);
5606 goto do_more;
5607 }
5608 error_return:
5609 brelse(bitmap_bh);
5610 ext4_std_error(sb, err);
5611 return;
5612 }
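/*
 * Illustrative user-space sketch (not part of mballoc.c) of the head/tail
 * rounding that ext4_free_blocks() above performs via EXT4_PBLK_COFF() and
 * EXT4_LBLK_COFF() when it is allowed to free partial clusters: round the
 * start down and the length up to whole clusters. align_to_clusters() is a
 * hypothetical helper; the cluster ratio must be a power of two, as on a
 * real bigalloc filesystem.
 */
#include <stdint.h>
#include <stdio.h>

static void align_to_clusters(uint64_t *block, uint64_t *count,
			      unsigned int ratio)
{
	uint64_t head = *block & (ratio - 1);	/* offset of start within its cluster */
	uint64_t tail;

	if (head) {
		*block -= head;			/* round start down to a cluster boundary */
		*count += head;
	}
	tail = *count & (ratio - 1);		/* blocks past the last whole cluster */
	if (tail)
		*count += ratio - tail;		/* round length up to whole clusters */
}

int main(void)
{
	uint64_t block = 1003, count = 37;

	align_to_clusters(&block, &count, 16);
	printf("block=%llu count=%llu\n",
	       (unsigned long long)block, (unsigned long long)count);
	/* expected: block=992 count=48, i.e. three whole 16-block clusters */
	return 0;
}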
5613
5614 /**
5615 * ext4_group_add_blocks() -- Add given blocks to an existing group
5616 * @handle: handle to this transaction
5617 * @sb: super block
5618 * @block: start physical block to add to the block group
5619 * @count: number of blocks to free
5620 *
5621 * This marks the blocks as free in the bitmap and buddy.
5622 */
5623 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
5624 ext4_fsblk_t block, unsigned long count)
5625 {
5626 struct buffer_head *bitmap_bh = NULL;
5627 struct buffer_head *gd_bh;
5628 ext4_group_t block_group;
5629 ext4_grpblk_t bit;
5630 unsigned int i;
5631 struct ext4_group_desc *desc;
5632 struct ext4_sb_info *sbi = EXT4_SB(sb);
5633 struct ext4_buddy e4b;
5634 int err = 0, ret, free_clusters_count;
5635 ext4_grpblk_t clusters_freed;
5636 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
5637 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
5638 unsigned long cluster_count = last_cluster - first_cluster + 1;
5639
5640 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
5641
5642 if (count == 0)
5643 return 0;
5644
5645 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5646 /*
5647 	 * Check to see if we are adding blocks across a group
5648 * boundary.
5649 */
5650 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
5651 ext4_warning(sb, "too many blocks added to group %u",
5652 block_group);
5653 err = -EINVAL;
5654 goto error_return;
5655 }
5656
5657 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5658 if (IS_ERR(bitmap_bh)) {
5659 err = PTR_ERR(bitmap_bh);
5660 bitmap_bh = NULL;
5661 goto error_return;
5662 }
5663
5664 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
5665 if (!desc) {
5666 err = -EIO;
5667 goto error_return;
5668 }
5669
5670 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
5671 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
5672 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
5673 in_range(block + count - 1, ext4_inode_table(sb, desc),
5674 sbi->s_itb_per_group)) {
5675 ext4_error(sb, "Adding blocks in system zones - "
5676 "Block = %llu, count = %lu",
5677 block, count);
5678 err = -EINVAL;
5679 goto error_return;
5680 }
5681
5682 BUFFER_TRACE(bitmap_bh, "getting write access");
5683 err = ext4_journal_get_write_access(handle, bitmap_bh);
5684 if (err)
5685 goto error_return;
5686
5687 /*
5688 * We are about to modify some metadata. Call the journal APIs
5689 * to unshare ->b_data if a currently-committing transaction is
5690 * using it
5691 */
5692 BUFFER_TRACE(gd_bh, "get_write_access");
5693 err = ext4_journal_get_write_access(handle, gd_bh);
5694 if (err)
5695 goto error_return;
5696
5697 for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
5698 BUFFER_TRACE(bitmap_bh, "clear bit");
5699 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
5700 ext4_error(sb, "bit already cleared for block %llu",
5701 (ext4_fsblk_t)(block + i));
5702 BUFFER_TRACE(bitmap_bh, "bit already cleared");
5703 } else {
5704 clusters_freed++;
5705 }
5706 }
5707
5708 err = ext4_mb_load_buddy(sb, block_group, &e4b);
5709 if (err)
5710 goto error_return;
5711
5712 /*
5713 	 * We need to update group_info->bb_free and the bitmap
5714 	 * with the group lock held; ext4_mb_generate_buddy() looks
5715 	 * at them with the group lock held.
5716 */
5717 ext4_lock_group(sb, block_group);
5718 mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
5719 mb_free_blocks(NULL, &e4b, bit, cluster_count);
5720 free_clusters_count = clusters_freed +
5721 ext4_free_group_clusters(sb, desc);
5722 ext4_free_group_clusters_set(sb, desc, free_clusters_count);
5723 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
5724 ext4_group_desc_csum_set(sb, block_group, desc);
5725 ext4_unlock_group(sb, block_group);
5726 percpu_counter_add(&sbi->s_freeclusters_counter,
5727 clusters_freed);
5728
5729 if (sbi->s_log_groups_per_flex) {
5730 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5731 atomic64_add(clusters_freed,
5732 &sbi_array_rcu_deref(sbi, s_flex_groups,
5733 flex_group)->free_clusters);
5734 }
5735
5736 ext4_mb_unload_buddy(&e4b);
5737
5738 /* We dirtied the bitmap block */
5739 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5740 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5741
5742 /* And the group descriptor block */
5743 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5744 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5745 if (!err)
5746 err = ret;
5747
5748 error_return:
5749 brelse(bitmap_bh);
5750 ext4_std_error(sb, err);
5751 return err;
5752 }
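/*
 * Small sketch (not part of mballoc.c) of the flex-group index computation
 * used by the counter updates above: with s_log_groups_per_flex set, every
 * 2^s_log_groups_per_flex block groups share one flex group, so the flex
 * group number is the block group number shifted right by that log. The
 * values below are illustrative only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int log_groups_per_flex = 4;	/* 16 block groups per flex group */
	unsigned int block_group = 37;
	unsigned int flex_group = block_group >> log_groups_per_flex;

	printf("block group %u is in flex group %u\n", block_group, flex_group);
	/* expected: block group 37 is in flex group 2 */
	return 0;
}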
5753
5754 /**
5755 * ext4_trim_extent -- function to TRIM one single free extent in the group
5756 * @sb: super block for the file system
5757 * @start: starting block of the free extent in the alloc. group
5758 * @count: number of blocks to TRIM
5759 * @group: alloc. group we are working with
5760 * @e4b: ext4 buddy for the group
5761 *
5762  * Trim "count" blocks starting at "start" in the "group". To ensure that no
5763  * one will allocate those blocks, mark them as used in the buddy bitmap. This
5764  * must be called under the group lock.
5765 */
5766 static int ext4_trim_extent(struct super_block *sb, int start, int count,
5767 ext4_group_t group, struct ext4_buddy *e4b)
5768 __releases(bitlock)
5769 __acquires(bitlock)
5770 {
5771 struct ext4_free_extent ex;
5772 int ret = 0;
5773
5774 trace_ext4_trim_extent(sb, group, start, count);
5775
5776 assert_spin_locked(ext4_group_lock_ptr(sb, group));
5777
5778 ex.fe_start = start;
5779 ex.fe_group = group;
5780 ex.fe_len = count;
5781
5782 /*
5783 * Mark blocks used, so no one can reuse them while
5784 * being trimmed.
5785 */
5786 mb_mark_used(e4b, &ex);
5787 ext4_unlock_group(sb, group);
5788 ret = ext4_issue_discard(sb, group, start, count, NULL);
5789 ext4_lock_group(sb, group);
5790 mb_free_blocks(NULL, e4b, start, ex.fe_len);
5791 return ret;
5792 }
5793
5794 /**
5795 * ext4_trim_all_free -- function to trim all free space in alloc. group
5796 * @sb: super block for file system
5797 * @group: group to be trimmed
5798 * @start: first group block to examine
5799 * @max: last group block to examine
5800 * @minblocks: minimum extent block count
5801 *
5802  * ext4_trim_all_free walks through the group's buddy bitmap searching for
5803  * free extents. When a free extent is found, it is marked as used in the
5804  * group buddy bitmap, ext4_trim_extent is called to issue a TRIM command
5805  * for it, and the extent is freed again in the buddy bitmap. This is done
5806  * until the whole group is scanned.
5811 */
5812 static ext4_grpblk_t
5813 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5814 ext4_grpblk_t start, ext4_grpblk_t max,
5815 ext4_grpblk_t minblocks)
5816 {
5817 void *bitmap;
5818 ext4_grpblk_t next, count = 0, free_count = 0;
5819 struct ext4_buddy e4b;
5820 int ret = 0;
5821
5822 trace_ext4_trim_all_free(sb, group, start, max);
5823
5824 ret = ext4_mb_load_buddy(sb, group, &e4b);
5825 if (ret) {
5826 ext4_warning(sb, "Error %d loading buddy information for %u",
5827 ret, group);
5828 return ret;
5829 }
5830 bitmap = e4b.bd_bitmap;
5831
5832 ext4_lock_group(sb, group);
5833 if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5834 minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5835 goto out;
5836
5837 start = (e4b.bd_info->bb_first_free > start) ?
5838 e4b.bd_info->bb_first_free : start;
5839
5840 while (start <= max) {
5841 start = mb_find_next_zero_bit(bitmap, max + 1, start);
5842 if (start > max)
5843 break;
5844 next = mb_find_next_bit(bitmap, max + 1, start);
5845
5846 if ((next - start) >= minblocks) {
5847 ret = ext4_trim_extent(sb, start,
5848 next - start, group, &e4b);
5849 if (ret && ret != -EOPNOTSUPP)
5850 break;
5851 ret = 0;
5852 count += next - start;
5853 }
5854 free_count += next - start;
5855 start = next + 1;
5856
5857 if (fatal_signal_pending(current)) {
5858 count = -ERESTARTSYS;
5859 break;
5860 }
5861
5862 if (need_resched()) {
5863 ext4_unlock_group(sb, group);
5864 cond_resched();
5865 ext4_lock_group(sb, group);
5866 }
5867
5868 if ((e4b.bd_info->bb_free - free_count) < minblocks)
5869 break;
5870 }
5871
5872 if (!ret) {
5873 ret = count;
5874 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
5875 }
5876 out:
5877 ext4_unlock_group(sb, group);
5878 ext4_mb_unload_buddy(&e4b);
5879
5880 ext4_debug("trimmed %d blocks in the group %d\n",
5881 count, group);
5882
5883 return ret;
5884 }
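/*
 * User-space sketch (not part of mballoc.c) of the scan loop in
 * ext4_trim_all_free() above: walk a bitmap, find each run of zero bits, and
 * "trim" runs that are at least @minlen bits long. do_discard() is a
 * hypothetical stand-in for ext4_trim_extent(), and the bitmap uses plain
 * byte-wise bit order rather than the kernel's little-endian bitmap helpers.
 */
#include <stdio.h>

#define NBITS 32

static int test_bit(const unsigned char *bm, int i)
{
	return bm[i / 8] & (1u << (i % 8));
}

static void do_discard(int start, int len)
{
	printf("discard [%d, %d)\n", start, start + len);
}

int main(void)
{
	/* bits 0-7 used, 8-19 free, 20-23 used, 24-31 free */
	unsigned char bitmap[4] = { 0xff, 0x00, 0xf0, 0x00 };
	int start = 0, next, minlen = 6, trimmed = 0;

	while (start < NBITS) {
		while (start < NBITS && test_bit(bitmap, start))
			start++;		/* find the next free bit */
		if (start >= NBITS)
			break;
		next = start;
		while (next < NBITS && !test_bit(bitmap, next))
			next++;			/* find the end of the free run */
		if (next - start >= minlen) {
			do_discard(start, next - start);
			trimmed += next - start;
		}
		start = next + 1;
	}
	printf("trimmed %d bits\n", trimmed);	/* expected: 20 */
	return 0;
}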
5885
5886 /**
5887 * ext4_trim_fs() -- trim ioctl handle function
5888 * @sb: superblock for filesystem
5889 * @range: fstrim_range structure
5890 *
5891 * start: First Byte to trim
5892 * len: number of Bytes to trim from start
5893 * minlen: minimum extent length in Bytes
5894 * ext4_trim_fs goes through all allocation groups containing Bytes from
5895  * start to start+len. For each such group, ext4_trim_all_free is invoked
5896  * to trim all free space.
5897 */
5898 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
5899 {
5900 struct request_queue *q = bdev_get_queue(sb->s_bdev);
5901 struct ext4_group_info *grp;
5902 ext4_group_t group, first_group, last_group;
5903 ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
5904 uint64_t start, end, minlen, trimmed = 0;
5905 ext4_fsblk_t first_data_blk =
5906 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
5907 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
5908 int ret = 0;
5909
5910 start = range->start >> sb->s_blocksize_bits;
5911 end = start + (range->len >> sb->s_blocksize_bits) - 1;
5912 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
5913 range->minlen >> sb->s_blocksize_bits);
5914
5915 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
5916 start >= max_blks ||
5917 range->len < sb->s_blocksize)
5918 return -EINVAL;
5919 	/* No point in trying to trim less than the discard granularity */
5920 if (range->minlen < q->limits.discard_granularity) {
5921 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
5922 q->limits.discard_granularity >> sb->s_blocksize_bits);
5923 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
5924 goto out;
5925 }
5926 if (end >= max_blks)
5927 end = max_blks - 1;
5928 if (end <= first_data_blk)
5929 goto out;
5930 if (start < first_data_blk)
5931 start = first_data_blk;
5932
5933 /* Determine first and last group to examine based on start and end */
5934 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
5935 &first_group, &first_cluster);
5936 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
5937 &last_group, &last_cluster);
5938
5939 /* end now represents the last cluster to discard in this group */
5940 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5941
5942 for (group = first_group; group <= last_group; group++) {
5943 grp = ext4_get_group_info(sb, group);
5944 if (!grp)
5945 continue;
5946 /* We only do this if the grp has never been initialized */
5947 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5948 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
5949 if (ret)
5950 break;
5951 }
5952
5953 /*
5954 * For all the groups except the last one, last cluster will
5955 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5956 * change it for the last group, note that last_cluster is
5957 * already computed earlier by ext4_get_group_no_and_offset()
5958 */
5959 if (group == last_group)
5960 end = last_cluster;
5961
5962 if (grp->bb_free >= minlen) {
5963 cnt = ext4_trim_all_free(sb, group, first_cluster,
5964 end, minlen);
5965 if (cnt < 0) {
5966 ret = cnt;
5967 break;
5968 }
5969 trimmed += cnt;
5970 }
5971
5972 /*
5973 * For every group except the first one, we are sure
5974 * that the first cluster to discard will be cluster #0.
5975 */
5976 first_cluster = 0;
5977 }
5978
5979 if (!ret)
5980 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5981
5982 out:
5983 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
5984 return ret;
5985 }
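/*
 * Sketch (not part of mballoc.c) of the byte -> block conversion at the top
 * of ext4_trim_fs() above: the fstrim_range fields are in bytes, so start,
 * end and minlen must be translated to filesystem blocks before the groups
 * can be walked. The geometry values below are illustrative, not taken from
 * a real superblock, and the cluster conversion done by EXT4_NUM_B2C() is
 * left out for simplicity.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int blocksize_bits = 12;		/* 4 KiB blocks */
	uint64_t range_start = 1 << 20;			/* trim from 1 MiB ... */
	uint64_t range_len = 8 << 20;			/* ... for 8 MiB */
	uint64_t range_minlen = 64 << 10;		/* skip extents under 64 KiB */

	uint64_t start = range_start >> blocksize_bits;
	uint64_t end = start + (range_len >> blocksize_bits) - 1;
	uint64_t minlen = range_minlen >> blocksize_bits;

	printf("start=%llu end=%llu minlen=%llu blocks\n",
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)minlen);
	/* expected: start=256 end=2303 minlen=16 blocks */
	return 0;
}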
5986
5987 /* Iterate all the free extents in the group. */
5988 int
5989 ext4_mballoc_query_range(
5990 struct super_block *sb,
5991 ext4_group_t group,
5992 ext4_grpblk_t start,
5993 ext4_grpblk_t end,
5994 ext4_mballoc_query_range_fn formatter,
5995 void *priv)
5996 {
5997 void *bitmap;
5998 ext4_grpblk_t next;
5999 struct ext4_buddy e4b;
6000 int error;
6001
6002 error = ext4_mb_load_buddy(sb, group, &e4b);
6003 if (error)
6004 return error;
6005 bitmap = e4b.bd_bitmap;
6006
6007 ext4_lock_group(sb, group);
6008
6009 start = (e4b.bd_info->bb_first_free > start) ?
6010 e4b.bd_info->bb_first_free : start;
6011 if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6012 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6013
6014 while (start <= end) {
6015 start = mb_find_next_zero_bit(bitmap, end + 1, start);
6016 if (start > end)
6017 break;
6018 next = mb_find_next_bit(bitmap, end + 1, start);
6019
6020 ext4_unlock_group(sb, group);
6021 error = formatter(sb, group, start, next - start, priv);
6022 if (error)
6023 goto out_unload;
6024 ext4_lock_group(sb, group);
6025
6026 start = next + 1;
6027 }
6028
6029 ext4_unlock_group(sb, group);
6030 out_unload:
6031 ext4_mb_unload_buddy(&e4b);
6032
6033 return error;
6034 }
6035