1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4 * Written by Alex Tomas <alex@clusterfs.com>
5 */
6
7
8 /*
9 * mballoc.c contains the multiblocks allocation routines
10 */
11
12 #include "ext4_jbd2.h"
13 #include "mballoc.h"
14 #include <linux/log2.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/nospec.h>
18 #include <linux/backing-dev.h>
19 #include <linux/freezer.h>
20 #include <trace/events/ext4.h>
21
22 /*
23 * MUSTDO:
24 * - test ext4_ext_search_left() and ext4_ext_search_right()
25 * - search for metadata in few groups
26 *
27 * TODO v4:
28 * - normalization should take into account whether file is still open
29 * - discard preallocations if no free space left (policy?)
30 * - don't normalize tails
31 * - quota
32 * - reservation for superuser
33 *
34 * TODO v3:
35 * - bitmap read-ahead (proposed by Oleg Drokin aka green)
36 * - track min/max extents in each group for better group selection
37 * - mb_mark_used() may allocate chunk right after splitting buddy
38 * - tree of groups sorted by number of free blocks
39 * - error handling
40 */
41
42 /*
43 * The allocation request involves a request for multiple blocks near
44 * the specified goal block.
45 *
46 * During the initialization phase of the allocator we decide to use either
47 * group preallocation or inode preallocation depending on the size of
48 * the file. The size of the file is the resulting file size we
49 * would have after allocation, or the current file size, whichever
50 * is larger. If the size is less than sbi->s_mb_stream_request we
51 * select group preallocation. The default value of
52 * s_mb_stream_request is 16 blocks. This can also be tuned via
53 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
54 * terms of number of blocks.
55 *
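 * As a rough sketch (not the exact code, just the shape of the decision made
 * when the allocation context is set up):
 *
 *	size = max(size_after_allocation, current_file_size);
 *	if (size < sbi->s_mb_stream_request)
 *		use the per-cpu locality group preallocation	(small files)
 *	else
 *		use the per-inode preallocation			(large files)
 *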
56 * The main motivation for having small files use group preallocation is to
57 * ensure that small files are kept closer together on the disk.
58 *
59 * In the first stage the allocator looks at the inode prealloc list,
60 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
61 * spaces for this particular inode. The inode prealloc space is
62 * represented as:
63 *
64 * pa_lstart -> the logical start block for this prealloc space
65 * pa_pstart -> the physical start block for this prealloc space
66 * pa_len -> length for this prealloc space (in clusters)
67 * pa_free -> free space available in this prealloc space (in clusters)
68 *
69 * The inode preallocation space is used by looking at the _logical_ start
70 * block. Only if the logical file block falls within the range of a prealloc
71 * space do we consume that particular prealloc space. This makes sure that
72 * we have contiguous physical blocks representing the file blocks.
73 *
74 * The important thing to note about inode prealloc space is that
75 * we don't modify the values associated with it except for
76 * pa_free.
77 *
78 * If we are not able to find blocks in the inode prealloc space and we
79 * have the group allocation flag set, then we look at the locality group
80 * prealloc space. This is a per-CPU prealloc list, represented as
81 *
82 * ext4_sb_info.s_locality_groups[smp_processor_id()]
83 *
84 * The reason for having a per cpu locality group is to reduce the contention
85 * between CPUs. It is possible to get scheduled at this point.
86 *
87 * The locality group prealloc space is used by looking at whether we have
88 * enough free space (pa_free) within the prealloc space.
89 *
90 * If we can't allocate blocks via inode prealloc and/or locality group
91 * prealloc then we look at the buddy cache. The buddy cache is represented
92 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
93 * mapped to the buddy and bitmap information regarding different
94 * groups. The buddy information is attached to the buddy cache inode so that
95 * we can access it through the page cache. The information regarding
96 * each group is loaded via ext4_mb_load_buddy. It consists of the
97 * block bitmap and the buddy information, and it is stored in the
98 * inode as:
99 *
100 * { page }
101 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
102 *
103 *
104 * one block each for bitmap and buddy information. So for each group we
105 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
106 * blocksize) blocks. So it can hold information for groups_per_page
107 * groups, which is blocks_per_page/2.
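 *
 * A quick worked example, assuming PAGE_SIZE = 4096 and blocksize = 1024:
 * blocks_per_page = 4 and groups_per_page = 2. The bitmap of group g sits at
 * logical block (g * 2) of the buddy cache inode and its buddy at
 * (g * 2 + 1), so
 *
 *	block = g * 2;				(or g * 2 + 1 for the buddy)
 *	pnum  = block / blocks_per_page;	page index in s_buddy_cache
 *	poff  = block % blocks_per_page;	block offset within that page
 *
 * which is the arithmetic used by ext4_mb_get_buddy_page_lock() and
 * ext4_mb_load_buddy_gfp() below.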
108 *
109 * The buddy cache inode is not stored on disk. The inode is thrown
110 * away when the filesystem is unmounted.
111 *
112 * We look for count number of blocks in the buddy cache. If we were able
113 * to locate that many free blocks we return with additional information
114 * regarding the rest of the contiguous physical blocks available.
115 *
116 * Before allocating blocks via the buddy cache we normalize the request
117 * blocks. This ensures we ask for more blocks than we need. The extra
118 * blocks that we get after allocation are added to the respective prealloc
119 * list. In case of inode preallocation we follow a list of heuristics
120 * based on file size. This can be found in ext4_mb_normalize_request. If
121 * we are doing a group prealloc we try to normalize the request to
122 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
123 * dependent on the cluster size; for non-bigalloc file systems, it is
124 * 512 blocks. This can be tuned via
125 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
126 * terms of number of blocks. If we have mounted the file system with the -o
127 * stripe=<value> option the group prealloc request is normalized to the
128 * smallest multiple of the stripe value (sbi->s_stripe) which is
129 * greater than the default mb_group_prealloc.
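 *
 * For example, assuming the non-bigalloc default of 512 blocks, mounting with
 * -o stripe=48 rounds the group prealloc size up to the next stripe multiple,
 * roughly:
 *
 *	sbi->s_mb_group_prealloc =
 *		roundup(sbi->s_mb_group_prealloc, sbi->s_stripe);	-> 528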
130 *
131 * If "mb_optimize_scan" mount option is set, we maintain in memory group info
132 * structures in two data structures:
133 *
134 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
135 *
136 * Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
137 *
138 * This is an array of lists where the index in the array represents the
139 * largest free order in the buddy bitmap of the participating group infos of
140 * that list. So, there are exactly MB_NUM_ORDERS(sb) (i.e. the total
141 * number of possible buddy bitmap orders) such lists. Group infos are
142 * placed in the appropriate lists.
143 *
144 * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
145 *
146 * Locking: sbi->s_mb_rb_lock (rwlock)
147 *
148 * This is a red black tree consisting of group infos and the tree is sorted
149 * by average fragment sizes (which is calculated as ext4_group_info->bb_free
150 * / ext4_group_info->bb_fragments).
151 *
152 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
153 * structures to decide the order in which groups are to be traversed for
154 * fulfilling an allocation request.
155 *
156 * At CR = 0, we look for groups which have the largest_free_order >= the order
157 * of the request. We directly look at the largest free order list in the data
158 * structure (1) above where largest_free_order = order of the request. If that
159 * list is empty, we look at the remaining lists in increasing order of
160 * largest_free_order. This allows us to perform CR = 0 lookup in O(1) time.
161 *
162 * At CR = 1, we only consider groups whose average fragment size is >= the
163 * request size. So, we look up a group which has an average fragment size just
164 * above or equal to the request size using our rb tree (data structure 2) in O(log N) time.
165 *
166 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
167 * linear order which requires O(N) search time for each CR 0 and CR 1 phase.
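 *
 * A rough sketch of the CR = 0 lookup (simplified from
 * ext4_mb_choose_next_group_cr0() below):
 *
 *	for (i = order of request; i < MB_NUM_ORDERS(sb); i++)
 *		pick the first group on s_mb_largest_free_orders[i] that
 *		passes ext4_mb_good_group();
 *	if no group was found, bump the allocation to CR = 1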
168 *
169 * The regular allocator (using the buddy cache) supports a few tunables.
170 *
171 * /sys/fs/ext4/<partition>/mb_min_to_scan
172 * /sys/fs/ext4/<partition>/mb_max_to_scan
173 * /sys/fs/ext4/<partition>/mb_order2_req
174 * /sys/fs/ext4/<partition>/mb_linear_limit
175 *
176 * The regular allocator uses buddy scan only if the request len is a power of
177 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
178 * value of s_mb_order2_reqs can be tuned via
179 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
180 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
181 * stripe-size units. This should result in better allocation on RAID setups. If
182 * not, we search in the specific group using the bitmap for best extents. The
183 * tunables min_to_scan and max_to_scan control the behaviour here.
184 * min_to_scan indicates how long mballoc __must__ look for a best
185 * extent and max_to_scan indicates how long mballoc __can__ look for a
186 * best extent among the found extents. Searching for blocks starts with
187 * the group specified as the goal value in the allocation context via
188 * ac_g_ex. Each group is first checked to determine whether it
189 * can be used for allocation. ext4_mb_good_group explains how groups are
190 * checked.
191 *
192 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
193 * get traversed linearly. That may result in subsequent allocations being not
194 * close to each other. And so, the underlying device may get filled up in a
195 * non-linear fashion. While that may not matter on non-rotational devices, for
196 * rotational devices that may result in higher seek times. "mb_linear_limit"
197 * tells mballoc how many groups mballoc should search linearly before
198 * performing consulting above data structures for more efficient lookups. For
199 * non rotational devices, this value defaults to 0 and for rotational devices
200 * this is set to MB_DEFAULT_LINEAR_LIMIT.
201 *
202 * Both types of prealloc space get populated as described above. So for the
203 * first request we will hit the buddy cache, which will result in the prealloc
204 * space getting filled. The prealloc space is then later used for
205 * subsequent requests.
206 */
207
208 /*
209 * mballoc operates on the following data:
210 * - on-disk bitmap
211 * - in-core buddy (actually includes buddy and bitmap)
212 * - preallocation descriptors (PAs)
213 *
214 * there are two types of preallocations:
215 * - inode
216 * assigned to a specific inode and can be used for this inode only.
217 * it describes part of the inode's space preallocated to specific
218 * physical blocks. any block from that preallocation can be used
219 * independently. the descriptor just tracks the number of blocks left
220 * unused. so, before taking some block from the descriptor, one must
221 * make sure the corresponding logical block isn't allocated yet. this
222 * also means that freeing any block within the descriptor's range
223 * must discard all preallocated blocks.
224 * - locality group
225 * assigned to a specific locality group which does not translate to a
226 * permanent set of inodes: an inode can join and leave a group. space
227 * from this type of preallocation can be used for any inode. thus
228 * it's consumed from the beginning to the end.
229 *
230 * relation between them can be expressed as:
231 * in-core buddy = on-disk bitmap + preallocation descriptors
232 *
233 * this means the blocks mballoc considers used are:
234 * - allocated blocks (persistent)
235 * - preallocated blocks (non-persistent)
236 *
237 * consistency in mballoc world means that at any time a block is either
238 * free or used in ALL structures. notice: "any time" should not be read
239 * literally -- time is discrete and delimited by locks.
240 *
241 * to keep it simple, we don't use block numbers, instead we count number of
242 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
243 *
244 * all operations can be expressed as:
245 * - init buddy: buddy = on-disk + PAs
246 * - new PA: buddy += N; PA = N
247 * - use inode PA: on-disk += N; PA -= N
248 * - discard inode PA: buddy -= on-disk - PA; PA = 0
249 * - use locality group PA: on-disk += N; PA -= N
250 * - discard locality group PA: buddy -= PA; PA = 0
251 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
252 * is used in real operation because we can't know actual used
253 * bits from PA, only from on-disk bitmap
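 *
 * A small worked example of the accounting above: creating a new PA of 16
 * blocks marks 16 blocks used in the buddy (buddy += 16; PA = 16). Using 4
 * of them sets 4 bits in the on-disk bitmap (on-disk += 4; PA -= 4, leaving
 * PA = 12). Discarding the inode PA then returns to the buddy only the
 * blocks that never made it to the on-disk bitmap (buddy -= 12; PA = 0).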
254 *
255 * if we follow this strict logic, then all operations above should be atomic.
256 * given some of them can block, we'd have to use something like semaphores
257 * killing performance on high-end SMP hardware. let's try to relax it using
258 * the following knowledge:
259 * 1) if buddy is referenced, it's already initialized
260 * 2) while block is used in buddy and the buddy is referenced,
261 * nobody can re-allocate that block
262 * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
263 * a bit set and a PA claims the same block, it's OK. IOW, one can set a bit
264 * in the on-disk bitmap if the buddy has the same bit set and/or a PA covers
265 * the corresponding block
266 *
267 * so, now we're building a concurrency table:
268 * - init buddy vs.
269 * - new PA
270 * blocks for PA are allocated in the buddy, buddy must be referenced
271 * until PA is linked to allocation group to avoid concurrent buddy init
272 * - use inode PA
273 * we need to make sure that either on-disk bitmap or PA has uptodate data
274 * given (3) we care that PA-=N operation doesn't interfere with init
275 * - discard inode PA
276 * the simplest way would be to have buddy initialized by the discard
277 * - use locality group PA
278 * again PA-=N must be serialized with init
279 * - discard locality group PA
280 * the simplest way would be to have buddy initialized by the discard
281 * - new PA vs.
282 * - use inode PA
283 * i_data_sem serializes them
284 * - discard inode PA
285 * discard process must wait until PA isn't used by another process
286 * - use locality group PA
287 * some mutex should serialize them
288 * - discard locality group PA
289 * discard process must wait until PA isn't used by another process
290 * - use inode PA
291 * - use inode PA
292 * i_data_sem or another mutex should serialize them
293 * - discard inode PA
294 * discard process must wait until PA isn't used by another process
295 * - use locality group PA
296 * nothing wrong here -- they're different PAs covering different blocks
297 * - discard locality group PA
298 * discard process must wait until PA isn't used by another process
299 *
300 * now we're ready to draw a few conclusions:
301 * - while a PA is referenced, no discard of it is possible
302 * - a PA is referenced until its blocks are marked in the on-disk bitmap
303 * - a PA changes only after the on-disk bitmap does
304 * - discard must not compete with init. either init is done before
305 * any discard or they're serialized somehow
306 * - buddy init as sum of on-disk bitmap and PAs is done atomically
307 *
308 * a special case is when we've used a PA to emptiness. no need to modify the
309 * buddy in this case, but we should care about concurrent init
310 *
311 */
312
313 /*
314 * Logic in few words:
315 *
316 * - allocation:
317 * load group
318 * find blocks
319 * mark bits in on-disk bitmap
320 * release group
321 *
322 * - use preallocation:
323 * find proper PA (per-inode or group)
324 * load group
325 * mark bits in on-disk bitmap
326 * release group
327 * release PA
328 *
329 * - free:
330 * load group
331 * mark bits in on-disk bitmap
332 * release group
333 *
334 * - discard preallocations in group:
335 * mark PAs deleted
336 * move them onto local list
337 * load on-disk bitmap
338 * load group
339 * remove PA from object (inode or locality group)
340 * mark free blocks in-core
341 *
342 * - discard inode's preallocations:
343 */
344
345 /*
346 * Locking rules
347 *
348 * Locks:
349 * - bitlock on a group (group)
350 * - object (inode/locality) (object)
351 * - per-pa lock (pa)
352 * - cr0 lists lock (cr0)
353 * - cr1 tree lock (cr1)
354 *
355 * Paths:
356 * - new pa
357 * object
358 * group
359 *
360 * - find and use pa:
361 * pa
362 *
363 * - release consumed pa:
364 * pa
365 * group
366 * object
367 *
368 * - generate in-core bitmap:
369 * group
370 * pa
371 *
372 * - discard all for given object (inode, locality group):
373 * object
374 * pa
375 * group
376 *
377 * - discard all for given group:
378 * group
379 * pa
380 * group
381 * object
382 *
383 * - allocation path (ext4_mb_regular_allocator)
384 * group
385 * cr0/cr1
386 */
387 static struct kmem_cache *ext4_pspace_cachep;
388 static struct kmem_cache *ext4_ac_cachep;
389 static struct kmem_cache *ext4_free_data_cachep;
390
391 /* We create slab caches for groupinfo data structures based on the
392 * superblock block size. There will be one per mounted filesystem for
393 * each unique s_blocksize_bits */
394 #define NR_GRPINFO_CACHES 8
395 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
396
397 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
398 "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
399 "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
400 "ext4_groupinfo_64k", "ext4_groupinfo_128k"
401 };
402
403 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
404 ext4_group_t group);
405 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
406 ext4_group_t group);
407 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
408
409 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
410 ext4_group_t group, int cr);
411
412 static int ext4_try_to_trim_range(struct super_block *sb,
413 struct ext4_buddy *e4b, ext4_grpblk_t start,
414 ext4_grpblk_t max, ext4_grpblk_t minblocks);
415
416 /*
417 * The algorithm using this percpu seq counter goes as follows:
418 * 1. We sample the percpu discard_pa_seq counter before trying for block
419 * allocation in ext4_mb_new_blocks().
420 * 2. We increment this percpu discard_pa_seq counter when we either allocate
421 * or free these blocks i.e. while marking those blocks as used/free in
422 * mb_mark_used()/mb_free_blocks().
423 * 3. We also increment this percpu seq counter when we successfully identify
424 * that the bb_prealloc_list is not empty and hence proceed for discarding
425 * of those PAs inside ext4_mb_discard_group_preallocations().
426 *
427 * Now to make sure that the regular fast path of block allocation is not
428 * affected, as a small optimization we only sample the percpu seq counter
429 * on that cpu. Only when the block allocation fails and the count of freed
430 * blocks found was 0 do we sample the percpu seq counter for all cpus using
431 * the function ext4_get_discard_pa_seq_sum() below. This happens after making
432 * sure that all the PAs on grp->bb_prealloc_list got freed or that the list is empty.
433 */
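
/*
 * In rough pseudo-code, the retry loop in ext4_mb_new_blocks() built on top
 * of this counter looks like (simplified, not the exact code):
 *
 *	seq = this_cpu_read(discard_pa_seq);
 * repeat:
 *	try the regular allocator;
 *	if (it failed &&
 *	    ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
 *		goto repeat;
 */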
434 static DEFINE_PER_CPU(u64, discard_pa_seq);
435 static inline u64 ext4_get_discard_pa_seq_sum(void)
436 {
437 int __cpu;
438 u64 __seq = 0;
439
440 for_each_possible_cpu(__cpu)
441 __seq += per_cpu(discard_pa_seq, __cpu);
442 return __seq;
443 }
444
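/*
 * The mb_*_bit() helpers below may be handed addresses that are not
 * unsigned-long aligned (e.g. an arbitrary offset into a page).
 * mb_correct_addr_and_bit() rounds the address down to the nearest
 * unsigned long boundary and bumps the bit number by the bits skipped,
 * so that the underlying ext4_*_bit() primitives always see an aligned
 * address.
 */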
445 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
446 {
447 #if BITS_PER_LONG == 64
448 *bit += ((unsigned long) addr & 7UL) << 3;
449 addr = (void *) ((unsigned long) addr & ~7UL);
450 #elif BITS_PER_LONG == 32
451 *bit += ((unsigned long) addr & 3UL) << 3;
452 addr = (void *) ((unsigned long) addr & ~3UL);
453 #else
454 #error "how many bits you are?!"
455 #endif
456 return addr;
457 }
458
459 static inline int mb_test_bit(int bit, void *addr)
460 {
461 /*
462 * ext4_test_bit on architectures like powerpc
463 * needs an unsigned long aligned address
464 */
465 addr = mb_correct_addr_and_bit(&bit, addr);
466 return ext4_test_bit(bit, addr);
467 }
468
469 static inline void mb_set_bit(int bit, void *addr)
470 {
471 addr = mb_correct_addr_and_bit(&bit, addr);
472 ext4_set_bit(bit, addr);
473 }
474
475 static inline void mb_clear_bit(int bit, void *addr)
476 {
477 addr = mb_correct_addr_and_bit(&bit, addr);
478 ext4_clear_bit(bit, addr);
479 }
480
481 static inline int mb_test_and_clear_bit(int bit, void *addr)
482 {
483 addr = mb_correct_addr_and_bit(&bit, addr);
484 return ext4_test_and_clear_bit(bit, addr);
485 }
486
487 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
488 {
489 int fix = 0, ret, tmpmax;
490 addr = mb_correct_addr_and_bit(&fix, addr);
491 tmpmax = max + fix;
492 start += fix;
493
494 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
495 if (ret > max)
496 return max;
497 return ret;
498 }
499
500 static inline int mb_find_next_bit(void *addr, int max, int start)
501 {
502 int fix = 0, ret, tmpmax;
503 addr = mb_correct_addr_and_bit(&fix, addr);
504 tmpmax = max + fix;
505 start += fix;
506
507 ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
508 if (ret > max)
509 return max;
510 return ret;
511 }
512
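/*
 * Return the bitmap to use for the given buddy @order and the number of bits
 * it holds via @max. Order 0 is the block bitmap itself (one bit per
 * cluster); higher orders live in the buddy block at offset
 * s_mb_offsets[order] and hold s_mb_maxs[order] bits (one bit per 2^order
 * clusters).
 */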
513 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
514 {
515 char *bb;
516
517 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
518 BUG_ON(max == NULL);
519
520 if (order > e4b->bd_blkbits + 1) {
521 *max = 0;
522 return NULL;
523 }
524
525 /* at order 0 we see each particular block */
526 if (order == 0) {
527 *max = 1 << (e4b->bd_blkbits + 3);
528 return e4b->bd_bitmap;
529 }
530
531 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
532 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
533
534 return bb;
535 }
536
537 #ifdef DOUBLE_CHECK
538 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
539 int first, int count)
540 {
541 int i;
542 struct super_block *sb = e4b->bd_sb;
543
544 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
545 return;
546 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
547 for (i = 0; i < count; i++) {
548 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
549 ext4_fsblk_t blocknr;
550
551 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
552 blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
553 ext4_grp_locked_error(sb, e4b->bd_group,
554 inode ? inode->i_ino : 0,
555 blocknr,
556 "freeing block already freed "
557 "(bit %u)",
558 first + i);
559 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
560 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
561 }
562 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
563 }
564 }
565
566 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
567 {
568 int i;
569
570 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
571 return;
572 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
573 for (i = 0; i < count; i++) {
574 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
575 mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
576 }
577 }
578
579 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
580 {
581 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
582 return;
583 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
584 unsigned char *b1, *b2;
585 int i;
586 b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
587 b2 = (unsigned char *) bitmap;
588 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
589 if (b1[i] != b2[i]) {
590 ext4_msg(e4b->bd_sb, KERN_ERR,
591 "corruption in group %u "
592 "at byte %u(%u): %x in copy != %x "
593 "on disk/prealloc",
594 e4b->bd_group, i, i * 8, b1[i], b2[i]);
595 BUG();
596 }
597 }
598 }
599 }
600
601 static void mb_group_bb_bitmap_alloc(struct super_block *sb,
602 struct ext4_group_info *grp, ext4_group_t group)
603 {
604 struct buffer_head *bh;
605
606 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
607 if (!grp->bb_bitmap)
608 return;
609
610 bh = ext4_read_block_bitmap(sb, group);
611 if (IS_ERR_OR_NULL(bh)) {
612 kfree(grp->bb_bitmap);
613 grp->bb_bitmap = NULL;
614 return;
615 }
616
617 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
618 put_bh(bh);
619 }
620
621 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
622 {
623 kfree(grp->bb_bitmap);
624 }
625
626 #else
627 static inline void mb_free_blocks_double(struct inode *inode,
628 struct ext4_buddy *e4b, int first, int count)
629 {
630 return;
631 }
632 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
633 int first, int count)
634 {
635 return;
636 }
637 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
638 {
639 return;
640 }
641
642 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
643 struct ext4_group_info *grp, ext4_group_t group)
644 {
645 return;
646 }
647
648 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
649 {
650 return;
651 }
652 #endif
653
654 #ifdef AGGRESSIVE_CHECK
655
656 #define MB_CHECK_ASSERT(assert) \
657 do { \
658 if (!(assert)) { \
659 printk(KERN_EMERG \
660 "Assertion failure in %s() at %s:%d: \"%s\"\n", \
661 function, file, line, # assert); \
662 BUG(); \
663 } \
664 } while (0)
665
666 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
667 const char *function, int line)
668 {
669 struct super_block *sb = e4b->bd_sb;
670 int order = e4b->bd_blkbits + 1;
671 int max;
672 int max2;
673 int i;
674 int j;
675 int k;
676 int count;
677 struct ext4_group_info *grp;
678 int fragments = 0;
679 int fstart;
680 struct list_head *cur;
681 void *buddy;
682 void *buddy2;
683
684 if (e4b->bd_info->bb_check_counter++ % 10)
685 return 0;
686
687 while (order > 1) {
688 buddy = mb_find_buddy(e4b, order, &max);
689 MB_CHECK_ASSERT(buddy);
690 buddy2 = mb_find_buddy(e4b, order - 1, &max2);
691 MB_CHECK_ASSERT(buddy2);
692 MB_CHECK_ASSERT(buddy != buddy2);
693 MB_CHECK_ASSERT(max * 2 == max2);
694
695 count = 0;
696 for (i = 0; i < max; i++) {
697
698 if (mb_test_bit(i, buddy)) {
699 /* only single bit in buddy2 may be 1 */
700 if (!mb_test_bit(i << 1, buddy2)) {
701 MB_CHECK_ASSERT(
702 mb_test_bit((i<<1)+1, buddy2));
703 } else if (!mb_test_bit((i << 1) + 1, buddy2)) {
704 MB_CHECK_ASSERT(
705 mb_test_bit(i << 1, buddy2));
706 }
707 continue;
708 }
709
710 /* both bits in buddy2 must be 1 */
711 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
712 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
713
714 for (j = 0; j < (1 << order); j++) {
715 k = (i * (1 << order)) + j;
716 MB_CHECK_ASSERT(
717 !mb_test_bit(k, e4b->bd_bitmap));
718 }
719 count++;
720 }
721 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
722 order--;
723 }
724
725 fstart = -1;
726 buddy = mb_find_buddy(e4b, 0, &max);
727 for (i = 0; i < max; i++) {
728 if (!mb_test_bit(i, buddy)) {
729 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
730 if (fstart == -1) {
731 fragments++;
732 fstart = i;
733 }
734 continue;
735 }
736 fstart = -1;
737 /* check used bits only */
738 for (j = 0; j < e4b->bd_blkbits + 1; j++) {
739 buddy2 = mb_find_buddy(e4b, j, &max2);
740 k = i >> j;
741 MB_CHECK_ASSERT(k < max2);
742 MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
743 }
744 }
745 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
746 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
747
748 grp = ext4_get_group_info(sb, e4b->bd_group);
749 if (!grp)
750 return 0;
751 list_for_each(cur, &grp->bb_prealloc_list) {
752 ext4_group_t groupnr;
753 struct ext4_prealloc_space *pa;
754 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
755 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
756 MB_CHECK_ASSERT(groupnr == e4b->bd_group);
757 for (i = 0; i < pa->pa_len; i++)
758 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
759 }
760 return 0;
761 }
762 #undef MB_CHECK_ASSERT
763 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
764 __FILE__, __func__, __LINE__)
765 #else
766 #define mb_check_buddy(e4b)
767 #endif
768
769 /*
770 * Divide the blocks starting at @first with length @len into
771 * smaller chunks with power-of-2 sizes.
772 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
773 * then increase bb_counters[] for the corresponding chunk size.
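 *
 * For example, marking 7 free clusters starting at cluster 0 works out as a
 * chunk of 4 (bb_counters[2]++, clear one bit in the order-2 buddy), then a
 * chunk of 2 (bb_counters[1]++, clear one bit in the order-1 buddy), then a
 * single cluster (bb_counters[0]++, no buddy bit to clear).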
774 */
775 static void ext4_mb_mark_free_simple(struct super_block *sb,
776 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
777 struct ext4_group_info *grp)
778 {
779 struct ext4_sb_info *sbi = EXT4_SB(sb);
780 ext4_grpblk_t min;
781 ext4_grpblk_t max;
782 ext4_grpblk_t chunk;
783 unsigned int border;
784
785 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
786
787 border = 2 << sb->s_blocksize_bits;
788
789 while (len > 0) {
790 /* find how many blocks can be covered since this position */
791 max = ffs(first | border) - 1;
792
793 /* find how many blocks of power 2 we need to mark */
794 min = fls(len) - 1;
795
796 if (max < min)
797 min = max;
798 chunk = 1 << min;
799
800 /* mark multiblock chunks only */
801 grp->bb_counters[min]++;
802 if (min > 0)
803 mb_clear_bit(first >> min,
804 buddy + sbi->s_mb_offsets[min]);
805
806 len -= chunk;
807 first += chunk;
808 }
809 }
810
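/*
 * Insert @new into the rb tree at @root at the position chosen by @cmp and
 * rebalance. Used for the avg_fragment_size tree below.
 */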
811 static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
812 int (*cmp)(struct rb_node *, struct rb_node *))
813 {
814 struct rb_node **iter = &root->rb_node, *parent = NULL;
815
816 while (*iter) {
817 parent = *iter;
818 if (cmp(new, *iter) > 0)
819 iter = &((*iter)->rb_left);
820 else
821 iter = &((*iter)->rb_right);
822 }
823
824 rb_link_node(new, parent, iter);
825 rb_insert_color(new, root);
826 }
827
828 static int
829 ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2)
830 {
831 struct ext4_group_info *grp1 = rb_entry(rb1,
832 struct ext4_group_info,
833 bb_avg_fragment_size_rb);
834 struct ext4_group_info *grp2 = rb_entry(rb2,
835 struct ext4_group_info,
836 bb_avg_fragment_size_rb);
837 int num_frags_1, num_frags_2;
838
839 num_frags_1 = grp1->bb_fragments ?
840 grp1->bb_free / grp1->bb_fragments : 0;
841 num_frags_2 = grp2->bb_fragments ?
842 grp2->bb_free / grp2->bb_fragments : 0;
843
844 return (num_frags_2 - num_frags_1);
845 }
846
847 /*
848 * Reinsert grpinfo into the avg_fragment_size tree with new average
849 * fragment size.
850 */
851 static void
852 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
853 {
854 struct ext4_sb_info *sbi = EXT4_SB(sb);
855
856 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
857 return;
858
859 write_lock(&sbi->s_mb_rb_lock);
860 if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) {
861 rb_erase(&grp->bb_avg_fragment_size_rb,
862 &sbi->s_mb_avg_fragment_size_root);
863 RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb);
864 }
865
866 ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root,
867 &grp->bb_avg_fragment_size_rb,
868 ext4_mb_avg_fragment_size_cmp);
869 write_unlock(&sbi->s_mb_rb_lock);
870 }
871
872 /*
873 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
874 * cr level needs an update.
875 */
876 static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
877 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
878 {
879 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
880 struct ext4_group_info *iter, *grp;
881 int i;
882
883 if (ac->ac_status == AC_STATUS_FOUND)
884 return;
885
886 if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
887 atomic_inc(&sbi->s_bal_cr0_bad_suggestions);
888
889 grp = NULL;
890 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
891 if (list_empty(&sbi->s_mb_largest_free_orders[i]))
892 continue;
893 read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
894 if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
895 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
896 continue;
897 }
898 grp = NULL;
899 list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
900 bb_largest_free_order_node) {
901 if (sbi->s_mb_stats)
902 atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
903 if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
904 grp = iter;
905 break;
906 }
907 }
908 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
909 if (grp)
910 break;
911 }
912
913 if (!grp) {
914 /* Increment cr and search again */
915 *new_cr = 1;
916 } else {
917 *group = grp->bb_group;
918 ac->ac_last_optimal_group = *group;
919 ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
920 }
921 }
922
923 /*
924 * Choose next group by traversing average fragment size tree. Updates *new_cr
925 * if cr level needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that
926 * the linear search should continue for one iteration since there's lock
927 * contention on the rb tree lock.
928 */
929 static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
930 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
931 {
932 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
933 int avg_fragment_size, best_so_far;
934 struct rb_node *node, *found;
935 struct ext4_group_info *grp;
936
937 /*
938 * If there is contention on the lock, instead of waiting for the lock
939 * to become available, just continue searching linearly. We'll resume
940 * our rb tree search later starting at ac->ac_last_optimal_group.
941 */
942 if (!read_trylock(&sbi->s_mb_rb_lock)) {
943 ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR;
944 return;
945 }
946
947 if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
948 if (sbi->s_mb_stats)
949 atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
950 /* We have found something at CR 1 in the past */
951 grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group);
952 for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL;
953 found = rb_next(found)) {
954 grp = rb_entry(found, struct ext4_group_info,
955 bb_avg_fragment_size_rb);
956 if (sbi->s_mb_stats)
957 atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
958 if (likely(ext4_mb_good_group(ac, grp->bb_group, 1)))
959 break;
960 }
961 goto done;
962 }
963
964 node = sbi->s_mb_avg_fragment_size_root.rb_node;
965 best_so_far = 0;
966 found = NULL;
967
968 while (node) {
969 grp = rb_entry(node, struct ext4_group_info,
970 bb_avg_fragment_size_rb);
971 avg_fragment_size = 0;
972 if (ext4_mb_good_group(ac, grp->bb_group, 1)) {
973 avg_fragment_size = grp->bb_fragments ?
974 grp->bb_free / grp->bb_fragments : 0;
975 if (!best_so_far || avg_fragment_size < best_so_far) {
976 best_so_far = avg_fragment_size;
977 found = node;
978 }
979 }
980 if (avg_fragment_size > ac->ac_g_ex.fe_len)
981 node = node->rb_right;
982 else
983 node = node->rb_left;
984 }
985
986 done:
987 if (found) {
988 grp = rb_entry(found, struct ext4_group_info,
989 bb_avg_fragment_size_rb);
990 *group = grp->bb_group;
991 ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
992 } else {
993 *new_cr = 2;
994 }
995
996 read_unlock(&sbi->s_mb_rb_lock);
997 ac->ac_last_optimal_group = *group;
998 }
999
1000 static inline int should_optimize_scan(struct ext4_allocation_context *ac)
1001 {
1002 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
1003 return 0;
1004 if (ac->ac_criteria >= 2)
1005 return 0;
1006 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
1007 return 0;
1008 return 1;
1009 }
1010
1011 /*
1012 * Return next linear group for allocation. If linear traversal should not be
1013 * performed, this function just returns the same group
1014 */
1015 static ext4_group_t
1016 next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
1017 ext4_group_t ngroups)
1018 {
1019 if (!should_optimize_scan(ac))
1020 goto inc_and_return;
1021
1022 if (ac->ac_groups_linear_remaining) {
1023 ac->ac_groups_linear_remaining--;
1024 goto inc_and_return;
1025 }
1026
1027 if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) {
1028 ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR;
1029 goto inc_and_return;
1030 }
1031
1032 return group;
1033 inc_and_return:
1034 /*
1035 * Artificially restricted ngroups for non-extent
1036 * files makes group > ngroups possible on first loop.
1037 */
1038 return group + 1 >= ngroups ? 0 : group + 1;
1039 }
1040
1041 /*
1042 * ext4_mb_choose_next_group: choose next group for allocation.
1043 *
1044 * @ac Allocation Context
1045 * @new_cr This is an output parameter. If there is no good group
1046 * available at current CR level, this field is updated to indicate
1047 * the new cr level that should be used.
1048 * @group This is an input / output parameter. As an input it indicates the
1049 * next group that the allocator intends to use for allocation. As
1050 * output, this field indicates the next group that should be used as
1051 * determined by the optimization functions.
1052 * @ngroups Total number of groups
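 *
 * Roughly, ext4_mb_regular_allocator() calls this once per iteration of its
 * group scan loop, so that with mb_optimize_scan enabled the next group (and
 * possibly the next cr level) comes from the lists/tree described at the top
 * of this file rather than from a simple "group + 1".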
1053 */
1054 static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1055 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1056 {
1057 *new_cr = ac->ac_criteria;
1058
1059 if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
1060 *group = next_linear_group(ac, *group, ngroups);
1061 return;
1062 }
1063
1064 if (*new_cr == 0) {
1065 ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
1066 } else if (*new_cr == 1) {
1067 ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
1068 } else {
1069 /*
1070 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
1071 * bb_free. But until that happens, we should never come here.
1072 */
1073 WARN_ON(1);
1074 }
1075 }
1076
1077 /*
1078 * Cache the order of the largest free extent we have available in this block
1079 * group.
1080 */
1081 static void
1082 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
1083 {
1084 struct ext4_sb_info *sbi = EXT4_SB(sb);
1085 int i;
1086
1087 for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
1088 if (grp->bb_counters[i] > 0)
1089 break;
1090 /* No need to move between order lists? */
1091 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
1092 i == grp->bb_largest_free_order) {
1093 grp->bb_largest_free_order = i;
1094 return;
1095 }
1096
1097 if (grp->bb_largest_free_order >= 0) {
1098 write_lock(&sbi->s_mb_largest_free_orders_locks[
1099 grp->bb_largest_free_order]);
1100 list_del_init(&grp->bb_largest_free_order_node);
1101 write_unlock(&sbi->s_mb_largest_free_orders_locks[
1102 grp->bb_largest_free_order]);
1103 }
1104 grp->bb_largest_free_order = i;
1105 if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
1106 write_lock(&sbi->s_mb_largest_free_orders_locks[
1107 grp->bb_largest_free_order]);
1108 list_add_tail(&grp->bb_largest_free_order_node,
1109 &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1110 write_unlock(&sbi->s_mb_largest_free_orders_locks[
1111 grp->bb_largest_free_order]);
1112 }
1113 }
1114
1115 static noinline_for_stack
1116 void ext4_mb_generate_buddy(struct super_block *sb,
1117 void *buddy, void *bitmap, ext4_group_t group,
1118 struct ext4_group_info *grp)
1119 {
1120 struct ext4_sb_info *sbi = EXT4_SB(sb);
1121 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1122 ext4_grpblk_t i = 0;
1123 ext4_grpblk_t first;
1124 ext4_grpblk_t len;
1125 unsigned free = 0;
1126 unsigned fragments = 0;
1127 unsigned long long period = get_cycles();
1128
1129 /* initialize buddy from bitmap which is aggregation
1130 * of on-disk bitmap and preallocations */
1131 i = mb_find_next_zero_bit(bitmap, max, 0);
1132 grp->bb_first_free = i;
1133 while (i < max) {
1134 fragments++;
1135 first = i;
1136 i = mb_find_next_bit(bitmap, max, i);
1137 len = i - first;
1138 free += len;
1139 if (len > 1)
1140 ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1141 else
1142 grp->bb_counters[0]++;
1143 if (i < max)
1144 i = mb_find_next_zero_bit(bitmap, max, i);
1145 }
1146 grp->bb_fragments = fragments;
1147
1148 if (free != grp->bb_free) {
1149 ext4_grp_locked_error(sb, group, 0, 0,
1150 "block bitmap and bg descriptor "
1151 "inconsistent: %u vs %u free clusters",
1152 free, grp->bb_free);
1153 /*
1154 * If we intend to continue, we consider group descriptor
1155 * corrupt and update bb_free using bitmap value
1156 */
1157 grp->bb_free = free;
1158 ext4_mark_group_bitmap_corrupted(sb, group,
1159 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1160 }
1161 mb_set_largest_free_order(sb, grp);
1162
1163 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1164
1165 period = get_cycles() - period;
1166 atomic_inc(&sbi->s_mb_buddies_generated);
1167 atomic64_add(period, &sbi->s_mb_generation_time);
1168 mb_update_avg_fragment_size(sb, grp);
1169 }
1170
1171 static void mb_regenerate_buddy(struct ext4_buddy *e4b)
1172 {
1173 int count;
1174 int order = 1;
1175 void *buddy;
1176
1177 while ((buddy = mb_find_buddy(e4b, order++, &count)))
1178 ext4_set_bits(buddy, 0, count);
1179
1180 e4b->bd_info->bb_fragments = 0;
1181 memset(e4b->bd_info->bb_counters, 0,
1182 sizeof(*e4b->bd_info->bb_counters) *
1183 (e4b->bd_sb->s_blocksize_bits + 2));
1184
1185 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
1186 e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
1187 }
1188
1189 /* The buddy information is attached to the buddy cache inode
1190 * for convenience. The information regarding each group
1191 * is loaded via ext4_mb_load_buddy. It consists of the
1192 * block bitmap and the buddy information, and it is
1193 * stored in the inode as
1194 *
1195 * { page }
1196 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1197 *
1198 *
1199 * one block each for bitmap and buddy information.
1200 * So for each group we take up 2 blocks. A page can
1201 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
1202 * So it can have information regarding groups_per_page which
1203 * is blocks_per_page/2
1204 *
1205 * Locking note: This routine takes the block group lock of all groups
1206 * for this page; do not hold this lock when calling this routine!
1207 */
1208
1209 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1210 {
1211 ext4_group_t ngroups;
1212 int blocksize;
1213 int blocks_per_page;
1214 int groups_per_page;
1215 int err = 0;
1216 int i;
1217 ext4_group_t first_group, group;
1218 int first_block;
1219 struct super_block *sb;
1220 struct buffer_head *bhs;
1221 struct buffer_head **bh = NULL;
1222 struct inode *inode;
1223 char *data;
1224 char *bitmap;
1225 struct ext4_group_info *grinfo;
1226
1227 inode = page->mapping->host;
1228 sb = inode->i_sb;
1229 ngroups = ext4_get_groups_count(sb);
1230 blocksize = i_blocksize(inode);
1231 blocks_per_page = PAGE_SIZE / blocksize;
1232
1233 mb_debug(sb, "init page %lu\n", page->index);
1234
1235 groups_per_page = blocks_per_page >> 1;
1236 if (groups_per_page == 0)
1237 groups_per_page = 1;
1238
1239 /* allocate buffer_heads to read bitmaps */
1240 if (groups_per_page > 1) {
1241 i = sizeof(struct buffer_head *) * groups_per_page;
1242 bh = kzalloc(i, gfp);
1243 if (bh == NULL) {
1244 err = -ENOMEM;
1245 goto out;
1246 }
1247 } else
1248 bh = &bhs;
1249
1250 first_group = page->index * blocks_per_page / 2;
1251
1252 /* read all groups the page covers into the cache */
1253 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1254 if (group >= ngroups)
1255 break;
1256
1257 grinfo = ext4_get_group_info(sb, group);
1258 if (!grinfo)
1259 continue;
1260 /*
1261 * If page is uptodate then we came here after online resize
1262 * which added some new uninitialized group info structs, so
1263 * we must skip all initialized uptodate buddies on the page,
1264 * which may be currently in use by an allocating task.
1265 */
1266 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
1267 bh[i] = NULL;
1268 continue;
1269 }
1270 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1271 if (IS_ERR(bh[i])) {
1272 err = PTR_ERR(bh[i]);
1273 bh[i] = NULL;
1274 goto out;
1275 }
1276 mb_debug(sb, "read bitmap for group %u\n", group);
1277 }
1278
1279 /* wait for I/O completion */
1280 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1281 int err2;
1282
1283 if (!bh[i])
1284 continue;
1285 err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1286 if (!err)
1287 err = err2;
1288 }
1289
1290 first_block = page->index * blocks_per_page;
1291 for (i = 0; i < blocks_per_page; i++) {
1292 group = (first_block + i) >> 1;
1293 if (group >= ngroups)
1294 break;
1295
1296 if (!bh[group - first_group])
1297 /* skip initialized uptodate buddy */
1298 continue;
1299
1300 if (!buffer_verified(bh[group - first_group]))
1301 /* Skip faulty bitmaps */
1302 continue;
1303 err = 0;
1304
1305 /*
1306 * data carries information regarding this
1307 * particular group in the format specified
1308 * above
1309 *
1310 */
1311 data = page_address(page) + (i * blocksize);
1312 bitmap = bh[group - first_group]->b_data;
1313
1314 /*
1315 * We place the buddy block and bitmap block
1316 * close together
1317 */
1318 if ((first_block + i) & 1) {
1319 /* this is block of buddy */
1320 BUG_ON(incore == NULL);
1321 mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1322 group, page->index, i * blocksize);
1323 trace_ext4_mb_buddy_bitmap_load(sb, group);
1324 grinfo = ext4_get_group_info(sb, group);
1325 if (!grinfo) {
1326 err = -EFSCORRUPTED;
1327 goto out;
1328 }
1329 grinfo->bb_fragments = 0;
1330 memset(grinfo->bb_counters, 0,
1331 sizeof(*grinfo->bb_counters) *
1332 (MB_NUM_ORDERS(sb)));
1333 /*
1334 * incore got set to the group block bitmap below
1335 */
1336 ext4_lock_group(sb, group);
1337 /* init the buddy */
1338 memset(data, 0xff, blocksize);
1339 ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
1340 ext4_unlock_group(sb, group);
1341 incore = NULL;
1342 } else {
1343 /* this is block of bitmap */
1344 BUG_ON(incore != NULL);
1345 mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1346 group, page->index, i * blocksize);
1347 trace_ext4_mb_bitmap_load(sb, group);
1348
1349 /* see comments in ext4_mb_put_pa() */
1350 ext4_lock_group(sb, group);
1351 memcpy(data, bitmap, blocksize);
1352
1353 /* mark all preallocated blks used in in-core bitmap */
1354 ext4_mb_generate_from_pa(sb, data, group);
1355 ext4_mb_generate_from_freelist(sb, data, group);
1356 ext4_unlock_group(sb, group);
1357
1358 /* set incore so that the buddy information can be
1359 * generated using this
1360 */
1361 incore = data;
1362 }
1363 }
1364 SetPageUptodate(page);
1365
1366 out:
1367 if (bh) {
1368 for (i = 0; i < groups_per_page; i++)
1369 brelse(bh[i]);
1370 if (bh != &bhs)
1371 kfree(bh);
1372 }
1373 return err;
1374 }
1375
1376 /*
1377 * Lock the buddy and bitmap pages. This makes sure other parallel init_group
1378 * on the same buddy page doesn't happen while holding the buddy page lock.
1379 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
1380 * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
1381 */
1382 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1383 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1384 {
1385 struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1386 int block, pnum, poff;
1387 int blocks_per_page;
1388 struct page *page;
1389
1390 e4b->bd_buddy_page = NULL;
1391 e4b->bd_bitmap_page = NULL;
1392
1393 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1394 /*
1395 * the buddy cache inode stores the block bitmap
1396 * and buddy information in consecutive blocks.
1397 * So for each group we need two blocks.
1398 */
1399 block = group * 2;
1400 pnum = block / blocks_per_page;
1401 poff = block % blocks_per_page;
1402 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1403 if (!page)
1404 return -ENOMEM;
1405 BUG_ON(page->mapping != inode->i_mapping);
1406 e4b->bd_bitmap_page = page;
1407 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1408
1409 if (blocks_per_page >= 2) {
1410 /* buddy and bitmap are on the same page */
1411 return 0;
1412 }
1413
1414 block++;
1415 pnum = block / blocks_per_page;
1416 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1417 if (!page)
1418 return -ENOMEM;
1419 BUG_ON(page->mapping != inode->i_mapping);
1420 e4b->bd_buddy_page = page;
1421 return 0;
1422 }
1423
1424 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1425 {
1426 if (e4b->bd_bitmap_page) {
1427 unlock_page(e4b->bd_bitmap_page);
1428 put_page(e4b->bd_bitmap_page);
1429 }
1430 if (e4b->bd_buddy_page) {
1431 unlock_page(e4b->bd_buddy_page);
1432 put_page(e4b->bd_buddy_page);
1433 }
1434 }
1435
1436 /*
1437 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1438 * block group lock of all groups for this page; do not hold the BG lock when
1439 * calling this routine!
1440 */
1441 static noinline_for_stack
1442 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1443 {
1444
1445 struct ext4_group_info *this_grp;
1446 struct ext4_buddy e4b;
1447 struct page *page;
1448 int ret = 0;
1449
1450 might_sleep();
1451 mb_debug(sb, "init group %u\n", group);
1452 this_grp = ext4_get_group_info(sb, group);
1453 if (!this_grp)
1454 return -EFSCORRUPTED;
1455
1456 /*
1457 * This ensures that we don't reinit the buddy cache
1458 * page which maps to the group from which we are already
1459 * allocating. If we are looking at the buddy cache we would
1460 * have taken a reference using ext4_mb_load_buddy and that
1461 * would have pinned buddy page to page cache.
1462 * The call to ext4_mb_get_buddy_page_lock will mark the
1463 * page accessed.
1464 */
1465 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1466 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1467 /*
1468 * somebody initialized the group
1469 * return without doing anything
1470 */
1471 goto err;
1472 }
1473
1474 page = e4b.bd_bitmap_page;
1475 ret = ext4_mb_init_cache(page, NULL, gfp);
1476 if (ret)
1477 goto err;
1478 if (!PageUptodate(page)) {
1479 ret = -EIO;
1480 goto err;
1481 }
1482
1483 if (e4b.bd_buddy_page == NULL) {
1484 /*
1485 * If both the bitmap and buddy are in
1486 * the same page we don't need to force
1487 * init the buddy
1488 */
1489 ret = 0;
1490 goto err;
1491 }
1492 /* init buddy cache */
1493 page = e4b.bd_buddy_page;
1494 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1495 if (ret)
1496 goto err;
1497 if (!PageUptodate(page)) {
1498 ret = -EIO;
1499 goto err;
1500 }
1501 err:
1502 ext4_mb_put_buddy_page_lock(&e4b);
1503 return ret;
1504 }
1505
1506 /*
1507 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1508 * block group lock of all groups for this page; do not hold the BG lock when
1509 * calling this routine!
1510 */
1511 static noinline_for_stack int
1512 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1513 struct ext4_buddy *e4b, gfp_t gfp)
1514 {
1515 int blocks_per_page;
1516 int block;
1517 int pnum;
1518 int poff;
1519 struct page *page;
1520 int ret;
1521 struct ext4_group_info *grp;
1522 struct ext4_sb_info *sbi = EXT4_SB(sb);
1523 struct inode *inode = sbi->s_buddy_cache;
1524
1525 might_sleep();
1526 mb_debug(sb, "load group %u\n", group);
1527
1528 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1529 grp = ext4_get_group_info(sb, group);
1530 if (!grp)
1531 return -EFSCORRUPTED;
1532
1533 e4b->bd_blkbits = sb->s_blocksize_bits;
1534 e4b->bd_info = grp;
1535 e4b->bd_sb = sb;
1536 e4b->bd_group = group;
1537 e4b->bd_buddy_page = NULL;
1538 e4b->bd_bitmap_page = NULL;
1539
1540 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1541 /*
1542 * we need full data about the group
1543 * to make a good selection
1544 */
1545 ret = ext4_mb_init_group(sb, group, gfp);
1546 if (ret)
1547 return ret;
1548 }
1549
1550 /*
1551 * the buddy cache inode stores the block bitmap
1552 * and buddy information in consecutive blocks.
1553 * So for each group we need two blocks.
1554 */
1555 block = group * 2;
1556 pnum = block / blocks_per_page;
1557 poff = block % blocks_per_page;
1558
1559 /* we could use find_or_create_page(), but it locks the page,
1560 * which we'd like to avoid in the fast path ... */
1561 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1562 if (page == NULL || !PageUptodate(page)) {
1563 if (page)
1564 /*
1565 * drop the page reference and try
1566 * to get the page with lock. If we
1567 * are not uptodate that implies
1568 * somebody just created the page but
1569 * is yet to initialize the same. So
1570 * wait for it to initialize.
1571 */
1572 put_page(page);
1573 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1574 if (page) {
1575 BUG_ON(page->mapping != inode->i_mapping);
1576 if (!PageUptodate(page)) {
1577 ret = ext4_mb_init_cache(page, NULL, gfp);
1578 if (ret) {
1579 unlock_page(page);
1580 goto err;
1581 }
1582 mb_cmp_bitmaps(e4b, page_address(page) +
1583 (poff * sb->s_blocksize));
1584 }
1585 unlock_page(page);
1586 }
1587 }
1588 if (page == NULL) {
1589 ret = -ENOMEM;
1590 goto err;
1591 }
1592 if (!PageUptodate(page)) {
1593 ret = -EIO;
1594 goto err;
1595 }
1596
1597 /* Pages marked accessed already */
1598 e4b->bd_bitmap_page = page;
1599 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1600
1601 block++;
1602 pnum = block / blocks_per_page;
1603 poff = block % blocks_per_page;
1604
1605 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1606 if (page == NULL || !PageUptodate(page)) {
1607 if (page)
1608 put_page(page);
1609 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1610 if (page) {
1611 BUG_ON(page->mapping != inode->i_mapping);
1612 if (!PageUptodate(page)) {
1613 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1614 gfp);
1615 if (ret) {
1616 unlock_page(page);
1617 goto err;
1618 }
1619 }
1620 unlock_page(page);
1621 }
1622 }
1623 if (page == NULL) {
1624 ret = -ENOMEM;
1625 goto err;
1626 }
1627 if (!PageUptodate(page)) {
1628 ret = -EIO;
1629 goto err;
1630 }
1631
1632 /* Pages marked accessed already */
1633 e4b->bd_buddy_page = page;
1634 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1635
1636 return 0;
1637
1638 err:
1639 if (page)
1640 put_page(page);
1641 if (e4b->bd_bitmap_page)
1642 put_page(e4b->bd_bitmap_page);
1643 if (e4b->bd_buddy_page)
1644 put_page(e4b->bd_buddy_page);
1645 e4b->bd_buddy = NULL;
1646 e4b->bd_bitmap = NULL;
1647 return ret;
1648 }
1649
1650 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1651 struct ext4_buddy *e4b)
1652 {
1653 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1654 }
1655
1656 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1657 {
1658 if (e4b->bd_bitmap_page)
1659 put_page(e4b->bd_bitmap_page);
1660 if (e4b->bd_buddy_page)
1661 put_page(e4b->bd_buddy_page);
1662 }
1663
1664
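/*
 * Find the order of the free buddy containing @block. In the buddy
 * bitmaps a clear bit at order o (position block >> o) means the whole
 * 2^o chunk is one free buddy of that order; bits covering chunks that
 * were split or coalesced away are set. Scanning upwards from order 1,
 * the first clear bit gives the order; 0 is returned when the block is
 * free only at order 0, i.e. only in bd_bitmap itself.
 */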
1665 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1666 {
1667 int order = 1, max;
1668 void *bb;
1669
1670 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1671 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1672
1673 while (order <= e4b->bd_blkbits + 1) {
1674 bb = mb_find_buddy(e4b, order, &max);
1675 if (!mb_test_bit(block >> order, bb)) {
1676 /* this block is part of buddy of order 'order' */
1677 return order;
1678 }
1679 order++;
1680 }
1681 return 0;
1682 }
1683
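/*
 * Clear @len bits of bitmap @bm starting at bit @cur. Word-aligned runs
 * of at least 32 bits are cleared one __u32 at a time; leading and
 * trailing partial words fall back to per-bit clears. For example,
 * clearing 40 bits starting at bit 28 clears bits 28-31 individually,
 * bits 32-63 as a single word store, and bits 64-67 individually again.
 */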
1684 static void mb_clear_bits(void *bm, int cur, int len)
1685 {
1686 __u32 *addr;
1687
1688 len = cur + len;
1689 while (cur < len) {
1690 if ((cur & 31) == 0 && (len - cur) >= 32) {
1691 /* fast path: clear whole word at once */
1692 addr = bm + (cur >> 3);
1693 *addr = 0;
1694 cur += 32;
1695 continue;
1696 }
1697 mb_clear_bit(cur, bm);
1698 cur++;
1699 }
1700 }
1701
1702 /* clear bits in the given range;
1703 * returns the first bit that was already zero, if any, -1 otherwise
1704 */
1705 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1706 {
1707 __u32 *addr;
1708 int zero_bit = -1;
1709
1710 len = cur + len;
1711 while (cur < len) {
1712 if ((cur & 31) == 0 && (len - cur) >= 32) {
1713 /* fast path: clear whole word at once */
1714 addr = bm + (cur >> 3);
1715 if (*addr != (__u32)(-1) && zero_bit == -1)
1716 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1717 *addr = 0;
1718 cur += 32;
1719 continue;
1720 }
1721 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1722 zero_bit = cur;
1723 cur++;
1724 }
1725
1726 return zero_bit;
1727 }
1728
1729 void ext4_set_bits(void *bm, int cur, int len)
1730 {
1731 __u32 *addr;
1732
1733 len = cur + len;
1734 while (cur < len) {
1735 if ((cur & 31) == 0 && (len - cur) >= 32) {
1736 /* fast path: set whole word at once */
1737 addr = bm + (cur >> 3);
1738 *addr = 0xffffffff;
1739 cur += 32;
1740 continue;
1741 }
1742 mb_set_bit(cur, bm);
1743 cur++;
1744 }
1745 }
1746
1747 static inline int mb_buddy_adjust_border(int *bit, void *bitmap, int side)
1748 {
1749 if (mb_test_bit(*bit + side, bitmap)) {
1750 mb_clear_bit(*bit, bitmap);
1751 (*bit) -= side;
1752 return 1;
1753 }
1754 else {
1755 (*bit) += side;
1756 mb_set_bit(*bit, bitmap);
1757 return -1;
1758 }
1759 }
1760
1761 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1762 {
1763 int max;
1764 int order = 1;
1765 void *buddy = mb_find_buddy(e4b, order, &max);
1766
1767 while (buddy) {
1768 void *buddy2;
1769
1770 /* Bits in range [first; last] are known to be set since
1771 * corresponding blocks were allocated. Bits in range
1772 * (first; last) will stay set because they form buddies on
1773 * upper layer. We just deal with borders if they don't
1774 * align with upper layer and then go up.
1775 * Releasing entire group is all about clearing
1776 * single bit of highest order buddy.
1777 */
1778
1779 /* Example:
1780 * ---------------------------------
1781 * | 1 | 1 | 1 | 1 |
1782 * ---------------------------------
1783 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1784 * ---------------------------------
1785 * 0 1 2 3 4 5 6 7
1786 * \_____________________/
1787 *
1788 * Neither [1] nor [6] is aligned to above layer.
1789 * Left neighbour [0] is free, so mark it busy,
1790 * decrease bb_counters and extend range to
1791 * [0; 6]
1792 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1793 * mark [6] free, increase bb_counters and shrink range to
1794 * [0; 5].
1795 * Then shift range to [0; 2], go up and do the same.
1796 */
1797
1798
1799 if (first & 1)
1800 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1801 if (!(last & 1))
1802 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1803 if (first > last)
1804 break;
1805 order++;
1806
1807 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1808 mb_clear_bits(buddy, first, last - first + 1);
1809 e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1810 break;
1811 }
1812 first >>= 1;
1813 last >>= 1;
1814 buddy = buddy2;
1815 }
1816 }
1817
1818 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1819 int first, int count)
1820 {
1821 int left_is_free = 0;
1822 int right_is_free = 0;
1823 int block;
1824 int last = first + count - 1;
1825 struct super_block *sb = e4b->bd_sb;
1826
1827 if (WARN_ON(count == 0))
1828 return;
1829 BUG_ON(last >= (sb->s_blocksize << 3));
1830 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1831 /* Don't bother if the block group is corrupt. */
1832 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1833 return;
1834
1835 mb_check_buddy(e4b);
1836 mb_free_blocks_double(inode, e4b, first, count);
1837
1838 /* access memory sequentially: check left neighbour,
1839 * clear range and then check right neighbour
1840 */
1841 if (first != 0)
1842 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1843 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1844 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1845 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1846
1847 if (unlikely(block != -1)) {
1848 struct ext4_sb_info *sbi = EXT4_SB(sb);
1849 ext4_fsblk_t blocknr;
1850
1851 /*
1852 * Fastcommit replay can free already freed blocks which
1853 * corrupts allocation info. Regenerate it.
1854 */
1855 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
1856 mb_regenerate_buddy(e4b);
1857 goto check;
1858 }
1859
1860 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1861 blocknr += EXT4_C2B(sbi, block);
1862 ext4_grp_locked_error(sb, e4b->bd_group,
1863 inode ? inode->i_ino : 0, blocknr,
1864 "freeing already freed block (bit %u); block bitmap corrupt.",
1865 block);
1866 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
1867 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1868 return;
1869 }
1870
1871 this_cpu_inc(discard_pa_seq);
1872 e4b->bd_info->bb_free += count;
1873 if (first < e4b->bd_info->bb_first_free)
1874 e4b->bd_info->bb_first_free = first;
1875
1876 /* let's maintain fragments counter */
1877 if (left_is_free && right_is_free)
1878 e4b->bd_info->bb_fragments--;
1879 else if (!left_is_free && !right_is_free)
1880 e4b->bd_info->bb_fragments++;
1881
1882 /* buddy[0] == bd_bitmap is a special case, so handle
1883 * it right away and let mb_buddy_mark_free stay free of
1884 * zero order checks.
1885 * Check if neighbours are to be coalesced,
1886 * adjust bitmap bb_counters and borders appropriately.
1887 */
1888 if (first & 1) {
1889 first += !left_is_free;
1890 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1891 }
1892 if (!(last & 1)) {
1893 last -= !right_is_free;
1894 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1895 }
1896
1897 if (first <= last)
1898 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1899
1900 mb_set_largest_free_order(sb, e4b->bd_info);
1901 mb_update_avg_fragment_size(sb, e4b->bd_info);
1902 check:
1903 mb_check_buddy(e4b);
1904 }
1905
1906 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1907 int needed, struct ext4_free_extent *ex)
1908 {
1909 int next = block;
1910 int max, order;
1911 void *buddy;
1912
1913 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1914 BUG_ON(ex == NULL);
1915
1916 buddy = mb_find_buddy(e4b, 0, &max);
1917 BUG_ON(buddy == NULL);
1918 BUG_ON(block >= max);
1919 if (mb_test_bit(block, buddy)) {
1920 ex->fe_len = 0;
1921 ex->fe_start = 0;
1922 ex->fe_group = 0;
1923 return 0;
1924 }
1925
1926 /* find actual order */
1927 order = mb_find_order_for_block(e4b, block);
1928 block = block >> order;
1929
1930 ex->fe_len = 1 << order;
1931 ex->fe_start = block << order;
1932 ex->fe_group = e4b->bd_group;
1933
1934 /* calc difference from given start */
1935 next = next - ex->fe_start;
1936 ex->fe_len -= next;
1937 ex->fe_start += next;
1938
1939 while (needed > ex->fe_len &&
1940 mb_find_buddy(e4b, order, &max)) {
1941
1942 if (block + 1 >= max)
1943 break;
1944
1945 next = (block + 1) * (1 << order);
1946 if (mb_test_bit(next, e4b->bd_bitmap))
1947 break;
1948
1949 order = mb_find_order_for_block(e4b, next);
1950
1951 block = next >> order;
1952 ex->fe_len += 1 << order;
1953 }
1954
1955 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1956 /* Should never happen! (but apparently sometimes does?!?) */
1957 WARN_ON(1);
1958 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1959 "corruption or bug in mb_find_extent "
1960 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1961 block, order, needed, ex->fe_group, ex->fe_start,
1962 ex->fe_len, ex->fe_logical);
1963 ex->fe_len = 0;
1964 ex->fe_start = 0;
1965 ex->fe_group = 0;
1966 }
1967 return ex->fe_len;
1968 }
1969
1970 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1971 {
1972 int ord;
1973 int mlen = 0;
1974 int max = 0;
1975 int cur;
1976 int start = ex->fe_start;
1977 int len = ex->fe_len;
1978 unsigned ret = 0;
1979 int len0 = len;
1980 void *buddy;
1981
1982 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1983 BUG_ON(e4b->bd_group != ex->fe_group);
1984 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1985 mb_check_buddy(e4b);
1986 mb_mark_used_double(e4b, start, len);
1987
1988 this_cpu_inc(discard_pa_seq);
1989 e4b->bd_info->bb_free -= len;
1990 if (e4b->bd_info->bb_first_free == start)
1991 e4b->bd_info->bb_first_free += len;
1992
1993 /* let's maintain fragments counter */
1994 if (start != 0)
1995 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1996 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1997 max = !mb_test_bit(start + len, e4b->bd_bitmap);
1998 if (mlen && max)
1999 e4b->bd_info->bb_fragments++;
2000 else if (!mlen && !max)
2001 e4b->bd_info->bb_fragments--;
2002
2003 /* let's maintain buddy itself */
2004 while (len) {
2005 ord = mb_find_order_for_block(e4b, start);
2006
2007 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
2008 /* the whole chunk may be allocated at once! */
2009 mlen = 1 << ord;
2010 buddy = mb_find_buddy(e4b, ord, &max);
2011 BUG_ON((start >> ord) >= max);
2012 mb_set_bit(start >> ord, buddy);
2013 e4b->bd_info->bb_counters[ord]--;
2014 start += mlen;
2015 len -= mlen;
2016 BUG_ON(len < 0);
2017 continue;
2018 }
2019
2020 /* store for history */
2021 if (ret == 0)
2022 ret = len | (ord << 16);
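/*
 * The encoding above packs, once, the state at the first split: the
 * low 16 bits hold the length still left to allocate and the upper
 * bits hold the buddy order being split. ext4_mb_use_best_found()
 * later unpacks this into ac_tail and ac_buddy.
 */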
2023
2024 /* we have to split large buddy */
2025 BUG_ON(ord <= 0);
2026 buddy = mb_find_buddy(e4b, ord, &max);
2027 mb_set_bit(start >> ord, buddy);
2028 e4b->bd_info->bb_counters[ord]--;
2029
2030 ord--;
2031 cur = (start >> ord) & ~1U;
2032 buddy = mb_find_buddy(e4b, ord, &max);
2033 mb_clear_bit(cur, buddy);
2034 mb_clear_bit(cur + 1, buddy);
2035 e4b->bd_info->bb_counters[ord]++;
2036 e4b->bd_info->bb_counters[ord]++;
2037 }
2038 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
2039
2040 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
2041 ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
2042 mb_check_buddy(e4b);
2043
2044 return ret;
2045 }
2046
2047 /*
2048 * Must be called under group lock!
2049 */
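/*
 * Commit the best found extent: trim it to the goal length, mark it used
 * in the buddy data, take references on the pages backing this group so
 * they stay pinned until ext4_mb_release_context(), and create a
 * preallocation if more space was taken than the caller originally
 * asked for.
 */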
2050 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2051 struct ext4_buddy *e4b)
2052 {
2053 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2054 int ret;
2055
2056 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2057 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2058
2059 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2060 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2061 ret = mb_mark_used(e4b, &ac->ac_b_ex);
2062
2063 /* preallocation can change ac_b_ex, thus we store actually
2064 * allocated blocks for history */
2065 ac->ac_f_ex = ac->ac_b_ex;
2066
2067 ac->ac_status = AC_STATUS_FOUND;
2068 ac->ac_tail = ret & 0xffff;
2069 ac->ac_buddy = ret >> 16;
2070
2071 /*
2072 * take the page reference. We want the page to be pinned
2073 * so that we don't get an ext4_mb_init_cache() call for this
2074 * group until we update the bitmap. That would mean we
2075 * could double allocate blocks. The reference is dropped
2076 * in ext4_mb_release_context().
2077 */
2078 ac->ac_bitmap_page = e4b->bd_bitmap_page;
2079 get_page(ac->ac_bitmap_page);
2080 ac->ac_buddy_page = e4b->bd_buddy_page;
2081 get_page(ac->ac_buddy_page);
2082 /* store last allocated for subsequent stream allocation */
2083 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2084 spin_lock(&sbi->s_md_lock);
2085 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2086 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2087 spin_unlock(&sbi->s_md_lock);
2088 }
2089 /*
2090 * As we've just preallocated more space than
2091 * the user originally requested, we store the allocated
2092 * space in a special descriptor.
2093 */
2094 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2095 ext4_mb_new_preallocation(ac);
2096
2097 }
2098
2099 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2100 struct ext4_buddy *e4b,
2101 int finish_group)
2102 {
2103 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2104 struct ext4_free_extent *bex = &ac->ac_b_ex;
2105 struct ext4_free_extent *gex = &ac->ac_g_ex;
2106 struct ext4_free_extent ex;
2107 int max;
2108
2109 if (ac->ac_status == AC_STATUS_FOUND)
2110 return;
2111 /*
2112 * We don't want to scan for a whole year
2113 */
2114 if (ac->ac_found > sbi->s_mb_max_to_scan &&
2115 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2116 ac->ac_status = AC_STATUS_BREAK;
2117 return;
2118 }
2119
2120 /*
2121 * Haven't found good chunk so far, let's continue
2122 */
2123 if (bex->fe_len < gex->fe_len)
2124 return;
2125
2126 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2127 && bex->fe_group == e4b->bd_group) {
2128 /* recheck chunk's availability - we don't know
2129 * when it was found (within this lock-unlock
2130 * period or not) */
2131 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
2132 if (max >= gex->fe_len) {
2133 ext4_mb_use_best_found(ac, e4b);
2134 return;
2135 }
2136 }
2137 }
2138
2139 /*
2140 * The routine checks whether the found extent is good enough. If it is,
2141 * the extent gets marked used and a flag is set in the context
2142 * to stop scanning. Otherwise, the extent is compared with the
2143 * previously found extent and, if the new one is better, it is stored
2144 * in the context. Later, the best found extent will be used if
2145 * mballoc can't find a good enough extent.
2146 *
2147 * FIXME: real allocation policy is to be designed yet!
2148 */
2149 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2150 struct ext4_free_extent *ex,
2151 struct ext4_buddy *e4b)
2152 {
2153 struct ext4_free_extent *bex = &ac->ac_b_ex;
2154 struct ext4_free_extent *gex = &ac->ac_g_ex;
2155
2156 BUG_ON(ex->fe_len <= 0);
2157 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2158 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2159 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2160
2161 ac->ac_found++;
2162
2163 /*
2164 * The special case - take what you catch first
2165 */
2166 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2167 *bex = *ex;
2168 ext4_mb_use_best_found(ac, e4b);
2169 return;
2170 }
2171
2172 /*
2173 * Let's check whether the chunk is good enough
2174 */
2175 if (ex->fe_len == gex->fe_len) {
2176 *bex = *ex;
2177 ext4_mb_use_best_found(ac, e4b);
2178 return;
2179 }
2180
2181 /*
2182 * If this is first found extent, just store it in the context
2183 */
2184 if (bex->fe_len == 0) {
2185 *bex = *ex;
2186 return;
2187 }
2188
2189 /*
2190 * If new found extent is better, store it in the context
2191 */
2192 if (bex->fe_len < gex->fe_len) {
2193 /* if the request isn't satisfied, any found extent
2194 * larger than previous best one is better */
2195 if (ex->fe_len > bex->fe_len)
2196 *bex = *ex;
2197 } else if (ex->fe_len > gex->fe_len) {
2198 /* if the request is satisfied, then we try to find
2199 * an extent that still satisfies the request, but is
2200 * smaller than the previous one */
2201 if (ex->fe_len < bex->fe_len)
2202 *bex = *ex;
2203 }
2204
2205 ext4_mb_check_limits(ac, e4b, 0);
2206 }
2207
2208 static noinline_for_stack
2209 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2210 struct ext4_buddy *e4b)
2211 {
2212 struct ext4_free_extent ex = ac->ac_b_ex;
2213 ext4_group_t group = ex.fe_group;
2214 int max;
2215 int err;
2216
2217 BUG_ON(ex.fe_len <= 0);
2218 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2219 if (err)
2220 return err;
2221
2222 ext4_lock_group(ac->ac_sb, group);
2223 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2224 goto out;
2225
2226 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2227
2228 if (max > 0) {
2229 ac->ac_b_ex = ex;
2230 ext4_mb_use_best_found(ac, e4b);
2231 }
2232
2233 out:
2234 ext4_unlock_group(ac->ac_sb, group);
2235 ext4_mb_unload_buddy(e4b);
2236
2237 return 0;
2238 }
2239
2240 static noinline_for_stack
2241 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2242 struct ext4_buddy *e4b)
2243 {
2244 ext4_group_t group = ac->ac_g_ex.fe_group;
2245 int max;
2246 int err;
2247 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2248 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2249 struct ext4_free_extent ex;
2250
2251 if (!grp)
2252 return -EFSCORRUPTED;
2253 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
2254 return 0;
2255 if (grp->bb_free == 0)
2256 return 0;
2257
2258 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2259 if (err)
2260 return err;
2261
2262 ext4_lock_group(ac->ac_sb, group);
2263 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2264 goto out;
2265
2266 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2267 ac->ac_g_ex.fe_len, &ex);
2268 ex.fe_logical = 0xDEADFA11; /* debug value */
2269
2270 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
2271 ext4_fsblk_t start;
2272
2273 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
2274 ex.fe_start;
2275 /* use do_div to get remainder (would be 64-bit modulo) */
2276 if (do_div(start, sbi->s_stripe) == 0) {
2277 ac->ac_found++;
2278 ac->ac_b_ex = ex;
2279 ext4_mb_use_best_found(ac, e4b);
2280 }
2281 } else if (max >= ac->ac_g_ex.fe_len) {
2282 BUG_ON(ex.fe_len <= 0);
2283 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2284 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2285 ac->ac_found++;
2286 ac->ac_b_ex = ex;
2287 ext4_mb_use_best_found(ac, e4b);
2288 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2289 /* Sometimes, the caller may want to merge even a small
2290 * number of blocks into an existing extent */
2291 BUG_ON(ex.fe_len <= 0);
2292 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2293 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2294 ac->ac_found++;
2295 ac->ac_b_ex = ex;
2296 ext4_mb_use_best_found(ac, e4b);
2297 }
2298 out:
2299 ext4_unlock_group(ac->ac_sb, group);
2300 ext4_mb_unload_buddy(e4b);
2301
2302 return 0;
2303 }
2304
2305 /*
2306 * The routine scans buddy structures (not the bitmap!) from the given order
2307 * up to the max order and tries to find a big enough chunk to satisfy the request
2308 */
2309 static noinline_for_stack
2310 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2311 struct ext4_buddy *e4b)
2312 {
2313 struct super_block *sb = ac->ac_sb;
2314 struct ext4_group_info *grp = e4b->bd_info;
2315 void *buddy;
2316 int i;
2317 int k;
2318 int max;
2319
2320 BUG_ON(ac->ac_2order <= 0);
2321 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2322 if (grp->bb_counters[i] == 0)
2323 continue;
2324
2325 buddy = mb_find_buddy(e4b, i, &max);
2326 BUG_ON(buddy == NULL);
2327
2328 k = mb_find_next_zero_bit(buddy, max, 0);
2329 if (k >= max) {
2330 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2331 "%d free clusters of order %d. But found 0",
2332 grp->bb_counters[i], i);
2333 ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2334 e4b->bd_group,
2335 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2336 break;
2337 }
2338 ac->ac_found++;
2339
2340 ac->ac_b_ex.fe_len = 1 << i;
2341 ac->ac_b_ex.fe_start = k << i;
2342 ac->ac_b_ex.fe_group = e4b->bd_group;
2343
2344 ext4_mb_use_best_found(ac, e4b);
2345
2346 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2347
2348 if (EXT4_SB(sb)->s_mb_stats)
2349 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2350
2351 break;
2352 }
2353 }
2354
2355 /*
2356 * The routine scans the group and measures all found extents.
2357 * In order to optimize scanning, the routine uses the group's free
2358 * cluster count (bb_free) as an upper limit on how much to scan.
2359 */
2360 static noinline_for_stack
2361 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2362 struct ext4_buddy *e4b)
2363 {
2364 struct super_block *sb = ac->ac_sb;
2365 void *bitmap = e4b->bd_bitmap;
2366 struct ext4_free_extent ex;
2367 int i;
2368 int free;
2369
2370 free = e4b->bd_info->bb_free;
2371 if (WARN_ON(free <= 0))
2372 return;
2373
2374 i = e4b->bd_info->bb_first_free;
2375
2376 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2377 i = mb_find_next_zero_bit(bitmap,
2378 EXT4_CLUSTERS_PER_GROUP(sb), i);
2379 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2380 /*
2381 * If we have a corrupt bitmap, we won't find any
2382 * free blocks even though the group info says we
2383 * have free blocks
2384 */
2385 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2386 "%d free clusters as per "
2387 "group info. But bitmap says 0",
2388 free);
2389 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2390 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2391 break;
2392 }
2393
2394 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2395 if (WARN_ON(ex.fe_len <= 0))
2396 break;
2397 if (free < ex.fe_len) {
2398 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2399 "%d free clusters as per "
2400 "group info. But got %d blocks",
2401 free, ex.fe_len);
2402 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2403 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2404 /*
2405 * The number of free blocks differs. This mostly
2406 * indicates that the bitmap is corrupt. So exit
2407 * without claiming the space.
2408 */
2409 break;
2410 }
2411 ex.fe_logical = 0xDEADC0DE; /* debug value */
2412 ext4_mb_measure_extent(ac, &ex, e4b);
2413
2414 i += ex.fe_len;
2415 free -= ex.fe_len;
2416 }
2417
2418 ext4_mb_check_limits(ac, e4b, 1);
2419 }
2420
2421 /*
2422 * This is a special case for storage like RAID5;
2423 * we try to find stripe-aligned chunks for stripe-size-multiple requests
2424 */
2425 static noinline_for_stack
2426 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2427 struct ext4_buddy *e4b)
2428 {
2429 struct super_block *sb = ac->ac_sb;
2430 struct ext4_sb_info *sbi = EXT4_SB(sb);
2431 void *bitmap = e4b->bd_bitmap;
2432 struct ext4_free_extent ex;
2433 ext4_fsblk_t first_group_block;
2434 ext4_fsblk_t a;
2435 ext4_grpblk_t i;
2436 int max;
2437
2438 BUG_ON(sbi->s_stripe == 0);
2439
2440 /* find first stripe-aligned block in group */
2441 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2442
2443 a = first_group_block + sbi->s_stripe - 1;
2444 do_div(a, sbi->s_stripe);
2445 i = (a * sbi->s_stripe) - first_group_block;
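/*
 * Example with hypothetical numbers: if s_stripe == 16 and the group
 * starts at block 100, then a = (100 + 15) / 16 = 7 and
 * i = 7 * 16 - 100 = 12, the offset of the first stripe-aligned
 * block within this group.
 */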
2446
2447 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2448 if (!mb_test_bit(i, bitmap)) {
2449 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2450 if (max >= sbi->s_stripe) {
2451 ac->ac_found++;
2452 ex.fe_logical = 0xDEADF00D; /* debug value */
2453 ac->ac_b_ex = ex;
2454 ext4_mb_use_best_found(ac, e4b);
2455 break;
2456 }
2457 }
2458 i += sbi->s_stripe;
2459 }
2460 }
2461
2462 /*
2463 * This is also called BEFORE we load the buddy bitmap.
2464 * Returns true or false, indicating whether the group is suitable
2465 * for the allocation or not.
2466 */
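/*
 * The allocation criteria loosen as cr grows:
 *   cr 0 - only groups whose largest free order can serve the whole
 *          power-of-two request (checked via bb_largest_free_order);
 *          data files also avoid the first group of a flex_bg;
 *   cr 1 - groups whose average fragment size (free / fragments) is at
 *          least the goal length;
 *   cr 2 - groups with at least the goal length free in total;
 *   cr 3 - any group with free space.
 */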
2467 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2468 ext4_group_t group, int cr)
2469 {
2470 ext4_grpblk_t free, fragments;
2471 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2472 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2473
2474 BUG_ON(cr < 0 || cr >= 4);
2475
2476 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2477 return false;
2478
2479 free = grp->bb_free;
2480 if (free == 0)
2481 return false;
2482
2483 fragments = grp->bb_fragments;
2484 if (fragments == 0)
2485 return false;
2486
2487 switch (cr) {
2488 case 0:
2489 BUG_ON(ac->ac_2order == 0);
2490
2491 /* Avoid using the first bg of a flexgroup for data files */
2492 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2493 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2494 ((group % flex_size) == 0))
2495 return false;
2496
2497 if (free < ac->ac_g_ex.fe_len)
2498 return false;
2499
2500 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2501 return true;
2502
2503 if (grp->bb_largest_free_order < ac->ac_2order)
2504 return false;
2505
2506 return true;
2507 case 1:
2508 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2509 return true;
2510 break;
2511 case 2:
2512 if (free >= ac->ac_g_ex.fe_len)
2513 return true;
2514 break;
2515 case 3:
2516 return true;
2517 default:
2518 BUG();
2519 }
2520
2521 return false;
2522 }
2523
2524 /*
2525 * This could return negative error code if something goes wrong
2526 * during ext4_mb_init_group(). This should not be called with
2527 * ext4_lock_group() held.
2528 *
2529 * Note: because we are conditionally operating with the group lock in
2530 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2531 * function using __acquire and __release. This means we need to be
2532 * super careful before messing with the error path handling via "goto
2533 * out"!
2534 */
2535 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2536 ext4_group_t group, int cr)
2537 {
2538 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2539 struct super_block *sb = ac->ac_sb;
2540 struct ext4_sb_info *sbi = EXT4_SB(sb);
2541 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2542 ext4_grpblk_t free;
2543 int ret = 0;
2544
2545 if (!grp)
2546 return -EFSCORRUPTED;
2547 if (sbi->s_mb_stats)
2548 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2549 if (should_lock) {
2550 ext4_lock_group(sb, group);
2551 __release(ext4_group_lock_ptr(sb, group));
2552 }
2553 free = grp->bb_free;
2554 if (free == 0)
2555 goto out;
2556 if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2557 goto out;
2558 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2559 goto out;
2560 if (should_lock) {
2561 __acquire(ext4_group_lock_ptr(sb, group));
2562 ext4_unlock_group(sb, group);
2563 }
2564
2565 /* We only do this if the grp has never been initialized */
2566 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2567 struct ext4_group_desc *gdp =
2568 ext4_get_group_desc(sb, group, NULL);
2569 int ret;
2570
2571 /* cr=0/1 is a very optimistic search to find large
2572 * good chunks almost for free. If buddy data is not
2573 * ready, then this optimization makes no sense. But
2574 * we never skip the first block group in a flex_bg,
2575 * since this gets used for metadata block allocation,
2576 * and we want to make sure we locate metadata blocks
2577 * in the first block group in the flex_bg if possible.
2578 */
2579 if (cr < 2 &&
2580 (!sbi->s_log_groups_per_flex ||
2581 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2582 !(ext4_has_group_desc_csum(sb) &&
2583 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2584 return 0;
2585 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2586 if (ret)
2587 return ret;
2588 }
2589
2590 if (should_lock) {
2591 ext4_lock_group(sb, group);
2592 __release(ext4_group_lock_ptr(sb, group));
2593 }
2594 ret = ext4_mb_good_group(ac, group, cr);
2595 out:
2596 if (should_lock) {
2597 __acquire(ext4_group_lock_ptr(sb, group));
2598 ext4_unlock_group(sb, group);
2599 }
2600 return ret;
2601 }
2602
2603 /*
2604 * Start prefetching @nr block bitmaps starting at @group.
2605 * Return the next group which needs to be prefetched.
2606 */
2607 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2608 unsigned int nr, int *cnt)
2609 {
2610 ext4_group_t ngroups = ext4_get_groups_count(sb);
2611 struct buffer_head *bh;
2612 struct blk_plug plug;
2613
2614 blk_start_plug(&plug);
2615 while (nr-- > 0) {
2616 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2617 NULL);
2618 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2619
2620 /*
2621 * Prefetch block groups with free blocks; but don't
2622 * bother if it is marked uninitialized on disk, since
2623 * it won't require I/O to read. Also only try to
2624 * prefetch once, so we avoid the getblk() call, which can
2625 * be expensive.
2626 */
2627 if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2628 EXT4_MB_GRP_NEED_INIT(grp) &&
2629 ext4_free_group_clusters(sb, gdp) > 0 &&
2630 !(ext4_has_group_desc_csum(sb) &&
2631 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2632 bh = ext4_read_block_bitmap_nowait(sb, group, true);
2633 if (bh && !IS_ERR(bh)) {
2634 if (!buffer_uptodate(bh) && cnt)
2635 (*cnt)++;
2636 brelse(bh);
2637 }
2638 }
2639 if (++group >= ngroups)
2640 group = 0;
2641 }
2642 blk_finish_plug(&plug);
2643 return group;
2644 }
2645
2646 /*
2647 * Prefetching reads the block bitmap into the buffer cache; but we
2648 * need to make sure that the buddy bitmap in the page cache has been
2649 * initialized. Note that ext4_mb_init_group() will block if the I/O
2650 * is not yet completed, or indeed if it was never initiated because
2651 * ext4_mb_prefetch() did not start the I/O.
2652 *
2653 * TODO: We should actually kick off the buddy bitmap setup in a work
2654 * queue when the buffer I/O is completed, so that we don't block
2655 * waiting for the block allocation bitmap read to finish when
2656 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2657 */
2658 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2659 unsigned int nr)
2660 {
2661 while (nr-- > 0) {
2662 struct ext4_group_desc *gdp;
2663 struct ext4_group_info *grp;
2664 
2665 if (!group)
2666 group = ext4_get_groups_count(sb);
2667 group--;
2668 gdp = ext4_get_group_desc(sb, group, NULL);
2669 grp = ext4_get_group_info(sb, group);
2670
2671 if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2672 ext4_free_group_clusters(sb, gdp) > 0 &&
2673 !(ext4_has_group_desc_csum(sb) &&
2674 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2675 if (ext4_mb_init_group(sb, group, GFP_NOFS))
2676 break;
2677 }
2678 }
2679 }
2680
2681 static noinline_for_stack int
2682 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2683 {
2684 ext4_group_t prefetch_grp = 0, ngroups, group, i;
2685 int cr = -1, new_cr;
2686 int err = 0, first_err = 0;
2687 unsigned int nr = 0, prefetch_ios = 0;
2688 struct ext4_sb_info *sbi;
2689 struct super_block *sb;
2690 struct ext4_buddy e4b;
2691 int lost;
2692
2693 sb = ac->ac_sb;
2694 sbi = EXT4_SB(sb);
2695 ngroups = ext4_get_groups_count(sb);
2696 /* non-extent files are limited to low blocks/groups */
2697 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2698 ngroups = sbi->s_blockfile_groups;
2699
2700 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2701
2702 /* first, try the goal */
2703 err = ext4_mb_find_by_goal(ac, &e4b);
2704 if (err || ac->ac_status == AC_STATUS_FOUND)
2705 goto out;
2706
2707 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2708 goto out;
2709
2710 /*
2711 * ac->ac_2order is set only if the fe_len is a power of 2;
2712 * if ac->ac_2order is set we also set the criteria to 0 so that we
2713 * try exact allocation using the buddy.
2714 */
2715 i = fls(ac->ac_g_ex.fe_len);
2716 ac->ac_2order = 0;
2717 /*
2718 * We search using buddy data only if the order of the request
2719 * is greater than or equal to sbi->s_mb_order2_reqs.
2720 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2721 * We also support searching for power-of-two requests only for
2722 * requests up to the maximum buddy size we have constructed.
2723 */
2724 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2725 /*
2726 * This should tell if fe_len is exactly a power of 2
2727 */
2728 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2729 ac->ac_2order = array_index_nospec(i - 1,
2730 MB_NUM_ORDERS(sb));
2731 }
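/*
 * i is fls(fe_len), so bit (i - 1) is the highest set bit; if masking
 * that bit off leaves zero, fe_len was exactly 1 << (i - 1) and the
 * request can be served by a single buddy chunk of order i - 1.
 */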
2732
2733 /* if stream allocation is enabled, use global goal */
2734 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2735 /* TBD: may be a hot point */
2736 spin_lock(&sbi->s_md_lock);
2737 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2738 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2739 spin_unlock(&sbi->s_md_lock);
2740 }
2741
2742 /* Let's just scan groups to find more or less suitable blocks */
2743 cr = ac->ac_2order ? 0 : 1;
2744 /*
2745 * cr == 0 try to get exact allocation,
2746 * cr == 3 try to get anything
2747 */
2748 repeat:
2749 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2750 ac->ac_criteria = cr;
2751 /*
2752 * start searching for the right group
2753 * from the specified goal value
2754 */
2755 group = ac->ac_g_ex.fe_group;
2756 ac->ac_last_optimal_group = group;
2757 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2758 prefetch_grp = group;
2759
2760 for (i = 0, new_cr = cr; i < ngroups; i++,
2761 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2762 int ret = 0;
2763
2764 cond_resched();
2765 if (new_cr != cr) {
2766 cr = new_cr;
2767 goto repeat;
2768 }
2769
2770 /*
2771 * Batch reads of the block allocation bitmaps
2772 * to get multiple READs in flight; limit
2773 * prefetching at cr=0/1, otherwise mballoc can
2774 * spend a lot of time loading imperfect groups
2775 */
2776 if ((prefetch_grp == group) &&
2777 (cr > 1 ||
2778 prefetch_ios < sbi->s_mb_prefetch_limit)) {
2779 unsigned int curr_ios = prefetch_ios;
2780
2781 nr = sbi->s_mb_prefetch;
2782 if (ext4_has_feature_flex_bg(sb)) {
2783 nr = 1 << sbi->s_log_groups_per_flex;
2784 nr -= group & (nr - 1);
2785 nr = min(nr, sbi->s_mb_prefetch);
2786 }
2787 prefetch_grp = ext4_mb_prefetch(sb, group,
2788 nr, &prefetch_ios);
2789 if (prefetch_ios == curr_ios)
2790 nr = 0;
2791 }
2792
2793 /* This now checks without needing the buddy page */
2794 ret = ext4_mb_good_group_nolock(ac, group, cr);
2795 if (ret <= 0) {
2796 if (!first_err)
2797 first_err = ret;
2798 continue;
2799 }
2800
2801 err = ext4_mb_load_buddy(sb, group, &e4b);
2802 if (err)
2803 goto out;
2804
2805 ext4_lock_group(sb, group);
2806
2807 /*
2808 * We need to check again after locking the
2809 * block group
2810 */
2811 ret = ext4_mb_good_group(ac, group, cr);
2812 if (ret == 0) {
2813 ext4_unlock_group(sb, group);
2814 ext4_mb_unload_buddy(&e4b);
2815 continue;
2816 }
2817
2818 ac->ac_groups_scanned++;
2819 if (cr == 0)
2820 ext4_mb_simple_scan_group(ac, &e4b);
2821 else if (cr == 1 && sbi->s_stripe &&
2822 !(ac->ac_g_ex.fe_len % sbi->s_stripe))
2823 ext4_mb_scan_aligned(ac, &e4b);
2824 else
2825 ext4_mb_complex_scan_group(ac, &e4b);
2826
2827 ext4_unlock_group(sb, group);
2828 ext4_mb_unload_buddy(&e4b);
2829
2830 if (ac->ac_status != AC_STATUS_CONTINUE)
2831 break;
2832 }
2833 /* Processed all groups and haven't found blocks */
2834 if (sbi->s_mb_stats && i == ngroups)
2835 atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2836 }
2837
2838 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2839 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2840 /*
2841 * We've been searching too long. Let's try to allocate
2842 * the best chunk we've found so far
2843 */
2844 ext4_mb_try_best_found(ac, &e4b);
2845 if (ac->ac_status != AC_STATUS_FOUND) {
2846 /*
2847 * Someone luckier has already allocated it.
2848 * The only thing we can do is just take the first
2849 * found block(s)
2850 */
2851 lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2852 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2853 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2854 ac->ac_b_ex.fe_len, lost);
2855
2856 ac->ac_b_ex.fe_group = 0;
2857 ac->ac_b_ex.fe_start = 0;
2858 ac->ac_b_ex.fe_len = 0;
2859 ac->ac_status = AC_STATUS_CONTINUE;
2860 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2861 cr = 3;
2862 goto repeat;
2863 }
2864 }
2865
2866 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2867 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2868 out:
2869 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2870 err = first_err;
2871
2872 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2873 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2874 ac->ac_flags, cr, err);
2875
2876 if (nr)
2877 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2878
2879 return err;
2880 }
2881
2882 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2883 {
2884 struct super_block *sb = PDE_DATA(file_inode(seq->file));
2885 ext4_group_t group;
2886
2887 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2888 return NULL;
2889 group = *pos + 1;
2890 return (void *) ((unsigned long) group);
2891 }
2892
2893 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2894 {
2895 struct super_block *sb = PDE_DATA(file_inode(seq->file));
2896 ext4_group_t group;
2897
2898 ++*pos;
2899 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2900 return NULL;
2901 group = *pos + 1;
2902 return (void *) ((unsigned long) group);
2903 }
2904
2905 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2906 {
2907 struct super_block *sb = PDE_DATA(file_inode(seq->file));
2908 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2909 int i;
2910 int err, buddy_loaded = 0;
2911 struct ext4_buddy e4b;
2912 struct ext4_group_info *grinfo;
2913 unsigned char blocksize_bits = min_t(unsigned char,
2914 sb->s_blocksize_bits,
2915 EXT4_MAX_BLOCK_LOG_SIZE);
2916 struct sg {
2917 struct ext4_group_info info;
2918 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2919 } sg;
2920
2921 group--;
2922 if (group == 0)
2923 seq_puts(seq, "#group: free frags first ["
2924 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
2925 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
2926
2927 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2928 sizeof(struct ext4_group_info);
2929
2930 grinfo = ext4_get_group_info(sb, group);
2931 if (!grinfo)
2932 return 0;
2933 /* Load the group info in memory only if not already loaded. */
2934 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2935 err = ext4_mb_load_buddy(sb, group, &e4b);
2936 if (err) {
2937 seq_printf(seq, "#%-5u: I/O error\n", group);
2938 return 0;
2939 }
2940 buddy_loaded = 1;
2941 }
2942
2943 memcpy(&sg, grinfo, i);
2944
2945 if (buddy_loaded)
2946 ext4_mb_unload_buddy(&e4b);
2947
2948 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2949 sg.info.bb_fragments, sg.info.bb_first_free);
2950 for (i = 0; i <= 13; i++)
2951 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2952 sg.info.bb_counters[i] : 0);
2953 seq_puts(seq, " ]\n");
2954
2955 return 0;
2956 }
2957
2958 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2959 {
2960 }
2961
2962 const struct seq_operations ext4_mb_seq_groups_ops = {
2963 .start = ext4_mb_seq_groups_start,
2964 .next = ext4_mb_seq_groups_next,
2965 .stop = ext4_mb_seq_groups_stop,
2966 .show = ext4_mb_seq_groups_show,
2967 };
2968
2969 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
2970 {
2971 struct super_block *sb = (struct super_block *)seq->private;
2972 struct ext4_sb_info *sbi = EXT4_SB(sb);
2973
2974 seq_puts(seq, "mballoc:\n");
2975 if (!sbi->s_mb_stats) {
2976 seq_puts(seq, "\tmb stats collection turned off.\n");
2977 seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
2978 return 0;
2979 }
2980 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
2981 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
2982
2983 seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned));
2984
2985 seq_puts(seq, "\tcr0_stats:\n");
2986 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
2987 seq_printf(seq, "\t\tgroups_considered: %llu\n",
2988 atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
2989 seq_printf(seq, "\t\tuseless_loops: %llu\n",
2990 atomic64_read(&sbi->s_bal_cX_failed[0]));
2991 seq_printf(seq, "\t\tbad_suggestions: %u\n",
2992 atomic_read(&sbi->s_bal_cr0_bad_suggestions));
2993
2994 seq_puts(seq, "\tcr1_stats:\n");
2995 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
2996 seq_printf(seq, "\t\tgroups_considered: %llu\n",
2997 atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
2998 seq_printf(seq, "\t\tuseless_loops: %llu\n",
2999 atomic64_read(&sbi->s_bal_cX_failed[1]));
3000 seq_printf(seq, "\t\tbad_suggestions: %u\n",
3001 atomic_read(&sbi->s_bal_cr1_bad_suggestions));
3002
3003 seq_puts(seq, "\tcr2_stats:\n");
3004 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
3005 seq_printf(seq, "\t\tgroups_considered: %llu\n",
3006 atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
3007 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3008 atomic64_read(&sbi->s_bal_cX_failed[2]));
3009
3010 seq_puts(seq, "\tcr3_stats:\n");
3011 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
3012 seq_printf(seq, "\t\tgroups_considered: %llu\n",
3013 atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
3014 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3015 atomic64_read(&sbi->s_bal_cX_failed[3]));
3016 seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
3017 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
3018 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
3019 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
3020 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
3021
3022 seq_printf(seq, "\tbuddies_generated: %u/%u\n",
3023 atomic_read(&sbi->s_mb_buddies_generated),
3024 ext4_get_groups_count(sb));
3025 seq_printf(seq, "\tbuddies_time_used: %llu\n",
3026 atomic64_read(&sbi->s_mb_generation_time));
3027 seq_printf(seq, "\tpreallocated: %u\n",
3028 atomic_read(&sbi->s_mb_preallocated));
3029 seq_printf(seq, "\tdiscarded: %u\n",
3030 atomic_read(&sbi->s_mb_discarded));
3031 return 0;
3032 }
3033
3034 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
3035 __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
3036 {
3037 struct super_block *sb = PDE_DATA(file_inode(seq->file));
3038 unsigned long position;
3039
3040 read_lock(&EXT4_SB(sb)->s_mb_rb_lock);
3041
3042 if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
3043 return NULL;
3044 position = *pos + 1;
3045 return (void *) ((unsigned long) position);
3046 }
3047
3048 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
3049 {
3050 struct super_block *sb = PDE_DATA(file_inode(seq->file));
3051 unsigned long position;
3052
3053 ++*pos;
3054 if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
3055 return NULL;
3056 position = *pos + 1;
3057 return (void *) ((unsigned long) position);
3058 }
3059
3060 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
3061 {
3062 struct super_block *sb = PDE_DATA(file_inode(seq->file));
3063 struct ext4_sb_info *sbi = EXT4_SB(sb);
3064 unsigned long position = ((unsigned long) v);
3065 struct ext4_group_info *grp;
3066 struct rb_node *n;
3067 unsigned int count, min, max;
3068
3069 position--;
3070 if (position >= MB_NUM_ORDERS(sb)) {
3071 seq_puts(seq, "fragment_size_tree:\n");
3072 n = rb_first(&sbi->s_mb_avg_fragment_size_root);
3073 if (!n) {
3074 seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n");
3075 return 0;
3076 }
3077 grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
3078 min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
3079 count = 1;
3080 while (rb_next(n)) {
3081 count++;
3082 n = rb_next(n);
3083 }
3084 grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
3085 max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
3086
3087 seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n",
3088 min, max, count);
3089 return 0;
3090 }
3091
3092 if (position == 0) {
3093 seq_printf(seq, "optimize_scan: %d\n",
3094 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
3095 seq_puts(seq, "max_free_order_lists:\n");
3096 }
3097 count = 0;
3098 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3099 bb_largest_free_order_node)
3100 count++;
3101 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3102 (unsigned int)position, count);
3103
3104 return 0;
3105 }
3106
3107 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3108 __releases(&EXT4_SB(sb)->s_mb_rb_lock)
3109 {
3110 struct super_block *sb = PDE_DATA(file_inode(seq->file));
3111
3112 read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
3113 }
3114
3115 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3116 .start = ext4_mb_seq_structs_summary_start,
3117 .next = ext4_mb_seq_structs_summary_next,
3118 .stop = ext4_mb_seq_structs_summary_stop,
3119 .show = ext4_mb_seq_structs_summary_show,
3120 };
3121
3122 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3123 {
3124 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3125 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3126
3127 BUG_ON(!cachep);
3128 return cachep;
3129 }
3130
3131 /*
3132 * Allocate the top-level s_group_info array for the specified number
3133 * of groups
3134 */
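/*
 * s_group_info is a two-level table: the top level holds one pointer per
 * descriptor block worth of groups (EXT4_DESC_PER_BLOCK entries each), and
 * each second-level table, allocated in ext4_mb_add_groupinfo(), holds the
 * per-group ext4_group_info pointers. Growing the filesystem therefore only
 * needs to reallocate the small top-level array here.
 */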
3135 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3136 {
3137 struct ext4_sb_info *sbi = EXT4_SB(sb);
3138 unsigned size;
3139 struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3140
3141 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3142 EXT4_DESC_PER_BLOCK_BITS(sb);
3143 if (size <= sbi->s_group_info_size)
3144 return 0;
3145
3146 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3147 new_groupinfo = kvzalloc(size, GFP_KERNEL);
3148 if (!new_groupinfo) {
3149 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3150 return -ENOMEM;
3151 }
3152 rcu_read_lock();
3153 old_groupinfo = rcu_dereference(sbi->s_group_info);
3154 if (old_groupinfo)
3155 memcpy(new_groupinfo, old_groupinfo,
3156 sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3157 rcu_read_unlock();
3158 rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3159 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3160 if (old_groupinfo)
3161 ext4_kvfree_array_rcu(old_groupinfo);
3162 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3163 sbi->s_group_info_size);
3164 return 0;
3165 }
3166
3167 /* Create and initialize ext4_group_info data for the given group. */
3168 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3169 struct ext4_group_desc *desc)
3170 {
3171 int i;
3172 int metalen = 0;
3173 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3174 struct ext4_sb_info *sbi = EXT4_SB(sb);
3175 struct ext4_group_info **meta_group_info;
3176 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3177
3178 /*
3179 * First check if this group is the first of a descriptor block.
3180 * If it is, we have to allocate a new table of pointers
3181 * to ext4_group_info structures
3182 */
3183 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3184 metalen = sizeof(*meta_group_info) <<
3185 EXT4_DESC_PER_BLOCK_BITS(sb);
3186 meta_group_info = kmalloc(metalen, GFP_NOFS);
3187 if (meta_group_info == NULL) {
3188 ext4_msg(sb, KERN_ERR, "can't allocate mem "
3189 "for a buddy group");
3190 goto exit_meta_group_info;
3191 }
3192 rcu_read_lock();
3193 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3194 rcu_read_unlock();
3195 }
3196
3197 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3198 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3199
3200 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3201 if (meta_group_info[i] == NULL) {
3202 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3203 goto exit_group_info;
3204 }
3205 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3206 &(meta_group_info[i]->bb_state));
3207
3208 /*
3209 * initialize bb_free to be able to skip
3210 * empty groups without initialization
3211 */
3212 if (ext4_has_group_desc_csum(sb) &&
3213 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3214 meta_group_info[i]->bb_free =
3215 ext4_free_clusters_after_init(sb, group, desc);
3216 } else {
3217 meta_group_info[i]->bb_free =
3218 ext4_free_group_clusters(sb, desc);
3219 }
3220
3221 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3222 init_rwsem(&meta_group_info[i]->alloc_sem);
3223 meta_group_info[i]->bb_free_root = RB_ROOT;
3224 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3225 RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb);
3226 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
3227 meta_group_info[i]->bb_group = group;
3228
3229 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3230 return 0;
3231
3232 exit_group_info:
3233 /* If a meta_group_info table has been allocated, release it now */
3234 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3235 struct ext4_group_info ***group_info;
3236
3237 rcu_read_lock();
3238 group_info = rcu_dereference(sbi->s_group_info);
3239 kfree(group_info[idx]);
3240 group_info[idx] = NULL;
3241 rcu_read_unlock();
3242 }
3243 exit_meta_group_info:
3244 return -ENOMEM;
3245 } /* ext4_mb_add_groupinfo */
3246
3247 static int ext4_mb_init_backend(struct super_block *sb)
3248 {
3249 ext4_group_t ngroups = ext4_get_groups_count(sb);
3250 ext4_group_t i;
3251 struct ext4_sb_info *sbi = EXT4_SB(sb);
3252 int err;
3253 struct ext4_group_desc *desc;
3254 struct ext4_group_info ***group_info;
3255 struct kmem_cache *cachep;
3256
3257 err = ext4_mb_alloc_groupinfo(sb, ngroups);
3258 if (err)
3259 return err;
3260
3261 sbi->s_buddy_cache = new_inode(sb);
3262 if (sbi->s_buddy_cache == NULL) {
3263 ext4_msg(sb, KERN_ERR, "can't get new inode");
3264 goto err_freesgi;
3265 }
3266 /* To avoid potentially colliding with a valid on-disk inode number,
3267 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
3268 * not in the inode hash, so it should never be found by iget(), but
3269 * this will avoid confusion if it ever shows up during debugging. */
3270 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3271 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3272 for (i = 0; i < ngroups; i++) {
3273 cond_resched();
3274 desc = ext4_get_group_desc(sb, i, NULL);
3275 if (desc == NULL) {
3276 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3277 goto err_freebuddy;
3278 }
3279 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3280 goto err_freebuddy;
3281 }
3282
3283 if (ext4_has_feature_flex_bg(sb)) {
3284 /* a single flex group is supposed to be read by a single IO.
3285 * 2 ^ s_log_groups_per_flex is stored in s_mb_prefetch, an unsigned
3286 * int, so a shift of 32 or more would overflow it; reject such values.
3287 */
3288 if (sbi->s_es->s_log_groups_per_flex >= 32) {
3289 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3290 goto err_freebuddy;
3291 }
3292 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3293 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3294 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3295 } else {
3296 sbi->s_mb_prefetch = 32;
3297 }
3298 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3299 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3300 /* How many real IOs to prefetch within a single allocation at cr=0.
3301 * Given that cr=0 is a CPU-related optimization we shouldn't try to
3302 * load too many groups; at some point we should start to use what
3303 * we've got in memory.
3304 * With an average random access time of 5ms, it'd take a second to get
3305 * 200 groups (* N with flex_bg), so let's make this limit 4
3306 */
3307 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3308 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3309 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3310
3311 return 0;
3312
3313 err_freebuddy:
3314 cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3315 while (i-- > 0) {
3316 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3317
3318 if (grp)
3319 kmem_cache_free(cachep, grp);
3320 }
3321 i = sbi->s_group_info_size;
3322 rcu_read_lock();
3323 group_info = rcu_dereference(sbi->s_group_info);
3324 while (i-- > 0)
3325 kfree(group_info[i]);
3326 rcu_read_unlock();
3327 iput(sbi->s_buddy_cache);
3328 err_freesgi:
3329 rcu_read_lock();
3330 kvfree(rcu_dereference(sbi->s_group_info));
3331 rcu_read_unlock();
3332 return -ENOMEM;
3333 }
3334
3335 static void ext4_groupinfo_destroy_slabs(void)
3336 {
3337 int i;
3338
3339 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3340 kmem_cache_destroy(ext4_groupinfo_caches[i]);
3341 ext4_groupinfo_caches[i] = NULL;
3342 }
3343 }
3344
3345 static int ext4_groupinfo_create_slab(size_t size)
3346 {
3347 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3348 int slab_size;
3349 int blocksize_bits = order_base_2(size);
3350 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3351 struct kmem_cache *cachep;
3352
3353 if (cache_index >= NR_GRPINFO_CACHES)
3354 return -EINVAL;
3355
3356 if (unlikely(cache_index < 0))
3357 cache_index = 0;
3358
3359 mutex_lock(&ext4_grpinfo_slab_create_mutex);
3360 if (ext4_groupinfo_caches[cache_index]) {
3361 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3362 return 0; /* Already created */
3363 }
3364
3365 slab_size = offsetof(struct ext4_group_info,
3366 bb_counters[blocksize_bits + 2]);
3367
3368 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3369 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3370 NULL);
3371
3372 ext4_groupinfo_caches[cache_index] = cachep;
3373
3374 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3375 if (!cachep) {
3376 printk(KERN_EMERG
3377 "EXT4-fs: no memory for groupinfo slab cache\n");
3378 return -ENOMEM;
3379 }
3380
3381 return 0;
3382 }
3383
3384 static void ext4_discard_work(struct work_struct *work)
3385 {
3386 struct ext4_sb_info *sbi = container_of(work,
3387 struct ext4_sb_info, s_discard_work);
3388 struct super_block *sb = sbi->s_sb;
3389 struct ext4_free_data *fd, *nfd;
3390 struct ext4_buddy e4b;
3391 struct list_head discard_list;
3392 ext4_group_t grp, load_grp;
3393 int err = 0;
3394
3395 INIT_LIST_HEAD(&discard_list);
3396 spin_lock(&sbi->s_md_lock);
3397 list_splice_init(&sbi->s_discard_list, &discard_list);
3398 spin_unlock(&sbi->s_md_lock);
3399
3400 load_grp = UINT_MAX;
3401 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3402 /*
3403 * If the filesystem is unmounting, we are out of memory, or we are
3404 * suffering from no space, give up the discard
3405 */
3406 if ((sb->s_flags & SB_ACTIVE) && !err &&
3407 !atomic_read(&sbi->s_retry_alloc_pending)) {
3408 grp = fd->efd_group;
3409 if (grp != load_grp) {
3410 if (load_grp != UINT_MAX)
3411 ext4_mb_unload_buddy(&e4b);
3412
3413 err = ext4_mb_load_buddy(sb, grp, &e4b);
3414 if (err) {
3415 kmem_cache_free(ext4_free_data_cachep, fd);
3416 load_grp = UINT_MAX;
3417 continue;
3418 } else {
3419 load_grp = grp;
3420 }
3421 }
3422
3423 ext4_lock_group(sb, grp);
3424 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3425 fd->efd_start_cluster + fd->efd_count - 1, 1);
3426 ext4_unlock_group(sb, grp);
3427 }
3428 kmem_cache_free(ext4_free_data_cachep, fd);
3429 }
3430
3431 if (load_grp != UINT_MAX)
3432 ext4_mb_unload_buddy(&e4b);
3433 }
3434
3435 int ext4_mb_init(struct super_block *sb)
3436 {
3437 struct ext4_sb_info *sbi = EXT4_SB(sb);
3438 unsigned i, j;
3439 unsigned offset, offset_incr;
3440 unsigned max;
3441 int ret;
3442
3443 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3444
3445 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3446 if (sbi->s_mb_offsets == NULL) {
3447 ret = -ENOMEM;
3448 goto out;
3449 }
3450
3451 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3452 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3453 if (sbi->s_mb_maxs == NULL) {
3454 ret = -ENOMEM;
3455 goto out;
3456 }
3457
3458 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3459 if (ret < 0)
3460 goto out;
3461
3462 /* order 0 is regular bitmap */
3463 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3464 sbi->s_mb_offsets[0] = 0;
3465
3466 i = 1;
3467 offset = 0;
3468 offset_incr = 1 << (sb->s_blocksize_bits - 1);
3469 max = sb->s_blocksize << 2;
3470 do {
3471 sbi->s_mb_offsets[i] = offset;
3472 sbi->s_mb_maxs[i] = max;
3473 offset += offset_incr;
3474 offset_incr = offset_incr >> 1;
3475 max = max >> 1;
3476 i++;
3477 } while (i < MB_NUM_ORDERS(sb));
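/*
 * A hedged sketch of the tables built above, assuming a 4KB block size
 * (blocksize_bits = 12) and reading them the way mb_find_buddy() appears to:
 *
 *   order 0: offset 0     max 32768   (the regular bitmap itself)
 *   order 1: offset 0     max 16384   (start of the buddy data)
 *   order 2: offset 2048  max 8192
 *   order 3: offset 3072  max 4096
 *   ...
 *
 * each successive order starts offset_incr further in and tracks half as
 * many bits, with offset_incr itself halving every step.
 */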
3478
3479 sbi->s_mb_avg_fragment_size_root = RB_ROOT;
3480 sbi->s_mb_largest_free_orders =
3481 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3482 GFP_KERNEL);
3483 if (!sbi->s_mb_largest_free_orders) {
3484 ret = -ENOMEM;
3485 goto out;
3486 }
3487 sbi->s_mb_largest_free_orders_locks =
3488 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3489 GFP_KERNEL);
3490 if (!sbi->s_mb_largest_free_orders_locks) {
3491 ret = -ENOMEM;
3492 goto out;
3493 }
3494 for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3495 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3496 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3497 }
3498 rwlock_init(&sbi->s_mb_rb_lock);
3499
3500 spin_lock_init(&sbi->s_md_lock);
3501 sbi->s_mb_free_pending = 0;
3502 INIT_LIST_HEAD(&sbi->s_freed_data_list);
3503 INIT_LIST_HEAD(&sbi->s_discard_list);
3504 INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3505 atomic_set(&sbi->s_retry_alloc_pending, 0);
3506
3507 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3508 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3509 sbi->s_mb_stats = MB_DEFAULT_STATS;
3510 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3511 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3512 sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
3513 /*
3514 * The default group preallocation is 512, which for 4k block
3515 * sizes translates to 2 megabytes. However for bigalloc file
3516 * systems, this is probably too big (i.e, if the cluster size
3517 * is 1 megabyte, then group preallocation size becomes half a
3518 * gigabyte!). As a default, we will keep a two megabyte
3519 * group pralloc size for cluster sizes up to 64k, and after
3520 * that, we will force a minimum group preallocation size of
3521 * 32 clusters. This translates to 8 megs when the cluster
3522 * size is 256k, and 32 megs when the cluster size is 1 meg,
3523 * which seems reasonable as a default.
3524 */
3525 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3526 sbi->s_cluster_bits, 32);
3527 /*
3528 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3529 * to the lowest multiple of s_stripe which is bigger than
3530 * the s_mb_group_prealloc as determined above. We want
3531 * the preallocation size to be an exact multiple of the
3532 * RAID stripe size so that preallocations don't fragment
3533 * the stripes.
3534 */
3535 if (sbi->s_stripe > 1) {
3536 sbi->s_mb_group_prealloc = roundup(
3537 sbi->s_mb_group_prealloc, sbi->s_stripe);
3538 }
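/*
 * A worked example of the two defaults above (illustrative numbers): with
 * MB_DEFAULT_GROUP_PREALLOC = 512 and a bigalloc cluster of 64 blocks
 * (s_cluster_bits = 6), 512 >> 6 = 8, so max() clamps the group prealloc to
 * 32 clusters. If s_stripe were 24, roundup(32, 24) = 48 clusters, keeping
 * the preallocation a whole number of stripes.
 */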
3539
3540 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3541 if (sbi->s_locality_groups == NULL) {
3542 ret = -ENOMEM;
3543 goto out;
3544 }
3545 for_each_possible_cpu(i) {
3546 struct ext4_locality_group *lg;
3547 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3548 mutex_init(&lg->lg_mutex);
3549 for (j = 0; j < PREALLOC_TB_SIZE; j++)
3550 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3551 spin_lock_init(&lg->lg_prealloc_lock);
3552 }
3553
3554 if (blk_queue_nonrot(bdev_get_queue(sb->s_bdev)))
3555 sbi->s_mb_max_linear_groups = 0;
3556 else
3557 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3558 /* init file for buddy data */
3559 ret = ext4_mb_init_backend(sb);
3560 if (ret != 0)
3561 goto out_free_locality_groups;
3562
3563 return 0;
3564
3565 out_free_locality_groups:
3566 free_percpu(sbi->s_locality_groups);
3567 sbi->s_locality_groups = NULL;
3568 out:
3569 kfree(sbi->s_mb_largest_free_orders);
3570 kfree(sbi->s_mb_largest_free_orders_locks);
3571 kfree(sbi->s_mb_offsets);
3572 sbi->s_mb_offsets = NULL;
3573 kfree(sbi->s_mb_maxs);
3574 sbi->s_mb_maxs = NULL;
3575 return ret;
3576 }
3577
3578 /* needs to be called with the ext4 group lock held */
3579 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3580 {
3581 struct ext4_prealloc_space *pa;
3582 struct list_head *cur, *tmp;
3583 int count = 0;
3584
3585 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3586 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3587 list_del(&pa->pa_group_list);
3588 count++;
3589 kmem_cache_free(ext4_pspace_cachep, pa);
3590 }
3591 return count;
3592 }
3593
3594 int ext4_mb_release(struct super_block *sb)
3595 {
3596 ext4_group_t ngroups = ext4_get_groups_count(sb);
3597 ext4_group_t i;
3598 int num_meta_group_infos;
3599 struct ext4_group_info *grinfo, ***group_info;
3600 struct ext4_sb_info *sbi = EXT4_SB(sb);
3601 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3602 int count;
3603
3604 if (test_opt(sb, DISCARD)) {
3605 /*
3606 * wait for the discard work to drain all of ext4_free_data
3607 */
3608 flush_work(&sbi->s_discard_work);
3609 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3610 }
3611
3612 if (sbi->s_group_info) {
3613 for (i = 0; i < ngroups; i++) {
3614 cond_resched();
3615 grinfo = ext4_get_group_info(sb, i);
3616 if (!grinfo)
3617 continue;
3618 mb_group_bb_bitmap_free(grinfo);
3619 ext4_lock_group(sb, i);
3620 count = ext4_mb_cleanup_pa(grinfo);
3621 if (count)
3622 mb_debug(sb, "mballoc: %d PAs left\n",
3623 count);
3624 ext4_unlock_group(sb, i);
3625 kmem_cache_free(cachep, grinfo);
3626 }
3627 num_meta_group_infos = (ngroups +
3628 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3629 EXT4_DESC_PER_BLOCK_BITS(sb);
3630 rcu_read_lock();
3631 group_info = rcu_dereference(sbi->s_group_info);
3632 for (i = 0; i < num_meta_group_infos; i++)
3633 kfree(group_info[i]);
3634 kvfree(group_info);
3635 rcu_read_unlock();
3636 }
3637 kfree(sbi->s_mb_largest_free_orders);
3638 kfree(sbi->s_mb_largest_free_orders_locks);
3639 kfree(sbi->s_mb_offsets);
3640 kfree(sbi->s_mb_maxs);
3641 iput(sbi->s_buddy_cache);
3642 if (sbi->s_mb_stats) {
3643 ext4_msg(sb, KERN_INFO,
3644 "mballoc: %u blocks %u reqs (%u success)",
3645 atomic_read(&sbi->s_bal_allocated),
3646 atomic_read(&sbi->s_bal_reqs),
3647 atomic_read(&sbi->s_bal_success));
3648 ext4_msg(sb, KERN_INFO,
3649 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3650 "%u 2^N hits, %u breaks, %u lost",
3651 atomic_read(&sbi->s_bal_ex_scanned),
3652 atomic_read(&sbi->s_bal_groups_scanned),
3653 atomic_read(&sbi->s_bal_goals),
3654 atomic_read(&sbi->s_bal_2orders),
3655 atomic_read(&sbi->s_bal_breaks),
3656 atomic_read(&sbi->s_mb_lost_chunks));
3657 ext4_msg(sb, KERN_INFO,
3658 "mballoc: %u generated and it took %llu",
3659 atomic_read(&sbi->s_mb_buddies_generated),
3660 atomic64_read(&sbi->s_mb_generation_time));
3661 ext4_msg(sb, KERN_INFO,
3662 "mballoc: %u preallocated, %u discarded",
3663 atomic_read(&sbi->s_mb_preallocated),
3664 atomic_read(&sbi->s_mb_discarded));
3665 }
3666
3667 free_percpu(sbi->s_locality_groups);
3668
3669 return 0;
3670 }
3671
3672 static inline int ext4_issue_discard(struct super_block *sb,
3673 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3674 struct bio **biop)
3675 {
3676 ext4_fsblk_t discard_block;
3677
3678 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3679 ext4_group_first_block_no(sb, block_group));
3680 count = EXT4_C2B(EXT4_SB(sb), count);
3681 trace_ext4_discard_blocks(sb,
3682 (unsigned long long) discard_block, count);
3683 if (biop) {
3684 return __blkdev_issue_discard(sb->s_bdev,
3685 (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3686 (sector_t)count << (sb->s_blocksize_bits - 9),
3687 GFP_NOFS, 0, biop);
3688 } else
3689 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3690 }
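/*
 * A sketch of the unit conversion above (assuming a 4KB block size, so
 * s_blocksize_bits - 9 = 3): a discard of 16 blocks starting at filesystem
 * block 1000 becomes 128 sectors starting at sector 8000, since each 4KB
 * block spans eight 512-byte sectors.
 */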
3691
3692 static void ext4_free_data_in_buddy(struct super_block *sb,
3693 struct ext4_free_data *entry)
3694 {
3695 struct ext4_buddy e4b;
3696 struct ext4_group_info *db;
3697 int err, count = 0, count2 = 0;
3698
3699 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3700 entry->efd_count, entry->efd_group, entry);
3701
3702 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3703 /* we expect to find existing buddy because it's pinned */
3704 BUG_ON(err != 0);
3705
3706 spin_lock(&EXT4_SB(sb)->s_md_lock);
3707 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3708 spin_unlock(&EXT4_SB(sb)->s_md_lock);
3709
3710 db = e4b.bd_info;
3711 /* there are blocks to put in buddy to make them really free */
3712 count += entry->efd_count;
3713 count2++;
3714 ext4_lock_group(sb, entry->efd_group);
3715 /* Take it out of per group rb tree */
3716 rb_erase(&entry->efd_node, &(db->bb_free_root));
3717 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3718
3719 /*
3720 * Clear the trimmed flag for the group so that the next
3721 * ext4_trim_fs can trim it.
3722 * If the volume is mounted with -o discard, online discard
3723 * is supported and the free blocks will be trimmed online.
3724 */
3725 if (!test_opt(sb, DISCARD))
3726 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3727
3728 if (!db->bb_free_root.rb_node) {
3729 /* No more items in the per group rb tree
3730 * balance refcounts from ext4_mb_free_metadata()
3731 */
3732 put_page(e4b.bd_buddy_page);
3733 put_page(e4b.bd_bitmap_page);
3734 }
3735 ext4_unlock_group(sb, entry->efd_group);
3736 ext4_mb_unload_buddy(&e4b);
3737
3738 mb_debug(sb, "freed %d blocks in %d structures\n", count,
3739 count2);
3740 }
3741
3742 /*
3743 * This function is called by the jbd2 layer once the commit has finished,
3744 * so we know we can free the blocks that were released with that commit.
3745 */
3746 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3747 {
3748 struct ext4_sb_info *sbi = EXT4_SB(sb);
3749 struct ext4_free_data *entry, *tmp;
3750 struct list_head freed_data_list;
3751 struct list_head *cut_pos = NULL;
3752 bool wake;
3753
3754 INIT_LIST_HEAD(&freed_data_list);
3755
3756 spin_lock(&sbi->s_md_lock);
3757 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3758 if (entry->efd_tid != commit_tid)
3759 break;
3760 cut_pos = &entry->efd_list;
3761 }
3762 if (cut_pos)
3763 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3764 cut_pos);
3765 spin_unlock(&sbi->s_md_lock);
3766
3767 list_for_each_entry(entry, &freed_data_list, efd_list)
3768 ext4_free_data_in_buddy(sb, entry);
3769
3770 if (test_opt(sb, DISCARD)) {
3771 spin_lock(&sbi->s_md_lock);
3772 wake = list_empty(&sbi->s_discard_list);
3773 list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3774 spin_unlock(&sbi->s_md_lock);
3775 if (wake)
3776 queue_work(system_unbound_wq, &sbi->s_discard_work);
3777 } else {
3778 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3779 kmem_cache_free(ext4_free_data_cachep, entry);
3780 }
3781 }
3782
3783 int __init ext4_init_mballoc(void)
3784 {
3785 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3786 SLAB_RECLAIM_ACCOUNT);
3787 if (ext4_pspace_cachep == NULL)
3788 goto out;
3789
3790 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3791 SLAB_RECLAIM_ACCOUNT);
3792 if (ext4_ac_cachep == NULL)
3793 goto out_pa_free;
3794
3795 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3796 SLAB_RECLAIM_ACCOUNT);
3797 if (ext4_free_data_cachep == NULL)
3798 goto out_ac_free;
3799
3800 return 0;
3801
3802 out_ac_free:
3803 kmem_cache_destroy(ext4_ac_cachep);
3804 out_pa_free:
3805 kmem_cache_destroy(ext4_pspace_cachep);
3806 out:
3807 return -ENOMEM;
3808 }
3809
3810 void ext4_exit_mballoc(void)
3811 {
3812 /*
3813 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3814 * before destroying the slab cache.
3815 */
3816 rcu_barrier();
3817 kmem_cache_destroy(ext4_pspace_cachep);
3818 kmem_cache_destroy(ext4_ac_cachep);
3819 kmem_cache_destroy(ext4_free_data_cachep);
3820 ext4_groupinfo_destroy_slabs();
3821 }
3822
3823
3824 /*
3825 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3826 * Returns 0 if success or error code
3827 */
3828 static noinline_for_stack int
3829 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3830 handle_t *handle, unsigned int reserv_clstrs)
3831 {
3832 struct buffer_head *bitmap_bh = NULL;
3833 struct ext4_group_desc *gdp;
3834 struct buffer_head *gdp_bh;
3835 struct ext4_sb_info *sbi;
3836 struct super_block *sb;
3837 ext4_fsblk_t block;
3838 int err, len;
3839
3840 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3841 BUG_ON(ac->ac_b_ex.fe_len <= 0);
3842
3843 sb = ac->ac_sb;
3844 sbi = EXT4_SB(sb);
3845
3846 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3847 if (IS_ERR(bitmap_bh)) {
3848 err = PTR_ERR(bitmap_bh);
3849 bitmap_bh = NULL;
3850 goto out_err;
3851 }
3852
3853 BUFFER_TRACE(bitmap_bh, "getting write access");
3854 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3855 EXT4_JTR_NONE);
3856 if (err)
3857 goto out_err;
3858
3859 err = -EIO;
3860 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3861 if (!gdp)
3862 goto out_err;
3863
3864 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3865 ext4_free_group_clusters(sb, gdp));
3866
3867 BUFFER_TRACE(gdp_bh, "get_write_access");
3868 err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
3869 if (err)
3870 goto out_err;
3871
3872 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3873
3874 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3875 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3876 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3877 "fs metadata", block, block+len);
3878 /* The file system is mounted not to panic on error,
3879 * so fix the bitmap and return EFSCORRUPTED.
3880 * We leak some of the blocks here.
3881 */
3882 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3883 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3884 ac->ac_b_ex.fe_len);
3885 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3886 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3887 if (!err)
3888 err = -EFSCORRUPTED;
3889 goto out_err;
3890 }
3891
3892 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3893 #ifdef AGGRESSIVE_CHECK
3894 {
3895 int i;
3896 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3897 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3898 bitmap_bh->b_data));
3899 }
3900 }
3901 #endif
3902 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3903 ac->ac_b_ex.fe_len);
3904 if (ext4_has_group_desc_csum(sb) &&
3905 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3906 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3907 ext4_free_group_clusters_set(sb, gdp,
3908 ext4_free_clusters_after_init(sb,
3909 ac->ac_b_ex.fe_group, gdp));
3910 }
3911 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3912 ext4_free_group_clusters_set(sb, gdp, len);
3913 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3914 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3915
3916 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3917 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3918 /*
3919 * Now reduce the dirty block count also. Should not go negative
3920 */
3921 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3922 /* release all the reserved blocks if non delalloc */
3923 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3924 reserv_clstrs);
3925
3926 if (sbi->s_log_groups_per_flex) {
3927 ext4_group_t flex_group = ext4_flex_group(sbi,
3928 ac->ac_b_ex.fe_group);
3929 atomic64_sub(ac->ac_b_ex.fe_len,
3930 &sbi_array_rcu_deref(sbi, s_flex_groups,
3931 flex_group)->free_clusters);
3932 }
3933
3934 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3935 if (err)
3936 goto out_err;
3937 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3938
3939 out_err:
3940 brelse(bitmap_bh);
3941 return err;
3942 }
3943
3944 /*
3945 * Idempotent helper for Ext4 fast commit replay path to set the state of
3946 * blocks in bitmaps and update counters.
3947 */
3948 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3949 int len, int state)
3950 {
3951 struct buffer_head *bitmap_bh = NULL;
3952 struct ext4_group_desc *gdp;
3953 struct buffer_head *gdp_bh;
3954 struct ext4_sb_info *sbi = EXT4_SB(sb);
3955 ext4_group_t group;
3956 ext4_grpblk_t blkoff;
3957 int i, err;
3958 int already;
3959 unsigned int clen, clen_changed, thisgrp_len;
3960
3961 while (len > 0) {
3962 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3963
3964 /*
3965 * Check to see if we are freeing blocks across a group
3966 * boundary.
3967 * In case of flex_bg, (block, len) may span across more than
3968 * one group. In that case we need to get the corresponding
3969 * group metadata to work with.
3970 * For this we loop until the whole range has been handled.
3971 */
3972 thisgrp_len = min_t(unsigned int, (unsigned int)len,
3973 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3974 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
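/*
 * An example of the split above (illustrative, non-bigalloc geometry with
 * 32768 blocks per group): if blkoff sits 100 blocks short of the group end
 * and len is 300, thisgrp_len is 100 on this pass and the remaining 200
 * blocks are handled on the next loop iteration against the next group's
 * bitmap and descriptor.
 */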
3975
3976 bitmap_bh = ext4_read_block_bitmap(sb, group);
3977 if (IS_ERR(bitmap_bh)) {
3978 err = PTR_ERR(bitmap_bh);
3979 bitmap_bh = NULL;
3980 break;
3981 }
3982
3983 err = -EIO;
3984 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3985 if (!gdp)
3986 break;
3987
3988 ext4_lock_group(sb, group);
3989 already = 0;
3990 for (i = 0; i < clen; i++)
3991 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3992 !state)
3993 already++;
3994
3995 clen_changed = clen - already;
3996 if (state)
3997 ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
3998 else
3999 mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
4000 if (ext4_has_group_desc_csum(sb) &&
4001 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
4002 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
4003 ext4_free_group_clusters_set(sb, gdp,
4004 ext4_free_clusters_after_init(sb, group, gdp));
4005 }
4006 if (state)
4007 clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
4008 else
4009 clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
4010
4011 ext4_free_group_clusters_set(sb, gdp, clen);
4012 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
4013 ext4_group_desc_csum_set(sb, group, gdp);
4014
4015 ext4_unlock_group(sb, group);
4016
4017 if (sbi->s_log_groups_per_flex) {
4018 ext4_group_t flex_group = ext4_flex_group(sbi, group);
4019 struct flex_groups *fg = sbi_array_rcu_deref(sbi,
4020 s_flex_groups, flex_group);
4021
4022 if (state)
4023 atomic64_sub(clen_changed, &fg->free_clusters);
4024 else
4025 atomic64_add(clen_changed, &fg->free_clusters);
4026
4027 }
4028
4029 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
4030 if (err)
4031 break;
4032 sync_dirty_buffer(bitmap_bh);
4033 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
4034 sync_dirty_buffer(gdp_bh);
4035 if (err)
4036 break;
4037
4038 block += thisgrp_len;
4039 len -= thisgrp_len;
4040 brelse(bitmap_bh);
4041 BUG_ON(len < 0);
4042 }
4043
4044 if (err)
4045 brelse(bitmap_bh);
4046 }
4047
4048 /*
4049 * here we normalize the request for the locality group.
4050 * Group requests are normalized to s_mb_group_prealloc, which is set
4051 * from s_stripe when the stripe= mount option is used.
4052 * s_mb_group_prealloc can be configured via
4053 * /sys/fs/ext4/<partition>/mb_group_prealloc
4054 *
4055 * XXX: should we try to preallocate more than the group has now?
4056 */
4057 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4058 {
4059 struct super_block *sb = ac->ac_sb;
4060 struct ext4_locality_group *lg = ac->ac_lg;
4061
4062 BUG_ON(lg == NULL);
4063 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4064 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4065 }
4066
4067 /*
4068 * Normalization means making request better in terms of
4069 * size and alignment
4070 */
4071 static noinline_for_stack void
4072 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4073 struct ext4_allocation_request *ar)
4074 {
4075 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4076 struct ext4_super_block *es = sbi->s_es;
4077 int bsbits, max;
4078 loff_t size, start_off, end;
4079 loff_t orig_size __maybe_unused;
4080 ext4_lblk_t start;
4081 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4082 struct ext4_prealloc_space *pa;
4083
4084 /* only normalize data requests; metadata requests
4085 do not need preallocation */
4086 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4087 return;
4088
4089 /* sometimes the caller may want exact blocks */
4090 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4091 return;
4092
4093 /* caller may indicate that preallocation isn't
4094 * required (it's a tail, for example) */
4095 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4096 return;
4097
4098 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4099 ext4_mb_normalize_group_request(ac);
4100 return ;
4101 }
4102
4103 bsbits = ac->ac_sb->s_blocksize_bits;
4104
4105 /* first, let's learn the actual file size
4106 * assuming the current request is allocated */
4107 size = extent_logical_end(sbi, &ac->ac_o_ex);
4108 size = size << bsbits;
4109 if (size < i_size_read(ac->ac_inode))
4110 size = i_size_read(ac->ac_inode);
4111 orig_size = size;
4112
4113 /* max size of free chunks */
4114 max = 2 << bsbits;
4115
4116 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \
4117 (req <= (size) || max <= (chunk_size))
4118
4119 /* first, try to predict filesize */
4120 /* XXX: should this table be tunable? */
4121 start_off = 0;
4122 if (size <= 16 * 1024) {
4123 size = 16 * 1024;
4124 } else if (size <= 32 * 1024) {
4125 size = 32 * 1024;
4126 } else if (size <= 64 * 1024) {
4127 size = 64 * 1024;
4128 } else if (size <= 128 * 1024) {
4129 size = 128 * 1024;
4130 } else if (size <= 256 * 1024) {
4131 size = 256 * 1024;
4132 } else if (size <= 512 * 1024) {
4133 size = 512 * 1024;
4134 } else if (size <= 1024 * 1024) {
4135 size = 1024 * 1024;
4136 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4137 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4138 (21 - bsbits)) << 21;
4139 size = 2 * 1024 * 1024;
4140 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4141 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4142 (22 - bsbits)) << 22;
4143 size = 4 * 1024 * 1024;
4144 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
4145 (8<<20)>>bsbits, max, 8 * 1024)) {
4146 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4147 (23 - bsbits)) << 23;
4148 size = 8 * 1024 * 1024;
4149 } else {
4150 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4151 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
4152 ac->ac_o_ex.fe_len) << bsbits;
4153 }
4154 size = size >> bsbits;
4155 start = start_off >> bsbits;
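/*
 * A non-authoritative worked example of the table above: a write leaving
 * the file at roughly 300KB is normalized to a 512KB request, while a file
 * in the low-megabyte range is steered to a 2MB, 4MB or 8MB window whose
 * start_off is aligned to the window size, so successive allocations for a
 * growing file tend to land in the same aligned window.
 */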
4156
4157 /*
4158 * For tiny groups (smaller than 8MB) the chosen allocation
4159 * alignment may be larger than group size. Make sure the
4160 * alignment does not move allocation to a different group which
4161 * makes mballoc fail assertions later.
4162 */
4163 start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4164 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4165
4166 /* avoid unnecessary preallocation that may trigger assertions */
4167 if (start + size > EXT_MAX_BLOCKS)
4168 size = EXT_MAX_BLOCKS - start;
4169
4170 /* don't cover already allocated blocks in selected range */
4171 if (ar->pleft && start <= ar->lleft) {
4172 size -= ar->lleft + 1 - start;
4173 start = ar->lleft + 1;
4174 }
4175 if (ar->pright && start + size - 1 >= ar->lright)
4176 size -= start + size - ar->lright;
4177
4178 /*
4179 * Trim allocation request for filesystems with artificially small
4180 * groups.
4181 */
4182 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4183 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4184
4185 end = start + size;
4186
4187 /* check we don't cross already preallocated blocks */
4188 rcu_read_lock();
4189 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4190 loff_t pa_end;
4191
4192 if (pa->pa_deleted)
4193 continue;
4194 spin_lock(&pa->pa_lock);
4195 if (pa->pa_deleted) {
4196 spin_unlock(&pa->pa_lock);
4197 continue;
4198 }
4199
4200 pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
4201
4202 /* PA must not overlap original request */
4203 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
4204 ac->ac_o_ex.fe_logical < pa->pa_lstart));
4205
4206 /* skip PAs this normalized request doesn't overlap with */
4207 if (pa->pa_lstart >= end || pa_end <= start) {
4208 spin_unlock(&pa->pa_lock);
4209 continue;
4210 }
4211 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
4212
4213 /* adjust start or end to be adjacent to this pa */
4214 if (pa_end <= ac->ac_o_ex.fe_logical) {
4215 BUG_ON(pa_end < start);
4216 start = pa_end;
4217 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4218 BUG_ON(pa->pa_lstart > end);
4219 end = pa->pa_lstart;
4220 }
4221 spin_unlock(&pa->pa_lock);
4222 }
4223 rcu_read_unlock();
4224 size = end - start;
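/*
 * An illustration of the trimming loop above: if the normalized range is
 * logical blocks [0, 1024) and the original request starts at 300, a PA
 * covering [0, 256) pulls start up to 256, while a PA covering [900, 1024)
 * pulls end down to 900, leaving size = 900 - 256 = 644 blocks.
 */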
4225
4226 /* XXX: extra loop to check we really don't overlap preallocations */
4227 rcu_read_lock();
4228 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4229 loff_t pa_end;
4230
4231 spin_lock(&pa->pa_lock);
4232 if (pa->pa_deleted == 0) {
4233 pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
4234 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
4235 }
4236 spin_unlock(&pa->pa_lock);
4237 }
4238 rcu_read_unlock();
4239
4240 if (start + size <= ac->ac_o_ex.fe_logical &&
4241 start > ac->ac_o_ex.fe_logical) {
4242 ext4_msg(ac->ac_sb, KERN_ERR,
4243 "start %lu, size %lu, fe_logical %lu",
4244 (unsigned long) start, (unsigned long) size,
4245 (unsigned long) ac->ac_o_ex.fe_logical);
4246 BUG();
4247 }
4248 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4249
4250 /* now prepare goal request */
4251
4252 /* XXX: is it better to align blocks with respect to logical
4253 * placement or to satisfy the big request as is */
4254 ac->ac_g_ex.fe_logical = start;
4255 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4256
4257 /* define goal start in order to merge */
4258 if (ar->pright && (ar->lright == (start + size)) &&
4259 ar->pright >= size &&
4260 ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4261 /* merge to the right */
4262 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4263 &ac->ac_g_ex.fe_group,
4264 &ac->ac_g_ex.fe_start);
4265 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4266 }
4267 if (ar->pleft && (ar->lleft + 1 == start) &&
4268 ar->pleft + 1 < ext4_blocks_count(es)) {
4269 /* merge to the left */
4270 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4271 &ac->ac_g_ex.fe_group,
4272 &ac->ac_g_ex.fe_start);
4273 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4274 }
4275
4276 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4277 orig_size, start);
4278 }
4279
4280 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4281 {
4282 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4283
4284 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4285 atomic_inc(&sbi->s_bal_reqs);
4286 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4287 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4288 atomic_inc(&sbi->s_bal_success);
4289 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4290 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4291 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4292 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4293 atomic_inc(&sbi->s_bal_goals);
4294 if (ac->ac_found > sbi->s_mb_max_to_scan)
4295 atomic_inc(&sbi->s_bal_breaks);
4296 }
4297
4298 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4299 trace_ext4_mballoc_alloc(ac);
4300 else
4301 trace_ext4_mballoc_prealloc(ac);
4302 }
4303
4304 /*
4305 * Called on failure; free up any blocks from the inode PA for this
4306 * context. We don't need this for MB_GROUP_PA because we only change
4307 * pa_free in ext4_mb_release_context(), but on failure, we've already
4308 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4309 */
4310 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4311 {
4312 struct ext4_prealloc_space *pa = ac->ac_pa;
4313 struct ext4_buddy e4b;
4314 int err;
4315
4316 if (pa == NULL) {
4317 if (ac->ac_f_ex.fe_len == 0)
4318 return;
4319 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4320 if (err) {
4321 /*
4322 * This should never happen since we pin the
4323 * pages in the ext4_allocation_context so
4324 * ext4_mb_load_buddy() should never fail.
4325 */
4326 WARN(1, "mb_load_buddy failed (%d)", err);
4327 return;
4328 }
4329 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4330 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4331 ac->ac_f_ex.fe_len);
4332 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4333 ext4_mb_unload_buddy(&e4b);
4334 return;
4335 }
4336 if (pa->pa_type == MB_INODE_PA)
4337 pa->pa_free += ac->ac_b_ex.fe_len;
4338 }
4339
4340 /*
4341 * use blocks preallocated to inode
4342 */
4343 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4344 struct ext4_prealloc_space *pa)
4345 {
4346 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4347 ext4_fsblk_t start;
4348 ext4_fsblk_t end;
4349 int len;
4350
4351 /* found preallocated blocks, use them */
4352 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4353 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4354 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4355 len = EXT4_NUM_B2C(sbi, end - start);
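/*
 * A worked example of the computation above (illustrative, clusters ==
 * blocks): for a PA with pa_lstart = 100, pa_pstart = 5000, pa_len = 64 and
 * a request for 8 blocks at logical block 110, start = 5000 + (110 - 100) =
 * 5010 and end = min(5064, 5018) = 5018, so len = 8 and pa_free drops by 8
 * below.
 */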
4356 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4357 &ac->ac_b_ex.fe_start);
4358 ac->ac_b_ex.fe_len = len;
4359 ac->ac_status = AC_STATUS_FOUND;
4360 ac->ac_pa = pa;
4361
4362 BUG_ON(start < pa->pa_pstart);
4363 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4364 BUG_ON(pa->pa_free < len);
4365 BUG_ON(ac->ac_b_ex.fe_len <= 0);
4366 pa->pa_free -= len;
4367
4368 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4369 }
4370
4371 /*
4372 * use blocks preallocated to locality group
4373 */
4374 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4375 struct ext4_prealloc_space *pa)
4376 {
4377 unsigned int len = ac->ac_o_ex.fe_len;
4378
4379 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4380 &ac->ac_b_ex.fe_group,
4381 &ac->ac_b_ex.fe_start);
4382 ac->ac_b_ex.fe_len = len;
4383 ac->ac_status = AC_STATUS_FOUND;
4384 ac->ac_pa = pa;
4385
4386 /* we don't correct pa_pstart or pa_len here to avoid a
4387 * possible race when the group is being loaded concurrently;
4388 * instead we correct the pa later, after blocks are marked
4389 * in the on-disk bitmap -- see ext4_mb_release_context().
4390 * Other CPUs are prevented from allocating from this pa by lg_mutex
4391 */
4392 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4393 pa->pa_lstart-len, len, pa);
4394 }
4395
4396 /*
4397 * Return the prealloc space that has the minimal distance
4398 * from the goal block. @cpa is the prealloc
4399 * space with the currently known minimal distance
4400 * from the goal block.
4401 */
4402 static struct ext4_prealloc_space *
4403 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4404 struct ext4_prealloc_space *pa,
4405 struct ext4_prealloc_space *cpa)
4406 {
4407 ext4_fsblk_t cur_distance, new_distance;
4408
4409 if (cpa == NULL) {
4410 atomic_inc(&pa->pa_count);
4411 return pa;
4412 }
4413 cur_distance = abs(goal_block - cpa->pa_pstart);
4414 new_distance = abs(goal_block - pa->pa_pstart);
4415
4416 if (cur_distance <= new_distance)
4417 return cpa;
4418
4419 /* drop the previous reference */
4420 atomic_dec(&cpa->pa_count);
4421 atomic_inc(&pa->pa_count);
4422 return pa;
4423 }
4424
4425 /*
4426 * search goal blocks in preallocated space
4427 */
4428 static noinline_for_stack bool
4429 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4430 {
4431 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4432 int order, i;
4433 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4434 struct ext4_locality_group *lg;
4435 struct ext4_prealloc_space *pa, *cpa = NULL;
4436 ext4_fsblk_t goal_block;
4437
4438 /* only data can be preallocated */
4439 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4440 return false;
4441
4442 /* first, try per-file preallocation */
4443 rcu_read_lock();
4444 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4445
4446 /* all fields in this condition don't change,
4447 * so we can skip locking for them */
4448 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
4449 ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, pa))
4450 continue;
4451
4452 /* non-extent files can't have physical blocks past 2^32 */
4453 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4454 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
4455 EXT4_MAX_BLOCK_FILE_PHYS))
4456 continue;
4457
4458 /* found preallocated blocks, use them */
4459 spin_lock(&pa->pa_lock);
4460 if (pa->pa_deleted == 0 && pa->pa_free) {
4461 atomic_inc(&pa->pa_count);
4462 ext4_mb_use_inode_pa(ac, pa);
4463 spin_unlock(&pa->pa_lock);
4464 ac->ac_criteria = 10;
4465 rcu_read_unlock();
4466 return true;
4467 }
4468 spin_unlock(&pa->pa_lock);
4469 }
4470 rcu_read_unlock();
4471
4472 /* can we use group allocation? */
4473 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4474 return false;
4475
4476 /* inode may have no locality group for some reason */
4477 lg = ac->ac_lg;
4478 if (lg == NULL)
4479 return false;
4480 order = fls(ac->ac_o_ex.fe_len) - 1;
4481 if (order > PREALLOC_TB_SIZE - 1)
4482 /* The max size of hash table is PREALLOC_TB_SIZE */
4483 order = PREALLOC_TB_SIZE - 1;
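/*
 * A hedged note on the bucketing above: fls() returns the position of the
 * highest set bit, so a request for 6 clusters starts at bucket
 * fls(6) - 1 = 2 and scans the larger buckets up to PREALLOC_TB_SIZE - 1;
 * since fls() is monotonic, any PA with enough free space must sit in a
 * bucket of at least that order.
 */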
4484
4485 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4486 /*
4487 * search for the prealloc space with the
4488 * minimal distance from the goal block.
4489 */
4490 for (i = order; i < PREALLOC_TB_SIZE; i++) {
4491 rcu_read_lock();
4492 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
4493 pa_inode_list) {
4494 spin_lock(&pa->pa_lock);
4495 if (pa->pa_deleted == 0 &&
4496 pa->pa_free >= ac->ac_o_ex.fe_len) {
4497
4498 cpa = ext4_mb_check_group_pa(goal_block,
4499 pa, cpa);
4500 }
4501 spin_unlock(&pa->pa_lock);
4502 }
4503 rcu_read_unlock();
4504 }
4505 if (cpa) {
4506 ext4_mb_use_group_pa(ac, cpa);
4507 ac->ac_criteria = 20;
4508 return true;
4509 }
4510 return false;
4511 }
4512
4513 /*
4514 * the function goes through all blocks freed in the group
4515 * but not yet committed and marks them used in the in-core bitmap.
4516 * The buddy must be generated from this bitmap.
4517 * Needs to be called with the ext4 group lock held.
4518 */
4519 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
4520 ext4_group_t group)
4521 {
4522 struct rb_node *n;
4523 struct ext4_group_info *grp;
4524 struct ext4_free_data *entry;
4525
4526 grp = ext4_get_group_info(sb, group);
4527 if (!grp)
4528 return;
4529 n = rb_first(&(grp->bb_free_root));
4530
4531 while (n) {
4532 entry = rb_entry(n, struct ext4_free_data, efd_node);
4533 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
4534 n = rb_next(n);
4535 }
4536 return;
4537 }
4538
4539 /*
4540 * the function goes through all preallocations in this group and marks them
4541 * used in the in-core bitmap. The buddy must be generated from this bitmap.
4542 * Needs to be called with the ext4 group lock held.
4543 */
4544 static noinline_for_stack
4545 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4546 ext4_group_t group)
4547 {
4548 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4549 struct ext4_prealloc_space *pa;
4550 struct list_head *cur;
4551 ext4_group_t groupnr;
4552 ext4_grpblk_t start;
4553 int preallocated = 0;
4554 int len;
4555
4556 if (!grp)
4557 return;
4558
4559 /* all forms of preallocation discard first load the group,
4560 * so the only competing code is preallocation use.
4561 * We don't need any locking here.
4562 * Note that we do NOT ignore preallocations with pa_deleted set;
4563 * otherwise we could leave used blocks available for
4564 * allocation in the buddy when a concurrent ext4_mb_put_pa()
4565 * is dropping the preallocation.
4566 */
4567 list_for_each(cur, &grp->bb_prealloc_list) {
4568 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4569 spin_lock(&pa->pa_lock);
4570 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4571 &groupnr, &start);
4572 len = pa->pa_len;
4573 spin_unlock(&pa->pa_lock);
4574 if (unlikely(len == 0))
4575 continue;
4576 BUG_ON(groupnr != group);
4577 ext4_set_bits(bitmap, start, len);
4578 preallocated += len;
4579 }
4580 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4581 }
4582
4583 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4584 struct ext4_prealloc_space *pa)
4585 {
4586 struct ext4_inode_info *ei;
4587
4588 if (pa->pa_deleted) {
4589 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4590 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4591 pa->pa_len);
4592 return;
4593 }
4594
4595 pa->pa_deleted = 1;
4596
4597 if (pa->pa_type == MB_INODE_PA) {
4598 ei = EXT4_I(pa->pa_inode);
4599 atomic_dec(&ei->i_prealloc_active);
4600 }
4601 }
4602
4603 static void ext4_mb_pa_callback(struct rcu_head *head)
4604 {
4605 struct ext4_prealloc_space *pa;
4606 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4607
4608 BUG_ON(atomic_read(&pa->pa_count));
4609 BUG_ON(pa->pa_deleted == 0);
4610 kmem_cache_free(ext4_pspace_cachep, pa);
4611 }
4612
4613 /*
4614 * drops a reference to preallocated space descriptor
4615 * if this was the last reference and the space is consumed
4616 */
4617 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4618 struct super_block *sb, struct ext4_prealloc_space *pa)
4619 {
4620 ext4_group_t grp;
4621 ext4_fsblk_t grp_blk;
4622
4623 /* in this short window concurrent discard can set pa_deleted */
4624 spin_lock(&pa->pa_lock);
4625 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4626 spin_unlock(&pa->pa_lock);
4627 return;
4628 }
4629
4630 if (pa->pa_deleted == 1) {
4631 spin_unlock(&pa->pa_lock);
4632 return;
4633 }
4634
4635 ext4_mb_mark_pa_deleted(sb, pa);
4636 spin_unlock(&pa->pa_lock);
4637
4638 grp_blk = pa->pa_pstart;
4639 /*
4640 * If doing group-based preallocation, pa_pstart may be in the
4641 * next group when pa is used up
4642 */
4643 if (pa->pa_type == MB_GROUP_PA)
4644 grp_blk--;
4645
4646 grp = ext4_get_group_number(sb, grp_blk);
4647
4648 /*
4649 * possible race:
4650 *
4651 * P1 (buddy init) P2 (regular allocation)
4652 * find block B in PA
4653 * copy on-disk bitmap to buddy
4654 * mark B in on-disk bitmap
4655 * drop PA from group
4656 * mark all PAs in buddy
4657 *
4658 * thus, P1 initializes buddy with B available. to prevent this
4659 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4660 * against that pair
4661 */
4662 ext4_lock_group(sb, grp);
4663 list_del(&pa->pa_group_list);
4664 ext4_unlock_group(sb, grp);
4665
4666 spin_lock(pa->pa_obj_lock);
4667 list_del_rcu(&pa->pa_inode_list);
4668 spin_unlock(pa->pa_obj_lock);
4669
4670 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4671 }
4672
4673 /*
4674 * creates new preallocated space for given inode
4675 */
4676 static noinline_for_stack void
4677 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4678 {
4679 struct super_block *sb = ac->ac_sb;
4680 struct ext4_sb_info *sbi = EXT4_SB(sb);
4681 struct ext4_prealloc_space *pa;
4682 struct ext4_group_info *grp;
4683 struct ext4_inode_info *ei;
4684
4685 /* preallocate only when the found space is larger than requested */
4686 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4687 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4688 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4689 BUG_ON(ac->ac_pa == NULL);
4690
4691 pa = ac->ac_pa;
4692
4693 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4694 struct ext4_free_extent ex = {
4695 .fe_logical = ac->ac_g_ex.fe_logical,
4696 .fe_len = ac->ac_g_ex.fe_len,
4697 };
4698 loff_t orig_goal_end = extent_logical_end(sbi, &ex);
4699
4700 /* we can't allocate as much as the normalizer wants,
4701 * so the found space must get a proper lstart
4702 * to cover the original request */
4703 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4704 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4705
4706 /*
4707 * Use the below logic for adjusting best extent as it keeps
4708 * fragmentation in check while ensuring logical range of best
4709 * extent doesn't overflow out of goal extent:
4710 *
4711 * 1. Check if best ex can be kept at end of goal and still
4712 * cover original start
4713 * 2. Else, check if best ex can be kept at start of goal and
4714 * still cover original start
4715 * 3. Else, keep the best ex at start of original request.
4716 */
4717 ex.fe_len = ac->ac_b_ex.fe_len;
4718
4719 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
4720 if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
4721 goto adjust_bex;
4722
4723 ex.fe_logical = ac->ac_g_ex.fe_logical;
4724 if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
4725 goto adjust_bex;
4726
4727 ex.fe_logical = ac->ac_o_ex.fe_logical;
4728 adjust_bex:
4729 ac->ac_b_ex.fe_logical = ex.fe_logical;
4730
4731 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4732 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4733 BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
4734 }
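/*
 * An example of the three cases above (illustrative numbers): with a goal
 * of logical [100, 164) covering an original start of 150 and a best extent
 * of 32 blocks, case 1 places it at [132, 164), which still covers 150. If
 * the best extent were only 8 blocks, [156, 164) would miss 150 and case
 * 2's [100, 108) would too, so case 3 pins fe_logical at 150.
 */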
4735
4736 /* preallocation can change ac_b_ex, thus we store actually
4737 * allocated blocks for history */
4738 ac->ac_f_ex = ac->ac_b_ex;
4739
4740 pa->pa_lstart = ac->ac_b_ex.fe_logical;
4741 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4742 pa->pa_len = ac->ac_b_ex.fe_len;
4743 pa->pa_free = pa->pa_len;
4744 spin_lock_init(&pa->pa_lock);
4745 INIT_LIST_HEAD(&pa->pa_inode_list);
4746 INIT_LIST_HEAD(&pa->pa_group_list);
4747 pa->pa_deleted = 0;
4748 pa->pa_type = MB_INODE_PA;
4749
4750 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4751 pa->pa_len, pa->pa_lstart);
4752 trace_ext4_mb_new_inode_pa(ac, pa);
4753
4754 ext4_mb_use_inode_pa(ac, pa);
4755 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4756
4757 ei = EXT4_I(ac->ac_inode);
4758 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4759 if (!grp)
4760 return;
4761
4762 pa->pa_obj_lock = &ei->i_prealloc_lock;
4763 pa->pa_inode = ac->ac_inode;
4764
4765 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4766
4767 spin_lock(pa->pa_obj_lock);
4768 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4769 spin_unlock(pa->pa_obj_lock);
4770 atomic_inc(&ei->i_prealloc_active);
4771 }
4772
4773 /*
4774 * creates new preallocated space for the locality group the inode belongs to
4775 */
4776 static noinline_for_stack void
4777 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4778 {
4779 struct super_block *sb = ac->ac_sb;
4780 struct ext4_locality_group *lg;
4781 struct ext4_prealloc_space *pa;
4782 struct ext4_group_info *grp;
4783
4784 /* preallocate only when the found space is larger than requested */
4785 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4786 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4787 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4788 BUG_ON(ac->ac_pa == NULL);
4789
4790 pa = ac->ac_pa;
4791
4792 /* preallocation can change ac_b_ex, thus we store actually
4793 * allocated blocks for history */
4794 ac->ac_f_ex = ac->ac_b_ex;
4795
4796 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4797 pa->pa_lstart = pa->pa_pstart;
4798 pa->pa_len = ac->ac_b_ex.fe_len;
4799 pa->pa_free = pa->pa_len;
4800 spin_lock_init(&pa->pa_lock);
4801 INIT_LIST_HEAD(&pa->pa_inode_list);
4802 INIT_LIST_HEAD(&pa->pa_group_list);
4803 pa->pa_deleted = 0;
4804 pa->pa_type = MB_GROUP_PA;
4805
4806 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4807 pa->pa_len, pa->pa_lstart);
4808 trace_ext4_mb_new_group_pa(ac, pa);
4809
4810 ext4_mb_use_group_pa(ac, pa);
4811 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4812
4813 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4814 if (!grp)
4815 return;
4816 lg = ac->ac_lg;
4817 BUG_ON(lg == NULL);
4818
4819 pa->pa_obj_lock = &lg->lg_prealloc_lock;
4820 pa->pa_inode = NULL;
4821
4822 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4823
4824 /*
4825 * We will later add the new pa to the right bucket
4826 * after updating the pa_free in ext4_mb_release_context
4827 */
4828 }
4829
4830 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4831 {
4832 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4833 ext4_mb_new_group_pa(ac);
4834 else
4835 ext4_mb_new_inode_pa(ac);
4836 }
4837
4838 /*
4839 * finds all unused blocks in on-disk bitmap, frees them in
4840 * in-core bitmap and buddy.
4841 * @pa must be unlinked from inode and group lists, so that
4842 * nobody else can find/use it.
4843 * the caller MUST hold group/inode locks.
4844 * TODO: optimize the case when there are no in-core structures yet
4845 */
4846 static noinline_for_stack int
4847 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4848 struct ext4_prealloc_space *pa)
4849 {
4850 struct super_block *sb = e4b->bd_sb;
4851 struct ext4_sb_info *sbi = EXT4_SB(sb);
4852 unsigned int end;
4853 unsigned int next;
4854 ext4_group_t group;
4855 ext4_grpblk_t bit;
4856 unsigned long long grp_blk_start;
4857 int free = 0;
4858
4859 BUG_ON(pa->pa_deleted == 0);
4860 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4861 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4862 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4863 end = bit + pa->pa_len;
4864
4865 while (bit < end) {
4866 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4867 if (bit >= end)
4868 break;
4869 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4870 mb_debug(sb, "free preallocated %u/%u in group %u\n",
4871 (unsigned) ext4_group_first_block_no(sb, group) + bit,
4872 (unsigned) next - bit, (unsigned) group);
4873 free += next - bit;
4874
4875 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4876 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4877 EXT4_C2B(sbi, bit)),
4878 next - bit);
4879 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4880 bit = next + 1;
4881 }
4882 if (free != pa->pa_free) {
4883 ext4_msg(e4b->bd_sb, KERN_CRIT,
4884 "pa %p: logic %lu, phys. %lu, len %d",
4885 pa, (unsigned long) pa->pa_lstart,
4886 (unsigned long) pa->pa_pstart,
4887 pa->pa_len);
4888 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4889 free, pa->pa_free);
4890 /*
4891 * pa is already deleted so we use the value obtained
4892 * from the bitmap and continue.
4893 */
4894 }
4895 atomic_add(free, &sbi->s_mb_discarded);
4896
4897 return 0;
4898 }
4899
4900 static noinline_for_stack int
4901 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4902 struct ext4_prealloc_space *pa)
4903 {
4904 struct super_block *sb = e4b->bd_sb;
4905 ext4_group_t group;
4906 ext4_grpblk_t bit;
4907
4908 trace_ext4_mb_release_group_pa(sb, pa);
4909 BUG_ON(pa->pa_deleted == 0);
4910 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4911 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
4912 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
4913 e4b->bd_group, group, pa->pa_pstart);
4914 return 0;
4915 }
4916 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4917 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4918 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4919
4920 return 0;
4921 }
4922
4923 /*
4924 * releases all preallocations in given group
4925 *
4926 * first, we need to decide discard policy:
4927 * - when do we discard
4928 * 1) ENOSPC
4929 * - how many do we discard
4930 * 1) how many requested
4931 */
4932 static noinline_for_stack int
4933 ext4_mb_discard_group_preallocations(struct super_block *sb,
4934 ext4_group_t group, int *busy)
4935 {
4936 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4937 struct buffer_head *bitmap_bh = NULL;
4938 struct ext4_prealloc_space *pa, *tmp;
4939 struct list_head list;
4940 struct ext4_buddy e4b;
4941 int err;
4942 int free = 0;
4943
4944 if (!grp)
4945 return 0;
4946 mb_debug(sb, "discard preallocation for group %u\n", group);
4947 if (list_empty(&grp->bb_prealloc_list))
4948 goto out_dbg;
4949
4950 bitmap_bh = ext4_read_block_bitmap(sb, group);
4951 if (IS_ERR(bitmap_bh)) {
4952 err = PTR_ERR(bitmap_bh);
4953 ext4_error_err(sb, -err,
4954 "Error %d reading block bitmap for %u",
4955 err, group);
4956 goto out_dbg;
4957 }
4958
4959 err = ext4_mb_load_buddy(sb, group, &e4b);
4960 if (err) {
4961 ext4_warning(sb, "Error %d loading buddy information for %u",
4962 err, group);
4963 put_bh(bitmap_bh);
4964 goto out_dbg;
4965 }
4966
4967 INIT_LIST_HEAD(&list);
4968 ext4_lock_group(sb, group);
4969 list_for_each_entry_safe(pa, tmp,
4970 &grp->bb_prealloc_list, pa_group_list) {
4971 spin_lock(&pa->pa_lock);
4972 if (atomic_read(&pa->pa_count)) {
4973 spin_unlock(&pa->pa_lock);
4974 *busy = 1;
4975 continue;
4976 }
4977 if (pa->pa_deleted) {
4978 spin_unlock(&pa->pa_lock);
4979 continue;
4980 }
4981
4982 /* seems this one can be freed ... */
4983 ext4_mb_mark_pa_deleted(sb, pa);
4984
4985 if (!free)
4986 this_cpu_inc(discard_pa_seq);
4987
4988 /* we can trust pa_free ... */
4989 free += pa->pa_free;
4990
4991 spin_unlock(&pa->pa_lock);
4992
4993 list_del(&pa->pa_group_list);
4994 list_add(&pa->u.pa_tmp_list, &list);
4995 }
4996
4997 /* now free all selected PAs */
4998 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4999
5000 /* remove from object (inode or locality group) */
5001 spin_lock(pa->pa_obj_lock);
5002 list_del_rcu(&pa->pa_inode_list);
5003 spin_unlock(pa->pa_obj_lock);
5004
5005 if (pa->pa_type == MB_GROUP_PA)
5006 ext4_mb_release_group_pa(&e4b, pa);
5007 else
5008 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5009
5010 list_del(&pa->u.pa_tmp_list);
5011 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5012 }
5013
5014 ext4_unlock_group(sb, group);
5015 ext4_mb_unload_buddy(&e4b);
5016 put_bh(bitmap_bh);
5017 out_dbg:
5018 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
5019 free, group, grp->bb_free);
5020 return free;
5021 }
5022
5023 /*
5024 * releases all unused preallocated blocks for a given inode
5025 *
5026 * It's important to discard preallocations under i_data_sem
5027 * We don't want another block to be served from the prealloc
5028 * space when we are discarding the inode prealloc space.
5029 *
5030 * FIXME!! Make sure it is valid at all the call sites
5031 */
5032 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
5033 {
5034 struct ext4_inode_info *ei = EXT4_I(inode);
5035 struct super_block *sb = inode->i_sb;
5036 struct buffer_head *bitmap_bh = NULL;
5037 struct ext4_prealloc_space *pa, *tmp;
5038 ext4_group_t group = 0;
5039 struct list_head list;
5040 struct ext4_buddy e4b;
5041 int err;
5042
5043 if (!S_ISREG(inode->i_mode)) {
5044 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
5045 return;
5046 }
5047
5048 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5049 return;
5050
5051 mb_debug(sb, "discard preallocation for inode %lu\n",
5052 inode->i_ino);
5053 trace_ext4_discard_preallocations(inode,
5054 atomic_read(&ei->i_prealloc_active), needed);
5055
5056 INIT_LIST_HEAD(&list);
5057
5058 if (needed == 0)
5059 needed = UINT_MAX;
5060
5061 repeat:
5062 /* first, collect all pa's in the inode */
5063 spin_lock(&ei->i_prealloc_lock);
5064 while (!list_empty(&ei->i_prealloc_list) && needed) {
5065 pa = list_entry(ei->i_prealloc_list.prev,
5066 struct ext4_prealloc_space, pa_inode_list);
5067 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
5068 spin_lock(&pa->pa_lock);
5069 if (atomic_read(&pa->pa_count)) {
5070 /* this shouldn't happen often - nobody should
5071 * use preallocation while we're discarding it */
5072 spin_unlock(&pa->pa_lock);
5073 spin_unlock(&ei->i_prealloc_lock);
5074 ext4_msg(sb, KERN_ERR,
5075 "uh-oh! used pa while discarding");
5076 WARN_ON(1);
5077 schedule_timeout_uninterruptible(HZ);
5078 goto repeat;
5079
5080 }
5081 if (pa->pa_deleted == 0) {
5082 ext4_mb_mark_pa_deleted(sb, pa);
5083 spin_unlock(&pa->pa_lock);
5084 list_del_rcu(&pa->pa_inode_list);
5085 list_add(&pa->u.pa_tmp_list, &list);
5086 needed--;
5087 continue;
5088 }
5089
5090 /* someone is deleting pa right now */
5091 spin_unlock(&pa->pa_lock);
5092 spin_unlock(&ei->i_prealloc_lock);
5093
5094 /* we have to wait here because pa_deleted
5095 * doesn't mean the pa is already unlinked from
5096 * the list. As we might be called from
5097 * ->clear_inode(), the inode will get freed
5098 * and a concurrent thread which is unlinking the
5099 * pa from the inode's list may access already
5100 * freed memory, bad-bad-bad */
5101
5102 /* XXX: if this happens too often, we can
5103 * add a flag to force wait only in case
5104 * of ->clear_inode(), but not in case of
5105 * regular truncate */
5106 schedule_timeout_uninterruptible(HZ);
5107 goto repeat;
5108 }
5109 spin_unlock(&ei->i_prealloc_lock);
5110
5111 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5112 BUG_ON(pa->pa_type != MB_INODE_PA);
5113 group = ext4_get_group_number(sb, pa->pa_pstart);
5114
5115 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5116 GFP_NOFS|__GFP_NOFAIL);
5117 if (err) {
5118 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5119 err, group);
5120 continue;
5121 }
5122
5123 bitmap_bh = ext4_read_block_bitmap(sb, group);
5124 if (IS_ERR(bitmap_bh)) {
5125 err = PTR_ERR(bitmap_bh);
5126 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5127 err, group);
5128 ext4_mb_unload_buddy(&e4b);
5129 continue;
5130 }
5131
5132 ext4_lock_group(sb, group);
5133 list_del(&pa->pa_group_list);
5134 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5135 ext4_unlock_group(sb, group);
5136
5137 ext4_mb_unload_buddy(&e4b);
5138 put_bh(bitmap_bh);
5139
5140 list_del(&pa->u.pa_tmp_list);
5141 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5142 }
5143 }
5144
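/*
 * Allocate a zeroed preallocation space for the allocation context and take
 * the initial reference on it.
 */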
5145 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5146 {
5147 struct ext4_prealloc_space *pa;
5148
5149 BUG_ON(ext4_pspace_cachep == NULL);
5150 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5151 if (!pa)
5152 return -ENOMEM;
5153 atomic_set(&pa->pa_count, 1);
5154 ac->ac_pa = pa;
5155 return 0;
5156 }
5157
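/*
 * Drop the last reference on the context's unused preallocation space and
 * return it to the slab cache.
 */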
5158 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
5159 {
5160 struct ext4_prealloc_space *pa = ac->ac_pa;
5161
5162 BUG_ON(!pa);
5163 ac->ac_pa = NULL;
5164 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5165 kmem_cache_free(ext4_pspace_cachep, pa);
5166 }
5167
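/*
 * Debugging helpers: with CONFIG_EXT4_DEBUG enabled, dump the remaining
 * preallocation spaces per group and the details of a failed allocation
 * context via mb_debug().
 */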
5168 #ifdef CONFIG_EXT4_DEBUG
5169 static inline void ext4_mb_show_pa(struct super_block *sb)
5170 {
5171 ext4_group_t i, ngroups;
5172
5173 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5174 return;
5175
5176 ngroups = ext4_get_groups_count(sb);
5177 mb_debug(sb, "groups: ");
5178 for (i = 0; i < ngroups; i++) {
5179 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5180 struct ext4_prealloc_space *pa;
5181 ext4_grpblk_t start;
5182 struct list_head *cur;
5183
5184 if (!grp)
5185 continue;
5186 ext4_lock_group(sb, i);
5187 list_for_each(cur, &grp->bb_prealloc_list) {
5188 pa = list_entry(cur, struct ext4_prealloc_space,
5189 pa_group_list);
5190 spin_lock(&pa->pa_lock);
5191 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5192 NULL, &start);
5193 spin_unlock(&pa->pa_lock);
5194 mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5195 pa->pa_len);
5196 }
5197 ext4_unlock_group(sb, i);
5198 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5199 grp->bb_fragments);
5200 }
5201 }
5202
5203 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5204 {
5205 struct super_block *sb = ac->ac_sb;
5206
5207 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5208 return;
5209
5210 mb_debug(sb, "Can't allocate:"
5211 " Allocation context details:");
5212 mb_debug(sb, "status %u flags 0x%x",
5213 ac->ac_status, ac->ac_flags);
5214 mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5215 "goal %lu/%lu/%lu@%lu, "
5216 "best %lu/%lu/%lu@%lu cr %d",
5217 (unsigned long)ac->ac_o_ex.fe_group,
5218 (unsigned long)ac->ac_o_ex.fe_start,
5219 (unsigned long)ac->ac_o_ex.fe_len,
5220 (unsigned long)ac->ac_o_ex.fe_logical,
5221 (unsigned long)ac->ac_g_ex.fe_group,
5222 (unsigned long)ac->ac_g_ex.fe_start,
5223 (unsigned long)ac->ac_g_ex.fe_len,
5224 (unsigned long)ac->ac_g_ex.fe_logical,
5225 (unsigned long)ac->ac_b_ex.fe_group,
5226 (unsigned long)ac->ac_b_ex.fe_start,
5227 (unsigned long)ac->ac_b_ex.fe_len,
5228 (unsigned long)ac->ac_b_ex.fe_logical,
5229 (int)ac->ac_criteria);
5230 mb_debug(sb, "%u found", ac->ac_found);
5231 ext4_mb_show_pa(sb);
5232 }
5233 #else
5234 static inline void ext4_mb_show_pa(struct super_block *sb)
5235 {
5236 return;
5237 }
5238 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5239 {
5240 ext4_mb_show_pa(ac->ac_sb);
5241 return;
5242 }
5243 #endif
5244
5245 /*
5246 * We use locality group preallocation for small files. The size of the
5247 * file is determined by the current size or the resulting size after
5248 * allocation, whichever is larger.
5249 *
5250 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5251 */
5252 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5253 {
5254 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5255 int bsbits = ac->ac_sb->s_blocksize_bits;
5256 loff_t size, isize;
5257 bool inode_pa_eligible, group_pa_eligible;
5258
5259 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5260 return;
5261
5262 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5263 return;
5264
5265 group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5266 inode_pa_eligible = true;
5267 size = extent_logical_end(sbi, &ac->ac_o_ex);
5268 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5269 >> bsbits;
5270
5271 /* No point in using inode preallocation for closed files */
5272 if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5273 !inode_is_open_for_write(ac->ac_inode))
5274 inode_pa_eligible = false;
5275
5276 size = max(size, isize);
5277 /* Don't use group allocation for large files */
5278 if (size > sbi->s_mb_stream_request)
5279 group_pa_eligible = false;
5280
5281 if (!group_pa_eligible) {
5282 if (inode_pa_eligible)
5283 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5284 else
5285 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5286 return;
5287 }
5288
5289 BUG_ON(ac->ac_lg != NULL);
5290 /*
5291 * locality group prealloc space is per cpu. The reason for having
5292 * a per cpu locality group is to reduce the contention between block
5293 * requests from multiple CPUs.
5294 */
5295 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5296
5297 /* we're going to use group allocation */
5298 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5299
5300 /* serialize all allocations in the group */
5301 mutex_lock(&ac->ac_lg->lg_mutex);
5302 }
5303
5304 static noinline_for_stack int
5305 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5306 struct ext4_allocation_request *ar)
5307 {
5308 struct super_block *sb = ar->inode->i_sb;
5309 struct ext4_sb_info *sbi = EXT4_SB(sb);
5310 struct ext4_super_block *es = sbi->s_es;
5311 ext4_group_t group;
5312 unsigned int len;
5313 ext4_fsblk_t goal;
5314 ext4_grpblk_t block;
5315
5316 /* we can't allocate > group size */
5317 len = ar->len;
5318
5319 /* just a dirty hack to filter too big requests */
5320 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5321 len = EXT4_CLUSTERS_PER_GROUP(sb);
5322
5323 /* start searching from the goal */
5324 goal = ar->goal;
5325 if (goal < le32_to_cpu(es->s_first_data_block) ||
5326 goal >= ext4_blocks_count(es))
5327 goal = le32_to_cpu(es->s_first_data_block);
5328 ext4_get_group_no_and_offset(sb, goal, &group, &block);
5329
5330 /* set up allocation goals */
5331 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5332 ac->ac_status = AC_STATUS_CONTINUE;
5333 ac->ac_sb = sb;
5334 ac->ac_inode = ar->inode;
5335 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5336 ac->ac_o_ex.fe_group = group;
5337 ac->ac_o_ex.fe_start = block;
5338 ac->ac_o_ex.fe_len = len;
5339 ac->ac_g_ex = ac->ac_o_ex;
5340 ac->ac_flags = ar->flags;
5341
5342 /* we have to define context: we'll work with a file or
5343 * locality group. this is a policy, actually */
5344 ext4_mb_group_or_file(ac);
5345
5346 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5347 "left: %u/%u, right %u/%u to %swritable\n",
5348 (unsigned) ar->len, (unsigned) ar->logical,
5349 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5350 (unsigned) ar->lleft, (unsigned) ar->pleft,
5351 (unsigned) ar->lright, (unsigned) ar->pright,
5352 inode_is_open_for_write(ar->inode) ? "" : "non-");
5353 return 0;
5354
5355 }
5356
5357 static noinline_for_stack void
5358 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5359 struct ext4_locality_group *lg,
5360 int order, int total_entries)
5361 {
5362 ext4_group_t group = 0;
5363 struct ext4_buddy e4b;
5364 struct list_head discard_list;
5365 struct ext4_prealloc_space *pa, *tmp;
5366
5367 mb_debug(sb, "discard locality group preallocation\n");
5368
5369 INIT_LIST_HEAD(&discard_list);
5370
5371 spin_lock(&lg->lg_prealloc_lock);
5372 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5373 pa_inode_list,
5374 lockdep_is_held(&lg->lg_prealloc_lock)) {
5375 spin_lock(&pa->pa_lock);
5376 if (atomic_read(&pa->pa_count)) {
5377 /*
5378 * This is the pa that we just used
5379 * for block allocation. So don't
5380 * free that
5381 */
5382 spin_unlock(&pa->pa_lock);
5383 continue;
5384 }
5385 if (pa->pa_deleted) {
5386 spin_unlock(&pa->pa_lock);
5387 continue;
5388 }
5389 /* only lg prealloc space */
5390 BUG_ON(pa->pa_type != MB_GROUP_PA);
5391
5392 /* seems this one can be freed ... */
5393 ext4_mb_mark_pa_deleted(sb, pa);
5394 spin_unlock(&pa->pa_lock);
5395
5396 list_del_rcu(&pa->pa_inode_list);
5397 list_add(&pa->u.pa_tmp_list, &discard_list);
5398
5399 total_entries--;
5400 if (total_entries <= 5) {
5401 /*
5402 * we want to keep only 5 entries,
5403 * allowing it to grow to 8. This
5404 * makes sure we don't call discard
5405 * again soon for this list.
5406 */
5407 break;
5408 }
5409 }
5410 spin_unlock(&lg->lg_prealloc_lock);
5411
5412 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5413 int err;
5414
5415 group = ext4_get_group_number(sb, pa->pa_pstart);
5416 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5417 GFP_NOFS|__GFP_NOFAIL);
5418 if (err) {
5419 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5420 err, group);
5421 continue;
5422 }
5423 ext4_lock_group(sb, group);
5424 list_del(&pa->pa_group_list);
5425 ext4_mb_release_group_pa(&e4b, pa);
5426 ext4_unlock_group(sb, group);
5427
5428 ext4_mb_unload_buddy(&e4b);
5429 list_del(&pa->u.pa_tmp_list);
5430 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5431 }
5432 }
5433
5434 /*
5435 * We have incremented pa_count. So it cannot be freed at this
5436 * point. Also we hold lg_mutex. So no parallel allocation is
5437 * possible from this lg. That means pa_free cannot be updated.
5438 *
5439 * A parallel ext4_mb_discard_group_preallocations is possible,
5440 * which can cause the lg_prealloc_list to be updated.
5441 */
5442
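/*
 * Add the locality group PA to the lg_prealloc_list bucket matching the
 * order of its free clusters, keeping the bucket roughly sorted by pa_free,
 * and trim the bucket via ext4_mb_discard_lg_preallocations() if it has
 * grown past 8 entries.
 */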
5443 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5444 {
5445 int order, added = 0, lg_prealloc_count = 1;
5446 struct super_block *sb = ac->ac_sb;
5447 struct ext4_locality_group *lg = ac->ac_lg;
5448 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5449
5450 order = fls(pa->pa_free) - 1;
5451 if (order > PREALLOC_TB_SIZE - 1)
5452 /* The max size of hash table is PREALLOC_TB_SIZE */
5453 order = PREALLOC_TB_SIZE - 1;
5454 /* Add the prealloc space to lg */
5455 spin_lock(&lg->lg_prealloc_lock);
5456 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5457 pa_inode_list,
5458 lockdep_is_held(&lg->lg_prealloc_lock)) {
5459 spin_lock(&tmp_pa->pa_lock);
5460 if (tmp_pa->pa_deleted) {
5461 spin_unlock(&tmp_pa->pa_lock);
5462 continue;
5463 }
5464 if (!added && pa->pa_free < tmp_pa->pa_free) {
5465 /* Add to the tail of the previous entry */
5466 list_add_tail_rcu(&pa->pa_inode_list,
5467 &tmp_pa->pa_inode_list);
5468 added = 1;
5469 /*
5470 * we want to count the total
5471 * number of entries in the list
5472 */
5473 }
5474 spin_unlock(&tmp_pa->pa_lock);
5475 lg_prealloc_count++;
5476 }
5477 if (!added)
5478 list_add_tail_rcu(&pa->pa_inode_list,
5479 &lg->lg_prealloc_list[order]);
5480 spin_unlock(&lg->lg_prealloc_lock);
5481
5482 /* Now trim the list to be not more than 8 elements */
5483 if (lg_prealloc_count > 8) {
5484 ext4_mb_discard_lg_preallocations(sb, lg,
5485 order, lg_prealloc_count);
5486 return;
5487 }
5488 return;
5489 }
5490
5491 /*
5492 * if per-inode prealloc list is too long, trim some PA
5493 */
5494 static void ext4_mb_trim_inode_pa(struct inode *inode)
5495 {
5496 struct ext4_inode_info *ei = EXT4_I(inode);
5497 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5498 int count, delta;
5499
5500 count = atomic_read(&ei->i_prealloc_active);
5501 delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
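/* allow roughly 25% slack above the limit before trimming the oldest PAs */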
5502 if (count > sbi->s_mb_max_inode_prealloc + delta) {
5503 count -= sbi->s_mb_max_inode_prealloc;
5504 ext4_discard_preallocations(inode, count);
5505 }
5506 }
5507
5508 /*
5509 * release all resource we used in allocation
5510 */
5511 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5512 {
5513 struct inode *inode = ac->ac_inode;
5514 struct ext4_inode_info *ei = EXT4_I(inode);
5515 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5516 struct ext4_prealloc_space *pa = ac->ac_pa;
5517 if (pa) {
5518 if (pa->pa_type == MB_GROUP_PA) {
5519 /* see comment in ext4_mb_use_group_pa() */
5520 spin_lock(&pa->pa_lock);
5521 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5522 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5523 pa->pa_free -= ac->ac_b_ex.fe_len;
5524 pa->pa_len -= ac->ac_b_ex.fe_len;
5525 spin_unlock(&pa->pa_lock);
5526
5527 /*
5528 * We want to add the pa to the right bucket.
5529 * Remove it from the list and while adding
5530 * make sure the list to which we are adding
5531 * doesn't grow big.
5532 */
5533 if (likely(pa->pa_free)) {
5534 spin_lock(pa->pa_obj_lock);
5535 list_del_rcu(&pa->pa_inode_list);
5536 spin_unlock(pa->pa_obj_lock);
5537 ext4_mb_add_n_trim(ac);
5538 }
5539 }
5540
5541 if (pa->pa_type == MB_INODE_PA) {
5542 /*
5543 * treat the per-inode prealloc list as an LRU list, then try
5544 * to trim the least recently used PA.
5545 */
5546 spin_lock(pa->pa_obj_lock);
5547 list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5548 spin_unlock(pa->pa_obj_lock);
5549 }
5550
5551 ext4_mb_put_pa(ac, ac->ac_sb, pa);
5552 }
5553 if (ac->ac_bitmap_page)
5554 put_page(ac->ac_bitmap_page);
5555 if (ac->ac_buddy_page)
5556 put_page(ac->ac_buddy_page);
5557 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5558 mutex_unlock(&ac->ac_lg->lg_mutex);
5559 ext4_mb_collect_stats(ac);
5560 ext4_mb_trim_inode_pa(inode);
5561 return 0;
5562 }
5563
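/*
 * Walk all groups and discard their preallocations until at least 'needed'
 * clusters have been released; retry a couple of times if some groups were
 * busy. Returns the number of clusters freed.
 */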
5564 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5565 {
5566 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5567 int ret;
5568 int freed = 0, busy = 0;
5569 int retry = 0;
5570
5571 trace_ext4_mb_discard_preallocations(sb, needed);
5572
5573 if (needed == 0)
5574 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5575 repeat:
5576 for (i = 0; i < ngroups && needed > 0; i++) {
5577 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5578 freed += ret;
5579 needed -= ret;
5580 cond_resched();
5581 }
5582
5583 if (needed > 0 && busy && ++retry < 3) {
5584 busy = 0;
5585 goto repeat;
5586 }
5587
5588 return freed;
5589 }
5590
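/*
 * After a failed allocation, decide whether ext4_mb_new_blocks() should
 * retry: yes if we discarded some preallocations here, if strict checking
 * has not been tried yet, or if the discard sequence changed because someone
 * else released preallocations; in the latter cases strict checking is
 * enabled for the retry.
 */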
5591 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
5592 struct ext4_allocation_context *ac, u64 *seq)
5593 {
5594 int freed;
5595 u64 seq_retry = 0;
5596 bool ret = false;
5597
5598 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
5599 if (freed) {
5600 ret = true;
5601 goto out_dbg;
5602 }
5603 seq_retry = ext4_get_discard_pa_seq_sum();
5604 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
5605 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
5606 *seq = seq_retry;
5607 ret = true;
5608 }
5609
5610 out_dbg:
5611 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
5612 return ret;
5613 }
5614
5615 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5616 struct ext4_allocation_request *ar, int *errp);
5617
5618 /*
5619 * Main entry point into mballoc to allocate blocks
5620 * it tries to use preallocation first, then falls back
5621 * to usual allocation
5622 */
5623 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
5624 struct ext4_allocation_request *ar, int *errp)
5625 {
5626 struct ext4_allocation_context *ac = NULL;
5627 struct ext4_sb_info *sbi;
5628 struct super_block *sb;
5629 ext4_fsblk_t block = 0;
5630 unsigned int inquota = 0;
5631 unsigned int reserv_clstrs = 0;
5632 int retries = 0;
5633 u64 seq;
5634
5635 might_sleep();
5636 sb = ar->inode->i_sb;
5637 sbi = EXT4_SB(sb);
5638
5639 trace_ext4_request_blocks(ar);
5640 if (sbi->s_mount_state & EXT4_FC_REPLAY)
5641 return ext4_mb_new_blocks_simple(handle, ar, errp);
5642
5643 /* Allow to use superuser reservation for quota file */
5644 if (ext4_is_quota_file(ar->inode))
5645 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5646
5647 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5648 /* Without delayed allocation we need to verify
5649 * there are enough free blocks to do block allocation
5650 * and verify the allocation doesn't exceed the quota limits.
5651 */
5652 while (ar->len &&
5653 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5654
5655 /* let others free the space */
5656 cond_resched();
5657 ar->len = ar->len >> 1;
5658 }
5659 if (!ar->len) {
5660 ext4_mb_show_pa(sb);
5661 *errp = -ENOSPC;
5662 return 0;
5663 }
5664 reserv_clstrs = ar->len;
5665 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5666 dquot_alloc_block_nofail(ar->inode,
5667 EXT4_C2B(sbi, ar->len));
5668 } else {
5669 while (ar->len &&
5670 dquot_alloc_block(ar->inode,
5671 EXT4_C2B(sbi, ar->len))) {
5672
5673 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5674 ar->len--;
5675 }
5676 }
5677 inquota = ar->len;
5678 if (ar->len == 0) {
5679 *errp = -EDQUOT;
5680 goto out;
5681 }
5682 }
5683
5684 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5685 if (!ac) {
5686 ar->len = 0;
5687 *errp = -ENOMEM;
5688 goto out;
5689 }
5690
5691 *errp = ext4_mb_initialize_context(ac, ar);
5692 if (*errp) {
5693 ar->len = 0;
5694 goto out;
5695 }
5696
5697 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
5698 seq = this_cpu_read(discard_pa_seq);
5699 if (!ext4_mb_use_preallocated(ac)) {
5700 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5701 ext4_mb_normalize_request(ac, ar);
5702
5703 *errp = ext4_mb_pa_alloc(ac);
5704 if (*errp)
5705 goto errout;
5706 repeat:
5707 /* allocate space in core */
5708 *errp = ext4_mb_regular_allocator(ac);
5709 /*
5710 * pa allocated above is added to grp->bb_prealloc_list only
5711 * when we were able to allocate some blocks, i.e. when
5712 * ac->ac_status == AC_STATUS_FOUND.
5713 * An error from above means ac->ac_status != AC_STATUS_FOUND,
5714 * so we have to free this pa here itself.
5715 */
5716 if (*errp) {
5717 ext4_mb_pa_free(ac);
5718 ext4_discard_allocated_blocks(ac);
5719 goto errout;
5720 }
5721 if (ac->ac_status == AC_STATUS_FOUND &&
5722 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5723 ext4_mb_pa_free(ac);
5724 }
5725 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5726 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5727 if (*errp) {
5728 ext4_discard_allocated_blocks(ac);
5729 goto errout;
5730 } else {
5731 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5732 ar->len = ac->ac_b_ex.fe_len;
5733 }
5734 } else {
5735 if (++retries < 3 &&
5736 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5737 goto repeat;
5738 /*
5739 * If block allocation fails then the pa allocated above
5740 * needs to be freed here itself.
5741 */
5742 ext4_mb_pa_free(ac);
5743 *errp = -ENOSPC;
5744 }
5745
5746 errout:
5747 if (*errp) {
5748 ac->ac_b_ex.fe_len = 0;
5749 ar->len = 0;
5750 ext4_mb_show_ac(ac);
5751 }
5752 ext4_mb_release_context(ac);
5753 out:
5754 if (ac)
5755 kmem_cache_free(ext4_ac_cachep, ac);
5756 if (inquota && ar->len < inquota)
5757 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5758 if (!ar->len) {
5759 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5760 /* release all the reserved blocks if non delalloc */
5761 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5762 reserv_clstrs);
5763 }
5764
5765 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5766
5767 return block;
5768 }
5769
5770 /*
5771 * We can merge two free data extents only if the physical blocks
5772 * are contiguous, AND the extents were freed by the same transaction,
5773 * AND the blocks are associated with the same group.
5774 */
5775 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5776 struct ext4_free_data *entry,
5777 struct ext4_free_data *new_entry,
5778 struct rb_root *entry_rb_root)
5779 {
5780 if ((entry->efd_tid != new_entry->efd_tid) ||
5781 (entry->efd_group != new_entry->efd_group))
5782 return;
5783 if (entry->efd_start_cluster + entry->efd_count ==
5784 new_entry->efd_start_cluster) {
5785 new_entry->efd_start_cluster = entry->efd_start_cluster;
5786 new_entry->efd_count += entry->efd_count;
5787 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5788 entry->efd_start_cluster) {
5789 new_entry->efd_count += entry->efd_count;
5790 } else
5791 return;
5792 spin_lock(&sbi->s_md_lock);
5793 list_del(&entry->efd_list);
5794 spin_unlock(&sbi->s_md_lock);
5795 rb_erase(&entry->efd_node, entry_rb_root);
5796 kmem_cache_free(ext4_free_data_cachep, entry);
5797 }
5798
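/*
 * Record an extent freed under the running transaction so its blocks are not
 * reused until the transaction commits: insert it into the group's rb-tree
 * of pending frees keyed by start cluster, merge it with adjacent entries
 * when possible, and queue it on the per-sb freed-data list.
 */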
5799 static noinline_for_stack int
5800 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5801 struct ext4_free_data *new_entry)
5802 {
5803 ext4_group_t group = e4b->bd_group;
5804 ext4_grpblk_t cluster;
5805 ext4_grpblk_t clusters = new_entry->efd_count;
5806 struct ext4_free_data *entry;
5807 struct ext4_group_info *db = e4b->bd_info;
5808 struct super_block *sb = e4b->bd_sb;
5809 struct ext4_sb_info *sbi = EXT4_SB(sb);
5810 struct rb_node **n = &db->bb_free_root.rb_node, *node;
5811 struct rb_node *parent = NULL, *new_node;
5812
5813 BUG_ON(!ext4_handle_valid(handle));
5814 BUG_ON(e4b->bd_bitmap_page == NULL);
5815 BUG_ON(e4b->bd_buddy_page == NULL);
5816
5817 new_node = &new_entry->efd_node;
5818 cluster = new_entry->efd_start_cluster;
5819
5820 if (!*n) {
5821 /* first free block extent. We need to
5822 * protect the buddy cache from being freed,
5823 * otherwise we'll refresh it from
5824 * the on-disk bitmap and lose not-yet-available
5825 * blocks */
5826 get_page(e4b->bd_buddy_page);
5827 get_page(e4b->bd_bitmap_page);
5828 }
5829 while (*n) {
5830 parent = *n;
5831 entry = rb_entry(parent, struct ext4_free_data, efd_node);
5832 if (cluster < entry->efd_start_cluster)
5833 n = &(*n)->rb_left;
5834 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5835 n = &(*n)->rb_right;
5836 else {
5837 ext4_grp_locked_error(sb, group, 0,
5838 ext4_group_first_block_no(sb, group) +
5839 EXT4_C2B(sbi, cluster),
5840 "Block already on to-be-freed list");
5841 kmem_cache_free(ext4_free_data_cachep, new_entry);
5842 return 0;
5843 }
5844 }
5845
5846 rb_link_node(new_node, parent, n);
5847 rb_insert_color(new_node, &db->bb_free_root);
5848
5849 /* Now try to see if the extent can be merged with the left and right neighbors */
5850 node = rb_prev(new_node);
5851 if (node) {
5852 entry = rb_entry(node, struct ext4_free_data, efd_node);
5853 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5854 &(db->bb_free_root));
5855 }
5856
5857 node = rb_next(new_node);
5858 if (node) {
5859 entry = rb_entry(node, struct ext4_free_data, efd_node);
5860 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5861 &(db->bb_free_root));
5862 }
5863
5864 spin_lock(&sbi->s_md_lock);
5865 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5866 sbi->s_mb_free_pending += clusters;
5867 spin_unlock(&sbi->s_md_lock);
5868 return 0;
5869 }
5870
5871 /*
5872 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5873 * linearly starting at the goal block and also excludes the blocks which
5874 * are going to be in use after fast commit replay.
5875 */
5876 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5877 struct ext4_allocation_request *ar, int *errp)
5878 {
5879 struct buffer_head *bitmap_bh;
5880 struct super_block *sb = ar->inode->i_sb;
5881 ext4_group_t group;
5882 ext4_grpblk_t blkoff;
5883 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5884 ext4_grpblk_t i = 0;
5885 ext4_fsblk_t goal, block;
5886 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5887
5888 goal = ar->goal;
5889 if (goal < le32_to_cpu(es->s_first_data_block) ||
5890 goal >= ext4_blocks_count(es))
5891 goal = le32_to_cpu(es->s_first_data_block);
5892
5893 ar->len = 0;
5894 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5895 for (; group < ext4_get_groups_count(sb); group++) {
5896 bitmap_bh = ext4_read_block_bitmap(sb, group);
5897 if (IS_ERR(bitmap_bh)) {
5898 *errp = PTR_ERR(bitmap_bh);
5899 pr_warn("Failed to read block bitmap\n");
5900 return 0;
5901 }
5902
5903 ext4_get_group_no_and_offset(sb,
5904 max(ext4_group_first_block_no(sb, group), goal),
5905 NULL, &blkoff);
5906 while (1) {
5907 i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5908 blkoff);
5909 if (i >= max)
5910 break;
5911 if (ext4_fc_replay_check_excluded(sb,
5912 ext4_group_first_block_no(sb, group) + i)) {
5913 blkoff = i + 1;
5914 } else
5915 break;
5916 }
5917 brelse(bitmap_bh);
5918 if (i < max)
5919 break;
5920 }
5921
5922 if (group >= ext4_get_groups_count(sb) || i >= max) {
5923 *errp = -ENOSPC;
5924 return 0;
5925 }
5926
5927 block = ext4_group_first_block_no(sb, group) + i;
5928 ext4_mb_mark_bb(sb, block, 1, 1);
5929 ar->len = 1;
5930
5931 return block;
5932 }
5933
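/*
 * Simple freeing helper for the fast commit replay path: clear the bits in
 * the on-disk block bitmap and bump the group descriptor's free cluster
 * count directly, bypassing the journal.
 */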
5934 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5935 unsigned long count)
5936 {
5937 struct buffer_head *bitmap_bh;
5938 struct super_block *sb = inode->i_sb;
5939 struct ext4_group_desc *gdp;
5940 struct buffer_head *gdp_bh;
5941 ext4_group_t group;
5942 ext4_grpblk_t blkoff;
5943 int already_freed = 0, err, i;
5944
5945 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5946 bitmap_bh = ext4_read_block_bitmap(sb, group);
5947 if (IS_ERR(bitmap_bh)) {
5948 err = PTR_ERR(bitmap_bh);
5949 pr_warn("Failed to read block bitmap\n");
5950 return;
5951 }
5952 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5953 if (!gdp)
5954 return;
5955
5956 for (i = 0; i < count; i++) {
5957 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5958 already_freed++;
5959 }
5960 mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5961 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5962 if (err)
5963 return;
5964 ext4_free_group_clusters_set(
5965 sb, gdp, ext4_free_group_clusters(sb, gdp) +
5966 count - already_freed);
5967 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5968 ext4_group_desc_csum_set(sb, group, gdp);
5969 ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5970 sync_dirty_buffer(bitmap_bh);
5971 sync_dirty_buffer(gdp_bh);
5972 brelse(bitmap_bh);
5973 }
5974
5975 /**
5976 * ext4_mb_clear_bb() -- helper function for freeing blocks.
5977 * Used by ext4_free_blocks()
5978 * @handle: handle for this transaction
5979 * @inode: inode
5980 * @bh: optional buffer of the block to be freed
5981 * @block: starting physical block to be freed
5982 * @count: number of blocks to be freed
5983 * @flags: flags used by ext4_free_blocks
5984 */
5985 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
5986 ext4_fsblk_t block, unsigned long count,
5987 int flags)
5988 {
5989 struct buffer_head *bitmap_bh = NULL;
5990 struct super_block *sb = inode->i_sb;
5991 struct ext4_group_desc *gdp;
5992 struct ext4_group_info *grp;
5993 unsigned int overflow;
5994 ext4_grpblk_t bit;
5995 struct buffer_head *gd_bh;
5996 ext4_group_t block_group;
5997 struct ext4_sb_info *sbi;
5998 struct ext4_buddy e4b;
5999 unsigned int count_clusters;
6000 int err = 0;
6001 int ret;
6002
6003 sbi = EXT4_SB(sb);
6004
6005 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6006 !ext4_inode_block_valid(inode, block, count)) {
6007 ext4_error(sb, "Freeing blocks in system zone - "
6008 "Block = %llu, count = %lu", block, count);
6009 /* err = 0. ext4_std_error should be a no op */
6010 goto error_return;
6011 }
6012 flags |= EXT4_FREE_BLOCKS_VALIDATED;
6013
6014 do_more:
6015 overflow = 0;
6016 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6017
6018 grp = ext4_get_group_info(sb, block_group);
6019 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
6020 return;
6021
6022 /*
6023 * Check to see if we are freeing blocks across a group
6024 * boundary.
6025 */
6026 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
6027 overflow = EXT4_C2B(sbi, bit) + count -
6028 EXT4_BLOCKS_PER_GROUP(sb);
6029 count -= overflow;
6030 /* The range changed so it's no longer validated */
6031 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6032 }
6033 count_clusters = EXT4_NUM_B2C(sbi, count);
6034 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6035 if (IS_ERR(bitmap_bh)) {
6036 err = PTR_ERR(bitmap_bh);
6037 bitmap_bh = NULL;
6038 goto error_return;
6039 }
6040 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
6041 if (!gdp) {
6042 err = -EIO;
6043 goto error_return;
6044 }
6045
6046 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6047 !ext4_inode_block_valid(inode, block, count)) {
6048 ext4_error(sb, "Freeing blocks in system zone - "
6049 "Block = %llu, count = %lu", block, count);
6050 /* err = 0. ext4_std_error should be a no op */
6051 goto error_return;
6052 }
6053
6054 BUFFER_TRACE(bitmap_bh, "getting write access");
6055 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6056 EXT4_JTR_NONE);
6057 if (err)
6058 goto error_return;
6059
6060 /*
6061 * We are about to modify some metadata. Call the journal APIs
6062 * to unshare ->b_data if a currently-committing transaction is
6063 * using it
6064 */
6065 BUFFER_TRACE(gd_bh, "get_write_access");
6066 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6067 if (err)
6068 goto error_return;
6069 #ifdef AGGRESSIVE_CHECK
6070 {
6071 int i;
6072 for (i = 0; i < count_clusters; i++)
6073 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
6074 }
6075 #endif
6076 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6077
6078 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6079 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6080 GFP_NOFS|__GFP_NOFAIL);
6081 if (err)
6082 goto error_return;
6083
6084 /*
6085 * We need to make sure we don't reuse the freed block until after the
6086 * transaction is committed. We make an exception if the inode is to be
6087 * written in writeback mode since writeback mode has weak data
6088 * consistency guarantees.
6089 */
6090 if (ext4_handle_valid(handle) &&
6091 ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6092 !ext4_should_writeback_data(inode))) {
6093 struct ext4_free_data *new_entry;
6094 /*
6095 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6096 * to fail.
6097 */
6098 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6099 GFP_NOFS|__GFP_NOFAIL);
6100 new_entry->efd_start_cluster = bit;
6101 new_entry->efd_group = block_group;
6102 new_entry->efd_count = count_clusters;
6103 new_entry->efd_tid = handle->h_transaction->t_tid;
6104
6105 ext4_lock_group(sb, block_group);
6106 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6107 ext4_mb_free_metadata(handle, &e4b, new_entry);
6108 } else {
6109 /* need to update group_info->bb_free and bitmap
6110 * with group lock held. generate_buddy looks at
6111 * them with the group lock held
6112 */
6113 if (test_opt(sb, DISCARD)) {
6114 err = ext4_issue_discard(sb, block_group, bit,
6115 count_clusters, NULL);
6116 if (err && err != -EOPNOTSUPP)
6117 ext4_msg(sb, KERN_WARNING, "discard request in"
6118 " group:%u block:%d count:%lu failed"
6119 " with %d", block_group, bit, count,
6120 err);
6121 } else
6122 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6123
6124 ext4_lock_group(sb, block_group);
6125 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6126 mb_free_blocks(inode, &e4b, bit, count_clusters);
6127 }
6128
6129 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
6130 ext4_free_group_clusters_set(sb, gdp, ret);
6131 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
6132 ext4_group_desc_csum_set(sb, block_group, gdp);
6133 ext4_unlock_group(sb, block_group);
6134
6135 if (sbi->s_log_groups_per_flex) {
6136 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6137 atomic64_add(count_clusters,
6138 &sbi_array_rcu_deref(sbi, s_flex_groups,
6139 flex_group)->free_clusters);
6140 }
6141
6142 /*
6143 * on a bigalloc file system, defer the s_freeclusters_counter
6144 * update to the caller (ext4_remove_space and friends) so they
6145 * can determine if a cluster freed here should be rereserved
6146 */
6147 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6148 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6149 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6150 percpu_counter_add(&sbi->s_freeclusters_counter,
6151 count_clusters);
6152 }
6153
6154 ext4_mb_unload_buddy(&e4b);
6155
6156 /* We dirtied the bitmap block */
6157 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6158 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6159
6160 /* And the group descriptor block */
6161 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6162 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6163 if (!err)
6164 err = ret;
6165
6166 if (overflow && !err) {
6167 block += count;
6168 count = overflow;
6169 put_bh(bitmap_bh);
6170 /* The range changed so it's no longer validated */
6171 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6172 goto do_more;
6173 }
6174 error_return:
6175 brelse(bitmap_bh);
6176 ext4_std_error(sb, err);
6177 return;
6178 }
6179
6180 /**
6181 * ext4_free_blocks() -- Free given blocks and update quota
6182 * @handle: handle for this transaction
6183 * @inode: inode
6184 * @bh: optional buffer of the block to be freed
6185 * @block: starting physical block to be freed
6186 * @count: number of blocks to be freed
6187 * @flags: flags used by ext4_free_blocks
6188 */
6189 void ext4_free_blocks(handle_t *handle, struct inode *inode,
6190 struct buffer_head *bh, ext4_fsblk_t block,
6191 unsigned long count, int flags)
6192 {
6193 struct super_block *sb = inode->i_sb;
6194 unsigned int overflow;
6195 struct ext4_sb_info *sbi;
6196
6197 sbi = EXT4_SB(sb);
6198
6199 if (bh) {
6200 if (block)
6201 BUG_ON(block != bh->b_blocknr);
6202 else
6203 block = bh->b_blocknr;
6204 }
6205
6206 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6207 ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
6208 return;
6209 }
6210
6211 might_sleep();
6212
6213 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6214 !ext4_inode_block_valid(inode, block, count)) {
6215 ext4_error(sb, "Freeing blocks not in datazone - "
6216 "block = %llu, count = %lu", block, count);
6217 return;
6218 }
6219 flags |= EXT4_FREE_BLOCKS_VALIDATED;
6220
6221 ext4_debug("freeing block %llu\n", block);
6222 trace_ext4_free_blocks(inode, block, count, flags);
6223
6224 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6225 BUG_ON(count > 1);
6226
6227 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6228 inode, bh, block);
6229 }
6230
6231 /*
6232 * If the extent to be freed does not begin on a cluster
6233 * boundary, we need to deal with partial clusters at the
6234 * beginning and end of the extent. Normally we will free
6235 * blocks at the beginning or the end unless we are explicitly
6236 * requested to avoid doing so.
6237 */
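/*
 * For example, with a cluster ratio of 4, a request to free blocks 6-13
 * normally gets widened to blocks 4-15 (whole clusters), while with the
 * NOFREE_FIRST_CLUSTER/NOFREE_LAST_CLUSTER flags it is shrunk to blocks
 * 8-11 so the partial clusters at either end are left alone.
 */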
6238 overflow = EXT4_PBLK_COFF(sbi, block);
6239 if (overflow) {
6240 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6241 overflow = sbi->s_cluster_ratio - overflow;
6242 block += overflow;
6243 if (count > overflow)
6244 count -= overflow;
6245 else
6246 return;
6247 } else {
6248 block -= overflow;
6249 count += overflow;
6250 }
6251 /* The range changed so it's no longer validated */
6252 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6253 }
6254 overflow = EXT4_LBLK_COFF(sbi, count);
6255 if (overflow) {
6256 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6257 if (count > overflow)
6258 count -= overflow;
6259 else
6260 return;
6261 } else
6262 count += sbi->s_cluster_ratio - overflow;
6263 /* The range changed so it's no longer validated */
6264 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6265 }
6266
6267 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6268 int i;
6269 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6270
6271 for (i = 0; i < count; i++) {
6272 cond_resched();
6273 if (is_metadata)
6274 bh = sb_find_get_block(inode->i_sb, block + i);
6275 ext4_forget(handle, is_metadata, inode, bh, block + i);
6276 }
6277 }
6278
6279 ext4_mb_clear_bb(handle, inode, block, count, flags);
6280 return;
6281 }
6282
6283 /**
6284 * ext4_group_add_blocks() -- Add given blocks to an existing group
6285 * @handle: handle to this transaction
6286 * @sb: super block
6287 * @block: start physical block to add to the block group
6288 * @count: number of blocks to free
6289 *
6290 * This marks the blocks as free in the bitmap and buddy.
6291 */
6292 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6293 ext4_fsblk_t block, unsigned long count)
6294 {
6295 struct buffer_head *bitmap_bh = NULL;
6296 struct buffer_head *gd_bh;
6297 ext4_group_t block_group;
6298 ext4_grpblk_t bit;
6299 unsigned int i;
6300 struct ext4_group_desc *desc;
6301 struct ext4_sb_info *sbi = EXT4_SB(sb);
6302 struct ext4_buddy e4b;
6303 int err = 0, ret, free_clusters_count;
6304 ext4_grpblk_t clusters_freed;
6305 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6306 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6307 unsigned long cluster_count = last_cluster - first_cluster + 1;
6308
6309 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6310
6311 if (count == 0)
6312 return 0;
6313
6314 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6315 /*
6316 * Check to see if we are freeing blocks across a group
6317 * boundary.
6318 */
6319 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6320 ext4_warning(sb, "too many blocks added to group %u",
6321 block_group);
6322 err = -EINVAL;
6323 goto error_return;
6324 }
6325
6326 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6327 if (IS_ERR(bitmap_bh)) {
6328 err = PTR_ERR(bitmap_bh);
6329 bitmap_bh = NULL;
6330 goto error_return;
6331 }
6332
6333 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
6334 if (!desc) {
6335 err = -EIO;
6336 goto error_return;
6337 }
6338
6339 if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6340 ext4_error(sb, "Adding blocks in system zones - "
6341 "Block = %llu, count = %lu",
6342 block, count);
6343 err = -EINVAL;
6344 goto error_return;
6345 }
6346
6347 BUFFER_TRACE(bitmap_bh, "getting write access");
6348 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6349 EXT4_JTR_NONE);
6350 if (err)
6351 goto error_return;
6352
6353 /*
6354 * We are about to modify some metadata. Call the journal APIs
6355 * to unshare ->b_data if a currently-committing transaction is
6356 * using it
6357 */
6358 BUFFER_TRACE(gd_bh, "get_write_access");
6359 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6360 if (err)
6361 goto error_return;
6362
6363 for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
6364 BUFFER_TRACE(bitmap_bh, "clear bit");
6365 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
6366 ext4_error(sb, "bit already cleared for block %llu",
6367 (ext4_fsblk_t)(block + i));
6368 BUFFER_TRACE(bitmap_bh, "bit already cleared");
6369 } else {
6370 clusters_freed++;
6371 }
6372 }
6373
6374 err = ext4_mb_load_buddy(sb, block_group, &e4b);
6375 if (err)
6376 goto error_return;
6377
6378 /*
6379 * need to update group_info->bb_free and bitmap
6380 * with group lock held. generate_buddy looks at
6381 * them with the group lock held
6382 */
6383 ext4_lock_group(sb, block_group);
6384 mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
6385 mb_free_blocks(NULL, &e4b, bit, cluster_count);
6386 free_clusters_count = clusters_freed +
6387 ext4_free_group_clusters(sb, desc);
6388 ext4_free_group_clusters_set(sb, desc, free_clusters_count);
6389 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
6390 ext4_group_desc_csum_set(sb, block_group, desc);
6391 ext4_unlock_group(sb, block_group);
6392 percpu_counter_add(&sbi->s_freeclusters_counter,
6393 clusters_freed);
6394
6395 if (sbi->s_log_groups_per_flex) {
6396 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6397 atomic64_add(clusters_freed,
6398 &sbi_array_rcu_deref(sbi, s_flex_groups,
6399 flex_group)->free_clusters);
6400 }
6401
6402 ext4_mb_unload_buddy(&e4b);
6403
6404 /* We dirtied the bitmap block */
6405 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6406 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6407
6408 /* And the group descriptor block */
6409 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6410 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6411 if (!err)
6412 err = ret;
6413
6414 error_return:
6415 brelse(bitmap_bh);
6416 ext4_std_error(sb, err);
6417 return err;
6418 }
6419
6420 /**
6421 * ext4_trim_extent -- function to TRIM one single free extent in the group
6422 * @sb: super block for the file system
6423 * @start: starting block of the free extent in the alloc. group
6424 * @count: number of blocks to TRIM
6425 * @e4b: ext4 buddy for the group
6426 *
6427 * Trim "count" blocks starting at "start" in the "group". To assure that no
6428 * one will allocate those blocks, mark it as used in buddy bitmap. This must
6429 * be called with under the group lock.
6430 */
6431 static int ext4_trim_extent(struct super_block *sb,
6432 int start, int count, struct ext4_buddy *e4b)
6433 __releases(bitlock)
6434 __acquires(bitlock)
6435 {
6436 struct ext4_free_extent ex;
6437 ext4_group_t group = e4b->bd_group;
6438 int ret = 0;
6439
6440 trace_ext4_trim_extent(sb, group, start, count);
6441
6442 assert_spin_locked(ext4_group_lock_ptr(sb, group));
6443
6444 ex.fe_start = start;
6445 ex.fe_group = group;
6446 ex.fe_len = count;
6447
6448 /*
6449 * Mark blocks used, so no one can reuse them while
6450 * being trimmed.
6451 */
6452 mb_mark_used(e4b, &ex);
6453 ext4_unlock_group(sb, group);
6454 ret = ext4_issue_discard(sb, group, start, count, NULL);
6455 ext4_lock_group(sb, group);
6456 mb_free_blocks(NULL, e4b, start, ex.fe_len);
6457 return ret;
6458 }
6459
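/*
 * Return the index of the last cluster in the given group; the last group of
 * the filesystem may contain fewer clusters than a full group.
 */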
6460 static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
6461 ext4_group_t grp)
6462 {
6463 unsigned long nr_clusters_in_group;
6464
6465 if (grp < (ext4_get_groups_count(sb) - 1))
6466 nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
6467 else
6468 nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
6469 ext4_group_first_block_no(sb, grp))
6470 >> EXT4_CLUSTER_BITS(sb);
6471
6472 return nr_clusters_in_group - 1;
6473 }
6474
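/* stop trimming if a fatal signal is pending or the task is freezing */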
6475 static bool ext4_trim_interrupted(void)
6476 {
6477 return fatal_signal_pending(current) || freezing(current);
6478 }
6479
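/*
 * Scan the block bitmap between 'start' and 'max' and discard every run of
 * free clusters that is at least 'minblocks' long, temporarily marking the
 * run as in-use in the buddy while the discard is issued. Returns the number
 * of clusters for which discards were attempted.
 */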
6480 static int ext4_try_to_trim_range(struct super_block *sb,
6481 struct ext4_buddy *e4b, ext4_grpblk_t start,
6482 ext4_grpblk_t max, ext4_grpblk_t minblocks)
6483 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6484 __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6485 {
6486 ext4_grpblk_t next, count, free_count, last, origin_start;
6487 bool set_trimmed = false;
6488 void *bitmap;
6489
6490 last = ext4_last_grp_cluster(sb, e4b->bd_group);
6491 bitmap = e4b->bd_bitmap;
6492 if (start == 0 && max >= last)
6493 set_trimmed = true;
6494 origin_start = start;
6495 start = max(e4b->bd_info->bb_first_free, start);
6496 count = 0;
6497 free_count = 0;
6498
6499 while (start <= max) {
6500 start = mb_find_next_zero_bit(bitmap, max + 1, start);
6501 if (start > max)
6502 break;
6503
6504 next = mb_find_next_bit(bitmap, last + 1, start);
6505 if (origin_start == 0 && next >= last)
6506 set_trimmed = true;
6507
6508 if ((next - start) >= minblocks) {
6509 int ret = ext4_trim_extent(sb, start, next - start, e4b);
6510
6511 if (ret && ret != -EOPNOTSUPP)
6512 return count;
6513 count += next - start;
6514 }
6515 free_count += next - start;
6516 start = next + 1;
6517
6518 if (ext4_trim_interrupted())
6519 return count;
6520
6521 if (need_resched()) {
6522 ext4_unlock_group(sb, e4b->bd_group);
6523 cond_resched();
6524 ext4_lock_group(sb, e4b->bd_group);
6525 }
6526
6527 if ((e4b->bd_info->bb_free - free_count) < minblocks)
6528 break;
6529 }
6530
6531 if (set_trimmed)
6532 EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
6533
6534 return count;
6535 }
6536
6537 /**
6538 * ext4_trim_all_free -- function to trim all free space in alloc. group
6539 * @sb: super block for file system
6540 * @group: group to be trimmed
6541 * @start: first group block to examine
6542 * @max: last group block to examine
6543 * @minblocks: minimum extent block count
6544 *
6545 * ext4_trim_all_free walks through group's block bitmap searching for free
6546 * extents. When a free extent is found, it is marked as used in the group
6547 * buddy bitmap, a TRIM command is issued on the extent, and the extent is
6548 * then freed back in the group buddy bitmap.
6549 */
6550 static ext4_grpblk_t
6551 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6552 ext4_grpblk_t start, ext4_grpblk_t max,
6553 ext4_grpblk_t minblocks)
6554 {
6555 struct ext4_buddy e4b;
6556 int ret;
6557
6558 trace_ext4_trim_all_free(sb, group, start, max);
6559
6560 ret = ext4_mb_load_buddy(sb, group, &e4b);
6561 if (ret) {
6562 ext4_warning(sb, "Error %d loading buddy information for %u",
6563 ret, group);
6564 return ret;
6565 }
6566
6567 ext4_lock_group(sb, group);
6568
6569 if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6570 minblocks < EXT4_SB(sb)->s_last_trim_minblks)
6571 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6572 else
6573 ret = 0;
6574
6575 ext4_unlock_group(sb, group);
6576 ext4_mb_unload_buddy(&e4b);
6577
6578 ext4_debug("trimmed %d blocks in the group %d\n",
6579 ret, group);
6580
6581 return ret;
6582 }
6583
6584 /**
6585 * ext4_trim_fs() -- trim ioctl handle function
6586 * @sb: superblock for filesystem
6587 * @range: fstrim_range structure
6588 *
6589 * start: First Byte to trim
6590 * len: number of Bytes to trim from start
6591 * minlen: minimum extent length in Bytes
6592 * ext4_trim_fs goes through all allocation groups containing Bytes from
6593 * start to start+len. For each such group the ext4_trim_all_free function
6594 * is invoked to trim all free space.
6595 */
6596 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6597 {
6598 struct request_queue *q = bdev_get_queue(sb->s_bdev);
6599 struct ext4_group_info *grp;
6600 ext4_group_t group, first_group, last_group;
6601 ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6602 uint64_t start, end, minlen, trimmed = 0;
6603 ext4_fsblk_t first_data_blk =
6604 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6605 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6606 int ret = 0;
6607
6608 start = range->start >> sb->s_blocksize_bits;
6609 end = start + (range->len >> sb->s_blocksize_bits) - 1;
6610 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6611 range->minlen >> sb->s_blocksize_bits);
6612
6613 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6614 start >= max_blks ||
6615 range->len < sb->s_blocksize)
6616 return -EINVAL;
6617 /* No point in trying to trim less than the discard granularity */
6618 if (range->minlen < q->limits.discard_granularity) {
6619 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6620 q->limits.discard_granularity >> sb->s_blocksize_bits);
6621 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6622 goto out;
6623 }
6624 if (end >= max_blks - 1)
6625 end = max_blks - 1;
6626 if (end <= first_data_blk)
6627 goto out;
6628 if (start < first_data_blk)
6629 start = first_data_blk;
6630
6631 /* Determine first and last group to examine based on start and end */
6632 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6633 &first_group, &first_cluster);
6634 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6635 &last_group, &last_cluster);
6636
6637 /* end now represents the last cluster to discard in this group */
6638 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6639
6640 for (group = first_group; group <= last_group; group++) {
6641 if (ext4_trim_interrupted())
6642 break;
6643 grp = ext4_get_group_info(sb, group);
6644 if (!grp)
6645 continue;
6646 /* We only do this if the grp has never been initialized */
6647 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6648 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6649 if (ret)
6650 break;
6651 }
6652
6653 /*
6654 * For all the groups except the last one, last cluster will
6655 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6656 * change it for the last group, note that last_cluster is
6657 * already computed earlier by ext4_get_group_no_and_offset()
6658 */
6659 if (group == last_group)
6660 end = last_cluster;
6661 if (grp->bb_free >= minlen) {
6662 cnt = ext4_trim_all_free(sb, group, first_cluster,
6663 end, minlen);
6664 if (cnt < 0) {
6665 ret = cnt;
6666 break;
6667 }
6668 trimmed += cnt;
6669 }
6670
6671 /*
6672 * For every group except the first one, we are sure
6673 * that the first cluster to discard will be cluster #0.
6674 */
6675 first_cluster = 0;
6676 }
6677
6678 if (!ret)
6679 EXT4_SB(sb)->s_last_trim_minblks = minlen;
6680
6681 out:
6682 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6683 return ret;
6684 }
6685
6686 /* Iterate all the free extents in the group. */
6687 int
6688 ext4_mballoc_query_range(
6689 struct super_block *sb,
6690 ext4_group_t group,
6691 ext4_grpblk_t start,
6692 ext4_grpblk_t end,
6693 ext4_mballoc_query_range_fn formatter,
6694 void *priv)
6695 {
6696 void *bitmap;
6697 ext4_grpblk_t next;
6698 struct ext4_buddy e4b;
6699 int error;
6700
6701 error = ext4_mb_load_buddy(sb, group, &e4b);
6702 if (error)
6703 return error;
6704 bitmap = e4b.bd_bitmap;
6705
6706 ext4_lock_group(sb, group);
6707
6708 start = max(e4b.bd_info->bb_first_free, start);
6709 if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6710 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6711
6712 while (start <= end) {
6713 start = mb_find_next_zero_bit(bitmap, end + 1, start);
6714 if (start > end)
6715 break;
6716 next = mb_find_next_bit(bitmap, end + 1, start);
6717
6718 ext4_unlock_group(sb, group);
6719 error = formatter(sb, group, start, next - start, priv);
6720 if (error)
6721 goto out_unload;
6722 ext4_lock_group(sb, group);
6723
6724 start = next + 1;
6725 }
6726
6727 ext4_unlock_group(sb, group);
6728 out_unload:
6729 ext4_mb_unload_buddy(&e4b);
6730
6731 return error;
6732 }
6733