// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in few groups
 *
 * TODO v4:
 *  - normalization should take into account whether file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - quota
 *  - reservation for superuser
 *
 * TODO v3:
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 *  - error handling
 */
/*
 * An allocation request asks for multiple blocks near to the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide to use either
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves
 * block bitmap and buddy information. The information is stored in the
 * inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * which is blocks_per_page/2
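 *
 * For example (illustrative), with PAGE_SIZE = 4096 and a 1k block size,
 * blocks_per_page = 4 and groups_per_page = 2: a single page caches the
 * bitmap and buddy blocks of two groups.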
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we needed. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
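 * For example (illustrative), with -o stripe=384 the group prealloc
 * request would be normalized to 768 blocks, the smallest multiple of
 * 384 greater than the 512-block default.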
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory group
 * info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (which is the
 *    total number of buddy bitmap orders possible) lists. Group infos are
 *    placed in the appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty
 *    groups, so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the above
 * data structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR_POWER2_ALIGNED, we look for groups which have the largest_free_order
 * >= the order of the request. We directly look at the largest free order list
 * in the data structure (1) above where largest_free_order = order of the
 * request. If that list is empty, we look at the remaining lists in increasing
 * order of largest_free_order. This allows us to perform the CR_POWER2_ALIGNED
 * lookup in O(1) time.
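 *
 * For example (illustrative), a 16-cluster request (order 4) first scans
 * the list where largest_free_order == 4, then the order-5 list, and so
 * on upwards.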
 *
 * At CR_GOAL_LEN_FAST, we only consider groups where the
 * average fragment size > request size. So, we look up a group which has an
 * average fragment size just above or equal to the request size using our
 * average fragment size group lists (data structure 2) in O(1) time.
 *
 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be satisfied
 * in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
 * CR_GOAL_LEN_FAST suggests that there is no BG that has an avg
 * fragment size > goal length. So before falling back to the slower
 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim the goal length
 * and then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with
 * a big enough average fragment size. This increases the chances of finding a
 * suitable block group in O(1) time and results in faster allocation at the
 * cost of reduced size of allocation.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups
 * in linear order, which requires O(N) search time for each CR_POWER2_ALIGNED
 * and CR_GOAL_LEN_FAST phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses a buddy scan only if the request len is a power
 * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size units. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent among the found extents. Searching for blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria of whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations not being
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices that may result in higher seek times. "mb_linear_limit"
 * tells mballoc how many groups it should search linearly before
 * consulting the above data structures for more efficient lookups. For
 * non-rotational devices, this value defaults to 0 and for rotational devices
 * this is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both of the prealloc spaces are populated as described above. So for the
 * first request we will hit the buddy cache, which will result in the
 * prealloc space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes a part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 * in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 *       is used in real operation because we can't know actual used
 *       bits from PA, only from on-disk bitmap
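 *
 * for example (illustrative): create an inode PA of 16 blocks
 * (buddy += 16, PA = 16), then write 10 of them (on-disk += 10,
 * PA = 6); discarding the PA then frees the 6 unused blocks in the
 * buddy (buddy -= 6)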
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set or/and PA covers corresponding
 *     block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify the
 * buddy in this case, but we should take care about concurrent init
 *
 */

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group		(group)
 *  - object (inode/locality)		(object)
 *  - per-pa lock			(pa)
 *  - cr_power2_aligned lists lock	(cr_power2_aligned)
 *  - cr_goal_len_fast lists lock	(cr_goal_len_fast)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr_power2_aligned/cr_goal_len_fast
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size. There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
			       ext4_group_t group, enum criteria cr);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter goes as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks, i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and no freed blocks are
 * found do we sample the percpu seq counter for all cpus using the function
 * ext4_get_discard_pa_seq_sum() below. This happens after making sure that
 * all the PAs on grp->bb_prealloc_list got freed or that the list is empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
	int __cpu;
	u64 __seq = 0;

	for_each_possible_cpu(__cpu)
		__seq += per_cpu(discard_pa_seq, __cpu);
	return __seq;
}

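/*
 * Example (illustrative): on a 64-bit machine, an addr 5 bytes past an
 * 8-byte-aligned base has its low three bits set to 101b, so the helper
 * below bumps *bit by 5 * 8 = 40 and aligns addr down to the base,
 * keeping the (addr, bit) pair equivalent.
 */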
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

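/*
 * In the two helpers below, the "fix" offset compensates for
 * mb_correct_addr_and_bit() aligning addr down: start and max are
 * shifted by the same amount, so the returned bit number is still
 * relative to the caller's original addr.
 */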
static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

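/*
 * Return the order-@order buddy bitmap and, via @max, its size in bits.
 * Order 0 is the plain block bitmap with 1 << (bd_blkbits + 3) bits;
 * higher orders live at s_mb_offsets[order] inside bd_buddy and have
 * s_mb_maxs[order] valid bits.
 */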
static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	struct buffer_head *bh;

	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
	if (!grp->bb_bitmap)
		return;

	bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR_OR_NULL(bh)) {
		kfree(grp->bb_bitmap);
		grp->bb_bitmap = NULL;
		return;
	}

	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
	put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	if (e4b->bd_info->bb_check_counter++ % 10)
		return 0;

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 0 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	if (!grp)
		return 0;
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks started from @first with length @len into
 * smaller chunks with power of 2 blocks.
 * Clear the bits in bitmap which the blocks of the chunk(s) covered,
 * then increase bb_counters[] for the corresponding chunk size.
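 *
 * For example (illustrative), first = 5, len = 10 is split into the
 * power-of-2 chunks 1@5, 2@6, 4@8, 2@12 and 1@14, bumping
 * bb_counters[0] twice, bb_counters[1] twice and bb_counters[2] once.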
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}

static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
	int order;

	/*
	 * We don't bother with special lists for groups whose free extents
	 * are all single blocks, nor for completely empty groups.
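	 *
	 * Example (illustrative): len = 64 maps to order fls(64) - 2 = 5,
	 * so such groups sit on list 5.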
	 */
	order = fls(len) - 2;
	if (order < 0)
		return 0;
	if (order == MB_NUM_ORDERS(sb))
		order--;
	if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
		order = MB_NUM_ORDERS(sb) - 1;
	return order;
}

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int new_order;

	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
		return;

	new_order = mb_avg_fragment_size_order(sb,
					grp->bb_free / grp->bb_fragments);
	if (new_order == grp->bb_avg_fragment_size_order)
		return;

	if (grp->bb_avg_fragment_size_order != -1) {
		write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
		list_del(&grp->bb_avg_fragment_size_node);
		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	}
	grp->bb_avg_fragment_size_order = new_order;
	write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	list_add_tail(&grp->bb_avg_fragment_size_node,
		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
			enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *iter;
	int i;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;

	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
		atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);

	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
			continue;
		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
			continue;
		}
		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
				    bb_largest_free_order_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
				*group = iter->bb_group;
				ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
				read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
				return;
			}
		}
		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
	}

	/* Increment cr and search again if no group is found */
	*new_cr = CR_GOAL_LEN_FAST;
}

/*
 * Find a suitable group of given order from the average fragments list.
 */
static struct ext4_group_info *
ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
	rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
	struct ext4_group_info *grp = NULL, *iter;
	enum criteria cr = ac->ac_criteria;

	if (list_empty(frag_list))
		return NULL;
	read_lock(frag_list_lock);
	if (list_empty(frag_list)) {
		read_unlock(frag_list_lock);
		return NULL;
	}
	list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
		if (sbi->s_mb_stats)
			atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
		if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
			grp = iter;
			break;
		}
	}
	read_unlock(frag_list_lock);
	return grp;
}

/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
 */
static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
		enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = NULL;
	int i;

	if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) {
		if (sbi->s_mb_stats)
			atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions);
	}

	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
		if (grp) {
			*group = grp->bb_group;
			ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
			return;
		}
	}

	/*
	 * CR_BEST_AVAIL_LEN works based on the concept that we have
	 * a larger normalized goal len request which can be trimmed to
	 * a smaller goal len such that it can still satisfy original
	 * request len. However, allocation request for non-regular
	 * files never gets normalized.
	 * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
	 */
	if (ac->ac_flags & EXT4_MB_HINT_DATA)
		*new_cr = CR_BEST_AVAIL_LEN;
	else
		*new_cr = CR_GOAL_LEN_SLOW;
}

/*
 * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment
 * order we have and proactively trim the goal request length to that order to
 * find a suitable group faster.
 *
 * This optimizes allocation speed at the cost of slightly reduced
 * preallocations. However, we make sure that we don't trim the request too
 * much and fall to CR_GOAL_LEN_SLOW in that case.
 */
static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
		enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = NULL;
	int i, order, min_order;
	unsigned long num_stripe_clusters = 0;

	if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) {
		if (sbi->s_mb_stats)
			atomic_inc(&sbi->s_bal_best_avail_bad_suggestions);
	}

	/*
	 * mb_avg_fragment_size_order() returns order in a way that makes
	 * retrieving back the length using (1 << order) inaccurate. Hence, use
	 * fls() instead since we need to know the actual length while modifying
	 * goal length.
	 */
	order = fls(ac->ac_g_ex.fe_len) - 1;
	if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
		order = MB_NUM_ORDERS(ac->ac_sb);
	min_order = order - sbi->s_mb_best_avail_max_trim_order;
	if (min_order < 0)
		min_order = 0;

	if (sbi->s_stripe > 0) {
		/*
		 * We are assuming that stripe size is always a multiple of
		 * cluster ratio, otherwise __ext4_fill_super exits early.
		 */
		num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
		if (1 << min_order < num_stripe_clusters)
			/*
			 * We consider 1 order less because later we round
			 * up the goal len to num_stripe_clusters
			 */
			min_order = fls(num_stripe_clusters) - 1;
	}

	if (1 << min_order < ac->ac_o_ex.fe_len)
		min_order = fls(ac->ac_o_ex.fe_len);

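	/*
	 * Illustrative example: a normalized goal of 1000 clusters gives
	 * order fls(1000) - 1 = 9, so the loop below first tries a trimmed
	 * goal of 512 clusters and keeps halving (rounding up to stripe
	 * multiples when a stripe is set) down to min_order.
	 */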
	for (i = order; i >= min_order; i--) {
		int frag_order;
		/*
		 * Scale down goal len to make sure we find something
		 * in the free fragments list. Basically, reduce
		 * preallocations.
		 */
		ac->ac_g_ex.fe_len = 1 << i;

		if (num_stripe_clusters > 0) {
			/*
			 * Try to round up the adjusted goal length to
			 * stripe size (in cluster units) multiple for
			 * efficiency.
			 */
			ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
						     num_stripe_clusters);
		}

		frag_order = mb_avg_fragment_size_order(ac->ac_sb,
							ac->ac_g_ex.fe_len);

		grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
		if (grp) {
			*group = grp->bb_group;
			ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
			return;
		}
	}

	/* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
	ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
	*new_cr = CR_GOAL_LEN_SLOW;
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
		return 0;
	if (ac->ac_criteria >= CR_GOAL_LEN_SLOW)
		return 0;
	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
		return 0;
	return 1;
}

/*
 * Return next linear group for allocation. If linear traversal should not be
 * performed, this function just returns the same group.
 */
static ext4_group_t
next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
		  ext4_group_t ngroups)
{
	if (!should_optimize_scan(ac))
		goto inc_and_return;

	if (ac->ac_groups_linear_remaining) {
		ac->ac_groups_linear_remaining--;
		goto inc_and_return;
	}

	return group;
inc_and_return:
	/*
	 * Artificially restricted ngroups for non-extent
	 * files makes group > ngroups possible on first loop.
	 */
	return group + 1 >= ngroups ? 0 : group + 1;
}

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at the current CR level, this field is updated to
 *            indicate the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates
 *            the next group that the allocator intends to use for allocation.
 *            As output, this field indicates the next group that should be
 *            used as determined by the optimization functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
		enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	*new_cr = ac->ac_criteria;

	if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
		*group = next_linear_group(ac, *group, ngroups);
		return;
	}

	if (*new_cr == CR_POWER2_ALIGNED) {
		ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group, ngroups);
	} else if (*new_cr == CR_GOAL_LEN_FAST) {
		ext4_mb_choose_next_group_goal_fast(ac, new_cr, group, ngroups);
	} else if (*new_cr == CR_BEST_AVAIL_LEN) {
		ext4_mb_choose_next_group_best_avail(ac, new_cr, group, ngroups);
	} else {
		/*
		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
		 * bb_free. But until that happens, we should never come here.
		 */
		WARN_ON(1);
	}
}

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int i;

	for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
		if (grp->bb_counters[i] > 0)
			break;
	/* No need to move between order lists? */
	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
	    i == grp->bb_largest_free_order) {
		grp->bb_largest_free_order = i;
		return;
	}

	if (grp->bb_largest_free_order >= 0) {
		write_lock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
		list_del_init(&grp->bb_largest_free_order_node);
		write_unlock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
	}
	grp->bb_largest_free_order = i;
	if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
		write_lock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
		list_add_tail(&grp->bb_largest_free_order_node,
		      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
		write_unlock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
			    void *buddy, void *bitmap, ext4_group_t group,
			    struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "block bitmap and bg descriptor "
				      "inconsistent: %u vs %u free clusters",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider group descriptor
		 * corrupt and update bb_free using bitmap value
		 */
		grp->bb_free = free;
		ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
	}
	mb_set_largest_free_order(sb, grp);
	mb_update_avg_fragment_size(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	atomic_inc(&sbi->s_mb_buddies_generated);
	atomic64_add(period, &sbi->s_mb_generation_time);
}

static void mb_regenerate_buddy(struct ext4_buddy *e4b)
{
	int count;
	int order = 1;
	void *buddy;

	while ((buddy = mb_find_buddy(e4b, order++, &count)))
		mb_set_bits(buddy, 0, count);

	e4b->bd_info->bb_fragments = 0;
	memset(e4b->bd_info->bb_counters, 0,
		sizeof(*e4b->bd_info->bb_counters) *
		(e4b->bd_sb->s_blocksize_bits + 2));

	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
		e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves
 * block bitmap and buddy information. The information is
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page which
 * is blocks_per_page/2
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */

static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
{
	ext4_group_t ngroups;
	unsigned int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = i_blocksize(inode);
	blocks_per_page = PAGE_SIZE / blocksize;

	mb_debug(sb, "init page %lu\n", page->index);

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, gfp);
		if (bh == NULL)
			return -ENOMEM;
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		if (!grinfo)
			continue;
		/*
		 * If page is uptodate then we came here after online resize
		 * which added some new uninitialized group info structs, so
		 * we must skip all initialized uptodate buddies on the page,
		 * which may be currently in use by an allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
		if (IS_ERR(bh[i])) {
			err = PTR_ERR(bh[i]);
			bh[i] = NULL;
			goto out;
		}
		mb_debug(sb, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		int err2;

		if (!bh[i])
			continue;
		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
		if (!err)
			err = err2;
	}

	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		if (!buffer_verified(bh[group - first_group]))
			/* Skip faulty bitmaps */
			continue;
		err = 0;

		/*
		 * data carries the information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		grinfo = ext4_get_group_info(sb, group);
		if (!grinfo) {
			err = -EFSCORRUPTED;
			goto out;
		}
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
			       (MB_NUM_ORDERS(sb)));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure another parallel
 * init_group on the same buddy page doesn't happen while we hold the buddy
 * page lock. Return the locked buddy and bitmap pages on the e4b struct. If
 * buddy and bitmap are on the same page, e4b->bd_buddy_page is NULL and the
 * return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
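	 * (Illustrative: with 4k blocks and 4k pages, blocks_per_page == 1,
	 * so group 5's bitmap lives on page 10 and its buddy on page 11.)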
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, gfp);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, gfp);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		put_page(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		put_page(e4b->bd_buddy_page);
	}
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	might_sleep();
	mb_debug(sb, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	if (!this_grp)
		return -EFSCORRUPTED;

	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page to the page cache.
	 * The call to ext4_mb_get_buddy_page_lock will mark the
	 * page accessed.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL, gfp);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}
1571
1572 /*
1573 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1574 * block group lock of all groups for this page; do not hold the BG lock when
1575 * calling this routine!
1576 */
1577 static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block * sb,ext4_group_t group,struct ext4_buddy * e4b,gfp_t gfp)1578 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1579 struct ext4_buddy *e4b, gfp_t gfp)
1580 {
1581 int blocks_per_page;
1582 int block;
1583 int pnum;
1584 int poff;
1585 struct page *page;
1586 int ret;
1587 struct ext4_group_info *grp;
1588 struct ext4_sb_info *sbi = EXT4_SB(sb);
1589 struct inode *inode = sbi->s_buddy_cache;
1590
1591 might_sleep();
1592 mb_debug(sb, "load group %u\n", group);
1593
1594 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1595 grp = ext4_get_group_info(sb, group);
1596 if (!grp)
1597 return -EFSCORRUPTED;
1598
1599 e4b->bd_blkbits = sb->s_blocksize_bits;
1600 e4b->bd_info = grp;
1601 e4b->bd_sb = sb;
1602 e4b->bd_group = group;
1603 e4b->bd_buddy_page = NULL;
1604 e4b->bd_bitmap_page = NULL;
1605
1606 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1607 /*
1608 * we need full data about the group
1609 * to make a good selection
1610 */
1611 ret = ext4_mb_init_group(sb, group, gfp);
1612 if (ret)
1613 return ret;
1614 }
1615
1616 /*
1617 * the buddy cache inode stores the block bitmap
1618 * and buddy information in consecutive blocks.
1619 * So for each group we need two blocks.
1620 */
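/*
 * Worked example (editorial illustration): assuming 4k pages and 1k
 * blocks, blocks_per_page is 4, so group 5 uses block 10 for the
 * bitmap and block 11 for the buddy; both land in page cache page 2,
 * at block offsets 2 and 3 within that page.
 */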
1621 block = group * 2;
1622 pnum = block / blocks_per_page;
1623 poff = block % blocks_per_page;
1624
1625 /* we could use find_or_create_page(), but it locks the page,
1626 * which we'd like to avoid in the fast path ... */
1627 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1628 if (page == NULL || !PageUptodate(page)) {
1629 if (page)
1630 /*
1631 * drop the page reference and try
1632 * to get the page with lock. If we
1633 * are not uptodate that implies
1634 * somebody just created the page but
1635 * has not yet initialized it. So
1636 * wait for that init to finish.
1637 */
1638 put_page(page);
1639 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1640 if (page) {
1641 if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
1642 "ext4: bitmap's paging->mapping != inode->i_mapping\n")) {
1643 /* should never happen */
1644 unlock_page(page);
1645 ret = -EINVAL;
1646 goto err;
1647 }
1648 if (!PageUptodate(page)) {
1649 ret = ext4_mb_init_cache(page, NULL, gfp);
1650 if (ret) {
1651 unlock_page(page);
1652 goto err;
1653 }
1654 mb_cmp_bitmaps(e4b, page_address(page) +
1655 (poff * sb->s_blocksize));
1656 }
1657 unlock_page(page);
1658 }
1659 }
1660 if (page == NULL) {
1661 ret = -ENOMEM;
1662 goto err;
1663 }
1664 if (!PageUptodate(page)) {
1665 ret = -EIO;
1666 goto err;
1667 }
1668
1669 /* Pages marked accessed already */
1670 e4b->bd_bitmap_page = page;
1671 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1672
1673 block++;
1674 pnum = block / blocks_per_page;
1675 poff = block % blocks_per_page;
1676
1677 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1678 if (page == NULL || !PageUptodate(page)) {
1679 if (page)
1680 put_page(page);
1681 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1682 if (page) {
1683 if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
1684 "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
1685 /* should never happen */
1686 unlock_page(page);
1687 ret = -EINVAL;
1688 goto err;
1689 }
1690 if (!PageUptodate(page)) {
1691 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1692 gfp);
1693 if (ret) {
1694 unlock_page(page);
1695 goto err;
1696 }
1697 }
1698 unlock_page(page);
1699 }
1700 }
1701 if (page == NULL) {
1702 ret = -ENOMEM;
1703 goto err;
1704 }
1705 if (!PageUptodate(page)) {
1706 ret = -EIO;
1707 goto err;
1708 }
1709
1710 /* Pages marked accessed already */
1711 e4b->bd_buddy_page = page;
1712 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1713
1714 return 0;
1715
1716 err:
1717 if (page)
1718 put_page(page);
1719 if (e4b->bd_bitmap_page)
1720 put_page(e4b->bd_bitmap_page);
1721
1722 e4b->bd_buddy = NULL;
1723 e4b->bd_bitmap = NULL;
1724 return ret;
1725 }
1726
1727 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1728 struct ext4_buddy *e4b)
1729 {
1730 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1731 }
1732
1733 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1734 {
1735 if (e4b->bd_bitmap_page)
1736 put_page(e4b->bd_bitmap_page);
1737 if (e4b->bd_buddy_page)
1738 put_page(e4b->bd_buddy_page);
1739 }
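
/*
 * Usage sketch (editorial addition, not in the original source): the
 * canonical calling pattern around the two helpers above is load,
 * lock, operate, unlock, unload, as ext4_mb_try_best_found() below
 * demonstrates:
 *
 *	struct ext4_buddy e4b;
 *
 *	if (ext4_mb_load_buddy(sb, group, &e4b) == 0) {
 *		ext4_lock_group(sb, group);
 *		... inspect or modify e4b.bd_bitmap / e4b.bd_buddy ...
 *		ext4_unlock_group(sb, group);
 *		ext4_mb_unload_buddy(&e4b);
 *	}
 */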
1740
1742 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1743 {
1744 int order = 1, max;
1745 void *bb;
1746
1747 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1748 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1749
1750 while (order <= e4b->bd_blkbits + 1) {
1751 bb = mb_find_buddy(e4b, order, &max);
1752 if (!mb_test_bit(block >> order, bb)) {
1753 /* this block is part of buddy of order 'order' */
1754 return order;
1755 }
1756 order++;
1757 }
1758 return 0;
1759 }
1760
1761 static void mb_clear_bits(void *bm, int cur, int len)
1762 {
1763 __u32 *addr;
1764
1765 len = cur + len;
1766 while (cur < len) {
1767 if ((cur & 31) == 0 && (len - cur) >= 32) {
1768 /* fast path: clear whole word at once */
1769 addr = bm + (cur >> 3);
1770 *addr = 0;
1771 cur += 32;
1772 continue;
1773 }
1774 mb_clear_bit(cur, bm);
1775 cur++;
1776 }
1777 }
1778
1779 /* clear bits in given range;
1780 * will return the first bit that was already zero (if any), -1 otherwise
1781 */
1782 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1783 {
1784 __u32 *addr;
1785 int zero_bit = -1;
1786
1787 len = cur + len;
1788 while (cur < len) {
1789 if ((cur & 31) == 0 && (len - cur) >= 32) {
1790 /* fast path: clear whole word at once */
1791 addr = bm + (cur >> 3);
1792 if (*addr != (__u32)(-1) && zero_bit == -1)
1793 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1794 *addr = 0;
1795 cur += 32;
1796 continue;
1797 }
1798 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1799 zero_bit = cur;
1800 cur++;
1801 }
1802
1803 return zero_bit;
1804 }
1805
1806 void mb_set_bits(void *bm, int cur, int len)
1807 {
1808 __u32 *addr;
1809
1810 len = cur + len;
1811 while (cur < len) {
1812 if ((cur & 31) == 0 && (len - cur) >= 32) {
1813 /* fast path: set whole word at once */
1814 addr = bm + (cur >> 3);
1815 *addr = 0xffffffff;
1816 cur += 32;
1817 continue;
1818 }
1819 mb_set_bit(cur, bm);
1820 cur++;
1821 }
1822 }
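
/*
 * Worked example for the word-at-a-time fast path above (editorial
 * illustration): mb_set_bits(bitmap, 5, 64) covers bits [5, 69).
 * Bits 5..31 are set one at a time, bits 32..63 with a single
 * 0xffffffff store (cur is 32-bit aligned and at least 32 bits
 * remain), and bits 64..68 fall back to single-bit sets again.
 */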
1823
1824 static inline int mb_buddy_adjust_border(int *bit, void *bitmap, int side)
1825 {
1826 if (mb_test_bit(*bit + side, bitmap)) {
1827 mb_clear_bit(*bit, bitmap);
1828 (*bit) -= side;
1829 return 1;
1830 } else {
1832 (*bit) += side;
1833 mb_set_bit(*bit, bitmap);
1834 return -1;
1835 }
1836 }
1837
1838 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1839 {
1840 int max;
1841 int order = 1;
1842 void *buddy = mb_find_buddy(e4b, order, &max);
1843
1844 while (buddy) {
1845 void *buddy2;
1846
1847 /* Bits in range [first; last] are known to be set since
1848 * corresponding blocks were allocated. Bits in range
1849 * (first; last) will stay set because they form buddies on
1850 * upper layer. We just deal with borders if they don't
1851 * align with upper layer and then go up.
1852 * Releasing entire group is all about clearing
1853 * single bit of highest order buddy.
1854 */
1855
1856 /* Example:
1857 * ---------------------------------
1858 * | 1 | 1 | 1 | 1 |
1859 * ---------------------------------
1860 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1861 * ---------------------------------
1862 * 0 1 2 3 4 5 6 7
1863 * \_____________________/
1864 *
1865 * Neither [1] nor [6] is aligned to above layer.
1866 * Left neighbour [0] is free, so mark it busy,
1867 * decrease bb_counters and extend range to
1868 * [0; 6]
1869 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1870 * mark [6] free, increase bb_counters and shrink range to
1871 * [0; 5].
1872 * Then shift range to [0; 2], go up and do the same.
1873 */
1874
1875
1876 if (first & 1)
1877 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1878 if (!(last & 1))
1879 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1880 if (first > last)
1881 break;
1882 order++;
1883
1884 buddy2 = mb_find_buddy(e4b, order, &max);
1885 if (!buddy2) {
1886 mb_clear_bits(buddy, first, last - first + 1);
1887 e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1888 break;
1889 }
1890 first >>= 1;
1891 last >>= 1;
1892 buddy = buddy2;
1893 }
1894 }
1895
1896 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1897 int first, int count)
1898 {
1899 int left_is_free = 0;
1900 int right_is_free = 0;
1901 int block;
1902 int last = first + count - 1;
1903 struct super_block *sb = e4b->bd_sb;
1904
1905 if (WARN_ON(count == 0))
1906 return;
1907 BUG_ON(last >= (sb->s_blocksize << 3));
1908 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1909 /* Don't bother if the block group is corrupt. */
1910 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1911 return;
1912
1913 mb_check_buddy(e4b);
1914 mb_free_blocks_double(inode, e4b, first, count);
1915
1916 /* access memory sequentially: check left neighbour,
1917 * clear range and then check right neighbour
1918 */
1919 if (first != 0)
1920 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1921 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1922 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1923 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1924
1925 if (unlikely(block != -1)) {
1926 struct ext4_sb_info *sbi = EXT4_SB(sb);
1927 ext4_fsblk_t blocknr;
1928
1929 /*
1930 * Fastcommit replay can free already freed blocks which
1931 * corrupts allocation info. Regenerate it.
1932 */
1933 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
1934 mb_regenerate_buddy(e4b);
1935 goto check;
1936 }
1937
1938 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1939 blocknr += EXT4_C2B(sbi, block);
1940 ext4_grp_locked_error(sb, e4b->bd_group,
1941 inode ? inode->i_ino : 0, blocknr,
1942 "freeing already freed block (bit %u); block bitmap corrupt.",
1943 block);
1944 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
1945 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1946 return;
1947 }
1948
1949 this_cpu_inc(discard_pa_seq);
1950 e4b->bd_info->bb_free += count;
1951 if (first < e4b->bd_info->bb_first_free)
1952 e4b->bd_info->bb_first_free = first;
1953
1954 /* let's maintain fragments counter */
1955 if (left_is_free && right_is_free)
1956 e4b->bd_info->bb_fragments--;
1957 else if (!left_is_free && !right_is_free)
1958 e4b->bd_info->bb_fragments++;
1959
1960 /* buddy[0] == bd_bitmap is a special case, so handle
1961 * it right away and let mb_buddy_mark_free stay free of
1962 * zero order checks.
1963 * Check if neighbours are to be coalesced,
1964 * adjust bitmap bb_counters and borders appropriately.
1965 */
1966 if (first & 1) {
1967 first += !left_is_free;
1968 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1969 }
1970 if (!(last & 1)) {
1971 last -= !right_is_free;
1972 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1973 }
1974
1975 if (first <= last)
1976 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1977
1978 mb_set_largest_free_order(sb, e4b->bd_info);
1979 mb_update_avg_fragment_size(sb, e4b->bd_info);
1980 check:
1981 mb_check_buddy(e4b);
1982 }
1983
1984 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1985 int needed, struct ext4_free_extent *ex)
1986 {
1987 int next = block;
1988 int max, order;
1989 void *buddy;
1990
1991 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1992 BUG_ON(ex == NULL);
1993
1994 buddy = mb_find_buddy(e4b, 0, &max);
1995 BUG_ON(buddy == NULL);
1996 BUG_ON(block >= max);
1997 if (mb_test_bit(block, buddy)) {
1998 ex->fe_len = 0;
1999 ex->fe_start = 0;
2000 ex->fe_group = 0;
2001 return 0;
2002 }
2003
2004 /* find actual order */
2005 order = mb_find_order_for_block(e4b, block);
2006 block = block >> order;
2007
2008 ex->fe_len = 1 << order;
2009 ex->fe_start = block << order;
2010 ex->fe_group = e4b->bd_group;
2011
2012 /* calc difference from given start */
2013 next = next - ex->fe_start;
2014 ex->fe_len -= next;
2015 ex->fe_start += next;
2016
2017 while (needed > ex->fe_len &&
2018 mb_find_buddy(e4b, order, &max)) {
2019
2020 if (block + 1 >= max)
2021 break;
2022
2023 next = (block + 1) * (1 << order);
2024 if (mb_test_bit(next, e4b->bd_bitmap))
2025 break;
2026
2027 order = mb_find_order_for_block(e4b, next);
2028
2029 block = next >> order;
2030 ex->fe_len += 1 << order;
2031 }
2032
2033 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
2034 /* Should never happen! (but apparently sometimes does?!?) */
2035 WARN_ON(1);
2036 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
2037 "corruption or bug in mb_find_extent "
2038 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
2039 block, order, needed, ex->fe_group, ex->fe_start,
2040 ex->fe_len, ex->fe_logical);
2041 ex->fe_len = 0;
2042 ex->fe_start = 0;
2043 ex->fe_group = 0;
2044 }
2045 return ex->fe_len;
2046 }
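
/*
 * Worked example (editorial illustration): suppose block 20 is free as
 * part of an order-2 buddy and needed = 10. The extent starts out as
 * [20, 24). Since 4 < 10, the loop probes block 24; if that is free as
 * an order-3 buddy, the extent grows to [20, 32) and, with fe_len now
 * at least 'needed', the scan stops.
 */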
2047
2048 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
2049 {
2050 int ord;
2051 int mlen = 0;
2052 int max = 0;
2053 int cur;
2054 int start = ex->fe_start;
2055 int len = ex->fe_len;
2056 unsigned ret = 0;
2057 int len0 = len;
2058 void *buddy;
2059 bool split = false;
2060
2061 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
2062 BUG_ON(e4b->bd_group != ex->fe_group);
2063 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
2064 mb_check_buddy(e4b);
2065 mb_mark_used_double(e4b, start, len);
2066
2067 this_cpu_inc(discard_pa_seq);
2068 e4b->bd_info->bb_free -= len;
2069 if (e4b->bd_info->bb_first_free == start)
2070 e4b->bd_info->bb_first_free += len;
2071
2072 /* let's maintain fragments counter */
2073 if (start != 0)
2074 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
2075 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
2076 max = !mb_test_bit(start + len, e4b->bd_bitmap);
2077 if (mlen && max)
2078 e4b->bd_info->bb_fragments++;
2079 else if (!mlen && !max)
2080 e4b->bd_info->bb_fragments--;
2081
2082 /* let's maintain buddy itself */
2083 while (len) {
2084 if (!split)
2085 ord = mb_find_order_for_block(e4b, start);
2086
2087 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
2088 /* the whole chunk may be allocated at once! */
2089 mlen = 1 << ord;
2090 if (!split)
2091 buddy = mb_find_buddy(e4b, ord, &max);
2092 else
2093 split = false;
2094 BUG_ON((start >> ord) >= max);
2095 mb_set_bit(start >> ord, buddy);
2096 e4b->bd_info->bb_counters[ord]--;
2097 start += mlen;
2098 len -= mlen;
2099 BUG_ON(len < 0);
2100 continue;
2101 }
2102
2103 /* store for history */
2104 if (ret == 0)
2105 ret = len | (ord << 16);
2106
2107 /* we have to split large buddy */
2108 BUG_ON(ord <= 0);
2109 buddy = mb_find_buddy(e4b, ord, &max);
2110 mb_set_bit(start >> ord, buddy);
2111 e4b->bd_info->bb_counters[ord]--;
2112
2113 ord--;
2114 cur = (start >> ord) & ~1U;
2115 buddy = mb_find_buddy(e4b, ord, &max);
2116 mb_clear_bit(cur, buddy);
2117 mb_clear_bit(cur + 1, buddy);
2118 e4b->bd_info->bb_counters[ord]++;
2119 e4b->bd_info->bb_counters[ord]++;
2120 split = true;
2121 }
2122 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
2123
2124 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
2125 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
2126 mb_check_buddy(e4b);
2127
2128 return ret;
2129 }
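
/*
 * Editorial note on the return value above: it packs two 16-bit fields
 * captured at the first buddy split, the then-remaining length in the
 * low half and the split order in the high half; e.g. len = 3 at
 * ord = 2 encodes as 3 | (2 << 16) = 0x20003. ext4_mb_use_best_found()
 * unpacks it below into ac_tail and ac_buddy.
 */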
2130
2131 /*
2132 * Must be called under group lock!
2133 */
2134 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2135 struct ext4_buddy *e4b)
2136 {
2137 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2138 int ret;
2139
2140 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2141 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2142
2143 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2144 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2145 ret = mb_mark_used(e4b, &ac->ac_b_ex);
2146
2147 /* preallocation can change ac_b_ex, thus we store actually
2148 * allocated blocks for history */
2149 ac->ac_f_ex = ac->ac_b_ex;
2150
2151 ac->ac_status = AC_STATUS_FOUND;
2152 ac->ac_tail = ret & 0xffff;
2153 ac->ac_buddy = ret >> 16;
2154
2155 /*
2156 * take the page reference. We want the page to be pinned
2157 * so that we don't get an ext4_mb_init_cache() call for this
2158 * group until we update the bitmap. That would mean we
2159 * double allocate blocks. The reference is dropped
2160 * in ext4_mb_release_context
2161 */
2162 ac->ac_bitmap_page = e4b->bd_bitmap_page;
2163 get_page(ac->ac_bitmap_page);
2164 ac->ac_buddy_page = e4b->bd_buddy_page;
2165 get_page(ac->ac_buddy_page);
2166 /* store last allocated for subsequent stream allocation */
2167 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2168 spin_lock(&sbi->s_md_lock);
2169 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2170 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2171 spin_unlock(&sbi->s_md_lock);
2172 }
2173 /*
2174 * As we've just preallocated more space than the
2175 * user originally requested, we store the allocated
2176 * space in a special descriptor.
2177 */
2178 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2179 ext4_mb_new_preallocation(ac);
2180
2181 }
2182
2183 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2184 struct ext4_buddy *e4b,
2185 int finish_group)
2186 {
2187 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2188 struct ext4_free_extent *bex = &ac->ac_b_ex;
2189 struct ext4_free_extent *gex = &ac->ac_g_ex;
2190
2191 if (ac->ac_status == AC_STATUS_FOUND)
2192 return;
2193 /*
2194 * We don't want to scan for a whole year
2195 */
2196 if (ac->ac_found > sbi->s_mb_max_to_scan &&
2197 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2198 ac->ac_status = AC_STATUS_BREAK;
2199 return;
2200 }
2201
2202 /*
2203 * Haven't found good chunk so far, let's continue
2204 */
2205 if (bex->fe_len < gex->fe_len)
2206 return;
2207
2208 if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2209 ext4_mb_use_best_found(ac, e4b);
2210 }
2211
2212 /*
2213 * The routine checks whether the found extent is good enough. If it is,
2214 * then the extent gets marked used and a flag is set in the context
2215 * to stop scanning. Otherwise, the extent is compared with the
2216 * previously found extent and, if the new one is better, it's stored
2217 * in the context. Later, the best found extent will be used if
2218 * mballoc can't find a good enough extent.
2219 *
2220 * The algorithm used is roughly as follows:
2221 *
2222 * * If free extent found is exactly as big as goal, then
2223 * stop the scan and use it immediately
2224 *
2225 * * If free extent found is smaller than goal, then keep retrying
2226 * up to a max of sbi->s_mb_max_to_scan times (default 200). After
2227 * that stop scanning and use whatever we have.
2228 *
2229 * * If free extent found is bigger than goal, then keep retrying
2230 * up to a max of sbi->s_mb_min_to_scan times (default 10) before
2231 * stopping the scan and using the extent.
2232 *
2233 *
2234 * FIXME: real allocation policy is to be designed yet!
2235 */
2236 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2237 struct ext4_free_extent *ex,
2238 struct ext4_buddy *e4b)
2239 {
2240 struct ext4_free_extent *bex = &ac->ac_b_ex;
2241 struct ext4_free_extent *gex = &ac->ac_g_ex;
2242
2243 BUG_ON(ex->fe_len <= 0);
2244 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2245 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2246 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2247
2248 ac->ac_found++;
2249 ac->ac_cX_found[ac->ac_criteria]++;
2250
2251 /*
2252 * The special case - take what you catch first
2253 */
2254 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2255 *bex = *ex;
2256 ext4_mb_use_best_found(ac, e4b);
2257 return;
2258 }
2259
2260 /*
2261 * Let's check whether the chunk is good enough
2262 */
2263 if (ex->fe_len == gex->fe_len) {
2264 *bex = *ex;
2265 ext4_mb_use_best_found(ac, e4b);
2266 return;
2267 }
2268
2269 /*
2270 * If this is first found extent, just store it in the context
2271 */
2272 if (bex->fe_len == 0) {
2273 *bex = *ex;
2274 return;
2275 }
2276
2277 /*
2278 * If new found extent is better, store it in the context
2279 */
2280 if (bex->fe_len < gex->fe_len) {
2281 /* if the request isn't satisfied, any found extent
2282 * larger than the previous best one is better */
2283 if (ex->fe_len > bex->fe_len)
2284 *bex = *ex;
2285 } else if (ex->fe_len > gex->fe_len) {
2286 /* if the request is satisfied, then we try to find
2287 * an extent that still satisfies the request, but is
2288 * smaller than the previous one */
2289 if (ex->fe_len < bex->fe_len)
2290 *bex = *ex;
2291 }
2292
2293 ext4_mb_check_limits(ac, e4b, 0);
2294 }
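
/*
 * Scenario sketch (editorial illustration) for a goal of 8 clusters:
 * an exact 8-cluster extent is taken on the spot; while the best so
 * far is 3 clusters, a 5-cluster find replaces it (goal unmet, bigger
 * wins); once a 10-cluster extent is recorded, a 9-cluster find
 * replaces it (goal met, the tighter fit wins).
 */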
2295
2296 static noinline_for_stack
2297 void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2298 struct ext4_buddy *e4b)
2299 {
2300 struct ext4_free_extent ex = ac->ac_b_ex;
2301 ext4_group_t group = ex.fe_group;
2302 int max;
2303 int err;
2304
2305 BUG_ON(ex.fe_len <= 0);
2306 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2307 if (err)
2308 return;
2309
2310 ext4_lock_group(ac->ac_sb, group);
2311 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2312 goto out;
2313
2314 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2315
2316 if (max > 0) {
2317 ac->ac_b_ex = ex;
2318 ext4_mb_use_best_found(ac, e4b);
2319 }
2320
2321 out:
2322 ext4_unlock_group(ac->ac_sb, group);
2323 ext4_mb_unload_buddy(e4b);
2324 }
2325
2326 static noinline_for_stack
2327 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2328 struct ext4_buddy *e4b)
2329 {
2330 ext4_group_t group = ac->ac_g_ex.fe_group;
2331 int max;
2332 int err;
2333 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2334 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2335 struct ext4_free_extent ex;
2336
2337 if (!grp)
2338 return -EFSCORRUPTED;
2339 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
2340 return 0;
2341 if (grp->bb_free == 0)
2342 return 0;
2343
2344 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2345 if (err)
2346 return err;
2347
2348 ext4_lock_group(ac->ac_sb, group);
2349 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2350 goto out;
2351
2352 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2353 ac->ac_g_ex.fe_len, &ex);
2354 ex.fe_logical = 0xDEADFA11; /* debug value */
2355
2356 if (max >= ac->ac_g_ex.fe_len &&
2357 ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) {
2358 ext4_fsblk_t start;
2359
2360 start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
2361 /* use do_div to get remainder (would be 64-bit modulo) */
2362 if (do_div(start, sbi->s_stripe) == 0) {
2363 ac->ac_found++;
2364 ac->ac_b_ex = ex;
2365 ext4_mb_use_best_found(ac, e4b);
2366 }
2367 } else if (max >= ac->ac_g_ex.fe_len) {
2368 BUG_ON(ex.fe_len <= 0);
2369 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2370 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2371 ac->ac_found++;
2372 ac->ac_b_ex = ex;
2373 ext4_mb_use_best_found(ac, e4b);
2374 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2375 /* Sometimes, the caller may want to merge even a small
2376 * number of blocks into an existing extent */
2377 BUG_ON(ex.fe_len <= 0);
2378 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2379 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2380 ac->ac_found++;
2381 ac->ac_b_ex = ex;
2382 ext4_mb_use_best_found(ac, e4b);
2383 }
2384 out:
2385 ext4_unlock_group(ac->ac_sb, group);
2386 ext4_mb_unload_buddy(e4b);
2387
2388 return 0;
2389 }
2390
2391 /*
2392 * The routine scans buddy structures (not the bitmap!) from the given
2393 * order to the max order and tries to find a big enough chunk to satisfy the request
2394 */
2395 static noinline_for_stack
2396 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2397 struct ext4_buddy *e4b)
2398 {
2399 struct super_block *sb = ac->ac_sb;
2400 struct ext4_group_info *grp = e4b->bd_info;
2401 void *buddy;
2402 int i;
2403 int k;
2404 int max;
2405
2406 BUG_ON(ac->ac_2order <= 0);
2407 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2408 if (grp->bb_counters[i] == 0)
2409 continue;
2410
2411 buddy = mb_find_buddy(e4b, i, &max);
2412 if (WARN_RATELIMIT(buddy == NULL,
2413 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i))
2414 continue;
2415
2416 k = mb_find_next_zero_bit(buddy, max, 0);
2417 if (k >= max) {
2418 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2419 "%d free clusters of order %d. But found 0",
2420 grp->bb_counters[i], i);
2421 ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2422 e4b->bd_group,
2423 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2424 break;
2425 }
2426 ac->ac_found++;
2427 ac->ac_cX_found[ac->ac_criteria]++;
2428
2429 ac->ac_b_ex.fe_len = 1 << i;
2430 ac->ac_b_ex.fe_start = k << i;
2431 ac->ac_b_ex.fe_group = e4b->bd_group;
2432
2433 ext4_mb_use_best_found(ac, e4b);
2434
2435 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2436
2437 if (EXT4_SB(sb)->s_mb_stats)
2438 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2439
2440 break;
2441 }
2442 }
2443
2444 /*
2445 * The routine scans the group and measures all found extents.
2446 * Scanning is bounded by the group's free cluster count (bb_free),
2447 * which gives the routine an upper limit on how much it can find.
2448 */
2449 static noinline_for_stack
2450 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2451 struct ext4_buddy *e4b)
2452 {
2453 struct super_block *sb = ac->ac_sb;
2454 void *bitmap = e4b->bd_bitmap;
2455 struct ext4_free_extent ex;
2456 int i, j, freelen;
2457 int free;
2458
2459 free = e4b->bd_info->bb_free;
2460 if (WARN_ON(free <= 0))
2461 return;
2462
2463 i = e4b->bd_info->bb_first_free;
2464
2465 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2466 i = mb_find_next_zero_bit(bitmap,
2467 EXT4_CLUSTERS_PER_GROUP(sb), i);
2468 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2469 /*
2470 * If we have a corrupt bitmap, we won't find any
2471 * free blocks even though the group info says we
2472 * have free blocks
2473 */
2474 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2475 "%d free clusters as per "
2476 "group info. But bitmap says 0",
2477 free);
2478 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2479 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2480 break;
2481 }
2482
2483 if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
2484 /*
2485 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
2486 * sure that this group will have a large enough
2487 * continuous free extent, so skip over the smaller free
2488 * extents
2489 */
2490 j = mb_find_next_bit(bitmap,
2491 EXT4_CLUSTERS_PER_GROUP(sb), i);
2492 freelen = j - i;
2493
2494 if (freelen < ac->ac_g_ex.fe_len) {
2495 i = j;
2496 free -= freelen;
2497 continue;
2498 }
2499 }
2500
2501 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2502 if (WARN_ON(ex.fe_len <= 0))
2503 break;
2504 if (free < ex.fe_len) {
2505 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2506 "%d free clusters as per "
2507 "group info. But got %d blocks",
2508 free, ex.fe_len);
2509 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2510 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2511 /*
2512 * The number of free blocks differs. This mostly
2513 * indicates that the bitmap is corrupt. So exit
2514 * without claiming the space.
2515 */
2516 break;
2517 }
2518 ex.fe_logical = 0xDEADC0DE; /* debug value */
2519 ext4_mb_measure_extent(ac, &ex, e4b);
2520
2521 i += ex.fe_len;
2522 free -= ex.fe_len;
2523 }
2524
2525 ext4_mb_check_limits(ac, e4b, 1);
2526 }
2527
2528 /*
2529 * This is a special case for storage like raid5
2530 * we try to find stripe-aligned chunks for stripe-size-multiple requests
2531 */
2532 static noinline_for_stack
2533 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2534 struct ext4_buddy *e4b)
2535 {
2536 struct super_block *sb = ac->ac_sb;
2537 struct ext4_sb_info *sbi = EXT4_SB(sb);
2538 void *bitmap = e4b->bd_bitmap;
2539 struct ext4_free_extent ex;
2540 ext4_fsblk_t first_group_block;
2541 ext4_fsblk_t a;
2542 ext4_grpblk_t i, stripe;
2543 int max;
2544
2545 BUG_ON(sbi->s_stripe == 0);
2546
2547 /* find first stripe-aligned block in group */
2548 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2549
2550 a = first_group_block + sbi->s_stripe - 1;
2551 do_div(a, sbi->s_stripe);
2552 i = (a * sbi->s_stripe) - first_group_block;
2553
2554 stripe = EXT4_B2C(sbi, sbi->s_stripe);
2555 i = EXT4_B2C(sbi, i);
2556 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2557 if (!mb_test_bit(i, bitmap)) {
2558 max = mb_find_extent(e4b, i, stripe, &ex);
2559 if (max >= stripe) {
2560 ac->ac_found++;
2561 ac->ac_cX_found[ac->ac_criteria]++;
2562 ex.fe_logical = 0xDEADF00D; /* debug value */
2563 ac->ac_b_ex = ex;
2564 ext4_mb_use_best_found(ac, e4b);
2565 break;
2566 }
2567 }
2568 i += stripe;
2569 }
2570 }
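
/*
 * Worked example for the alignment arithmetic above (editorial
 * illustration, assuming one cluster per block): with s_stripe = 16
 * and first_group_block = 32769, a becomes 32784, do_div() leaves
 * a = 2049, and i = 2049 * 16 - 32769 = 15, the offset within the
 * group of the first stripe-aligned block (32784).
 */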
2571
2572 /*
2573 * This is also called BEFORE we load the buddy bitmap.
2574 * Returns true if the group is suitable for the allocation,
2575 * false otherwise.
2576 */
2577 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2578 ext4_group_t group, enum criteria cr)
2579 {
2580 ext4_grpblk_t free, fragments;
2581 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2582 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2583
2584 BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);
2585
2586 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2587 return false;
2588
2589 free = grp->bb_free;
2590 if (free == 0)
2591 return false;
2592
2593 fragments = grp->bb_fragments;
2594 if (fragments == 0)
2595 return false;
2596
2597 switch (cr) {
2598 case CR_POWER2_ALIGNED:
2599 BUG_ON(ac->ac_2order == 0);
2600
2601 /* Avoid using the first bg of a flexgroup for data files */
2602 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2603 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2604 ((group % flex_size) == 0))
2605 return false;
2606
2607 if (free < ac->ac_g_ex.fe_len)
2608 return false;
2609
2610 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2611 return true;
2612
2613 if (grp->bb_largest_free_order < ac->ac_2order)
2614 return false;
2615
2616 return true;
2617 case CR_GOAL_LEN_FAST:
2618 case CR_BEST_AVAIL_LEN:
2619 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2620 return true;
2621 break;
2622 case CR_GOAL_LEN_SLOW:
2623 if (free >= ac->ac_g_ex.fe_len)
2624 return true;
2625 break;
2626 case CR_ANY_FREE:
2627 return true;
2628 default:
2629 BUG();
2630 }
2631
2632 return false;
2633 }
2634
2635 /*
2636 * This could return a negative error code if something goes wrong
2637 * during ext4_mb_init_group(). This should not be called with
2638 * ext4_lock_group() held.
2639 *
2640 * Note: because we are conditionally operating with the group lock in
2641 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2642 * function using __acquire and __release. This means we need to be
2643 * super careful before messing with the error path handling via "goto
2644 * out"!
2645 */
2646 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2647 ext4_group_t group, enum criteria cr)
2648 {
2649 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2650 struct super_block *sb = ac->ac_sb;
2651 struct ext4_sb_info *sbi = EXT4_SB(sb);
2652 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2653 ext4_grpblk_t free;
2654 int ret = 0;
2655
2656 if (!grp)
2657 return -EFSCORRUPTED;
2658 if (sbi->s_mb_stats)
2659 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2660 if (should_lock) {
2661 ext4_lock_group(sb, group);
2662 __release(ext4_group_lock_ptr(sb, group));
2663 }
2664 free = grp->bb_free;
2665 if (free == 0)
2666 goto out;
2667 /*
2668 * In all criteria except CR_ANY_FREE we try to avoid groups that
2669 * can't possibly satisfy the full goal request due to insufficient
2670 * free blocks.
2671 */
2672 if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len)
2673 goto out;
2674 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2675 goto out;
2676 if (should_lock) {
2677 __acquire(ext4_group_lock_ptr(sb, group));
2678 ext4_unlock_group(sb, group);
2679 }
2680
2681 /* We only do this if the grp has never been initialized */
2682 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2683 struct ext4_group_desc *gdp =
2684 ext4_get_group_desc(sb, group, NULL);
2685 int ret;
2686
2687 /*
2688 * cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
2689 * search to find large good chunks almost for free. If buddy
2690 * data is not ready, then this optimization makes no sense. But
2691 * we never skip the first block group in a flex_bg, since this
2692 * gets used for metadata block allocation, and we want to make
2693 * sure we locate metadata blocks in the first block group in
2694 * the flex_bg if possible.
2695 */
2696 if (!ext4_mb_cr_expensive(cr) &&
2697 (!sbi->s_log_groups_per_flex ||
2698 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2699 !(ext4_has_group_desc_csum(sb) &&
2700 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2701 return 0;
2702 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2703 if (ret)
2704 return ret;
2705 }
2706
2707 if (should_lock) {
2708 ext4_lock_group(sb, group);
2709 __release(ext4_group_lock_ptr(sb, group));
2710 }
2711 ret = ext4_mb_good_group(ac, group, cr);
2712 out:
2713 if (should_lock) {
2714 __acquire(ext4_group_lock_ptr(sb, group));
2715 ext4_unlock_group(sb, group);
2716 }
2717 return ret;
2718 }
2719
2720 /*
2721 * Start prefetching @nr block bitmaps starting at @group.
2722 * Return the next group which needs to be prefetched.
2723 */
2724 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2725 unsigned int nr, int *cnt)
2726 {
2727 ext4_group_t ngroups = ext4_get_groups_count(sb);
2728 struct buffer_head *bh;
2729 struct blk_plug plug;
2730
2731 blk_start_plug(&plug);
2732 while (nr-- > 0) {
2733 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2734 NULL);
2735 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2736
2737 /*
2738 * Prefetch block groups with free blocks; but don't
2739 * bother if it is marked uninitialized on disk, since
2740 * it won't require I/O to read. Also only try to
2741 * prefetch once, so we avoid a getblk() call, which can
2742 * be expensive.
2743 */
2744 if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2745 EXT4_MB_GRP_NEED_INIT(grp) &&
2746 ext4_free_group_clusters(sb, gdp) > 0 ) {
2747 bh = ext4_read_block_bitmap_nowait(sb, group, true);
2748 if (bh && !IS_ERR(bh)) {
2749 if (!buffer_uptodate(bh) && cnt)
2750 (*cnt)++;
2751 brelse(bh);
2752 }
2753 }
2754 if (++group >= ngroups)
2755 group = 0;
2756 }
2757 blk_finish_plug(&plug);
2758 return group;
2759 }
2760
2761 /*
2762 * Prefetching reads the block bitmap into the buffer cache; but we
2763 * need to make sure that the buddy bitmap in the page cache has been
2764 * initialized. Note that ext4_mb_init_group() will block if the I/O
2765 * is not yet completed, or indeed if the I/O was never
2766 * initiated by ext4_mb_prefetch in the first place.
2767 *
2768 * TODO: We should actually kick off the buddy bitmap setup in a work
2769 * queue when the buffer I/O is completed, so that we don't block
2770 * waiting for the block allocation bitmap read to finish when
2771 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2772 */
2773 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2774 unsigned int nr)
2775 {
2776 struct ext4_group_desc *gdp;
2777 struct ext4_group_info *grp;
2778
2779 while (nr-- > 0) {
2780 if (!group)
2781 group = ext4_get_groups_count(sb);
2782 group--;
2783 gdp = ext4_get_group_desc(sb, group, NULL);
2784 grp = ext4_get_group_info(sb, group);
2785
2786 if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2787 ext4_free_group_clusters(sb, gdp) > 0) {
2788 if (ext4_mb_init_group(sb, group, GFP_NOFS))
2789 break;
2790 }
2791 }
2792 }
2793
2794 static noinline_for_stack int
2795 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2796 {
2797 ext4_group_t prefetch_grp = 0, ngroups, group, i;
2798 enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
2799 int err = 0, first_err = 0;
2800 unsigned int nr = 0, prefetch_ios = 0;
2801 struct ext4_sb_info *sbi;
2802 struct super_block *sb;
2803 struct ext4_buddy e4b;
2804 int lost;
2805
2806 sb = ac->ac_sb;
2807 sbi = EXT4_SB(sb);
2808 ngroups = ext4_get_groups_count(sb);
2809 /* non-extent files are limited to low blocks/groups */
2810 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2811 ngroups = sbi->s_blockfile_groups;
2812
2813 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2814
2815 /* first, try the goal */
2816 err = ext4_mb_find_by_goal(ac, &e4b);
2817 if (err || ac->ac_status == AC_STATUS_FOUND)
2818 goto out;
2819
2820 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2821 goto out;
2822
2823 /*
2824 * ac->ac_2order is set only if the fe_len is a power of 2
2825 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED
2826 * so that we try exact allocation using buddy.
2827 */
2828 i = fls(ac->ac_g_ex.fe_len);
2829 ac->ac_2order = 0;
2830 /*
2831 * We search using buddy data only if the order of the request
2832 * is greater than or equal to sbi->s_mb_order2_reqs.
2833 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2834 * We also support searching for power-of-two requests only for
2835 * requests up to the maximum buddy size we have constructed.
2836 */
2837 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2838 if (is_power_of_2(ac->ac_g_ex.fe_len))
2839 ac->ac_2order = array_index_nospec(i - 1,
2840 MB_NUM_ORDERS(sb));
2841 }
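
/*
 * Worked example (editorial illustration): a goal of fe_len = 64
 * clusters gives fls() = 7; 64 is a power of two, so ac_2order
 * becomes 6 and the scan below can start at CR_POWER2_ALIGNED,
 * hunting for a free order-6 buddy directly.
 */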
2842
2843 /* if stream allocation is enabled, use global goal */
2844 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2845 /* TBD: may be hot point */
2846 spin_lock(&sbi->s_md_lock);
2847 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2848 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2849 spin_unlock(&sbi->s_md_lock);
2850 }
2851
2852 /*
2853 * Let's just scan groups to find more-or-less suitable blocks. We
2854 * start with CR_GOAL_LEN_FAST, unless the request is power-of-2
2855 * aligned, in which case we try that faster approach first.
2856 */
2857 if (ac->ac_2order)
2858 cr = CR_POWER2_ALIGNED;
2859 repeat:
2860 for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2861 ac->ac_criteria = cr;
2862 /*
2863 * searching for the right group start
2864 * from the goal value specified
2865 */
2866 group = ac->ac_g_ex.fe_group;
2867 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2868 prefetch_grp = group;
2869
2870 for (i = 0, new_cr = cr; i < ngroups; i++,
2871 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2872 int ret = 0;
2873
2874 cond_resched();
2875 if (new_cr != cr) {
2876 cr = new_cr;
2877 goto repeat;
2878 }
2879
2880 /*
2881 * Batch reads of the block allocation bitmaps
2882 * to get multiple READs in flight; limit
2883 * prefetching at inexpensive CR, otherwise mballoc
2884 * can spend a lot of time loading imperfect groups
2885 */
2886 if ((prefetch_grp == group) &&
2887 (ext4_mb_cr_expensive(cr) ||
2888 prefetch_ios < sbi->s_mb_prefetch_limit)) {
2889 nr = sbi->s_mb_prefetch;
2890 if (ext4_has_feature_flex_bg(sb)) {
2891 nr = 1 << sbi->s_log_groups_per_flex;
2892 nr -= group & (nr - 1);
2893 nr = min(nr, sbi->s_mb_prefetch);
2894 }
2895 prefetch_grp = ext4_mb_prefetch(sb, group,
2896 nr, &prefetch_ios);
2897 }
2898
2899 /* This now checks without needing the buddy page */
2900 ret = ext4_mb_good_group_nolock(ac, group, cr);
2901 if (ret <= 0) {
2902 if (!first_err)
2903 first_err = ret;
2904 continue;
2905 }
2906
2907 err = ext4_mb_load_buddy(sb, group, &e4b);
2908 if (err)
2909 goto out;
2910
2911 ext4_lock_group(sb, group);
2912
2913 /*
2914 * We need to check again after locking the
2915 * block group
2916 */
2917 ret = ext4_mb_good_group(ac, group, cr);
2918 if (ret == 0) {
2919 ext4_unlock_group(sb, group);
2920 ext4_mb_unload_buddy(&e4b);
2921 continue;
2922 }
2923
2924 ac->ac_groups_scanned++;
2925 if (cr == CR_POWER2_ALIGNED)
2926 ext4_mb_simple_scan_group(ac, &e4b);
2927 else if ((cr == CR_GOAL_LEN_FAST ||
2928 cr == CR_BEST_AVAIL_LEN) &&
2929 sbi->s_stripe &&
2930 !(ac->ac_g_ex.fe_len %
2931 EXT4_B2C(sbi, sbi->s_stripe)))
2932 ext4_mb_scan_aligned(ac, &e4b);
2933 else
2934 ext4_mb_complex_scan_group(ac, &e4b);
2935
2936 ext4_unlock_group(sb, group);
2937 ext4_mb_unload_buddy(&e4b);
2938
2939 if (ac->ac_status != AC_STATUS_CONTINUE)
2940 break;
2941 }
2942 /* Processed all groups and haven't found blocks */
2943 if (sbi->s_mb_stats && i == ngroups)
2944 atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2945
2946 if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN)
2947 /* Reset goal length to original goal length before
2948 * falling into CR_GOAL_LEN_SLOW */
2949 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
2950 }
2951
2952 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2953 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2954 /*
2955 * We've been searching too long. Let's try to allocate
2956 * the best chunk we've found so far
2957 */
2958 ext4_mb_try_best_found(ac, &e4b);
2959 if (ac->ac_status != AC_STATUS_FOUND) {
2960 /*
2961 * Someone luckier has already allocated it.
2962 * The only thing we can do is just take the first
2963 * found block(s)
2964 */
2965 lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2966 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2967 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2968 ac->ac_b_ex.fe_len, lost);
2969
2970 ac->ac_b_ex.fe_group = 0;
2971 ac->ac_b_ex.fe_start = 0;
2972 ac->ac_b_ex.fe_len = 0;
2973 ac->ac_status = AC_STATUS_CONTINUE;
2974 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2975 cr = CR_ANY_FREE;
2976 goto repeat;
2977 }
2978 }
2979
2980 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2981 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2982 out:
2983 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2984 err = first_err;
2985
2986 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2987 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2988 ac->ac_flags, cr, err);
2989
2990 if (nr)
2991 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2992
2993 return err;
2994 }
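
/*
 * Worked example for the prefetch batching in the allocator above
 * (editorial illustration): with s_log_groups_per_flex = 4 (16 groups
 * per flex_bg) and group = 35, nr starts at 16 and nr -= 35 & 15
 * leaves 13, so the batch covers groups 35..47 (capped further by
 * s_mb_prefetch) and stops at the flex_bg boundary instead of
 * crossing it.
 */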
2995
2996 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2997 {
2998 struct super_block *sb = pde_data(file_inode(seq->file));
2999 ext4_group_t group;
3000
3001 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
3002 return NULL;
3003 group = *pos + 1;
3004 return (void *) ((unsigned long) group);
3005 }
3006
3007 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
3008 {
3009 struct super_block *sb = pde_data(file_inode(seq->file));
3010 ext4_group_t group;
3011
3012 ++*pos;
3013 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
3014 return NULL;
3015 group = *pos + 1;
3016 return (void *) ((unsigned long) group);
3017 }
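
/*
 * Editorial note: both iterators above bias the group number by one so
 * that group 0 does not become a NULL cookie (seq_file treats a NULL
 * return from start/next as the end of iteration); the show() routine
 * undoes the bias with group--.
 */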
3018
3019 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
3020 {
3021 struct super_block *sb = pde_data(file_inode(seq->file));
3022 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
3023 int i;
3024 int err, buddy_loaded = 0;
3025 struct ext4_buddy e4b;
3026 struct ext4_group_info *grinfo;
3027 unsigned char blocksize_bits = min_t(unsigned char,
3028 sb->s_blocksize_bits,
3029 EXT4_MAX_BLOCK_LOG_SIZE);
3030 struct sg {
3031 struct ext4_group_info info;
3032 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
3033 } sg;
3034
3035 group--;
3036 if (group == 0)
3037 seq_puts(seq, "#group: free frags first ["
3038 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
3039 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
3040
3041 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
3042 sizeof(struct ext4_group_info);
3043
3044 grinfo = ext4_get_group_info(sb, group);
3045 if (!grinfo)
3046 return 0;
3047 /* Load the group info in memory only if not already loaded. */
3048 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
3049 err = ext4_mb_load_buddy(sb, group, &e4b);
3050 if (err) {
3051 seq_printf(seq, "#%-5u: I/O error\n", group);
3052 return 0;
3053 }
3054 buddy_loaded = 1;
3055 }
3056
3057 memcpy(&sg, grinfo, i);
3058
3059 if (buddy_loaded)
3060 ext4_mb_unload_buddy(&e4b);
3061
3062 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
3063 sg.info.bb_fragments, sg.info.bb_first_free);
3064 for (i = 0; i <= 13; i++)
3065 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
3066 sg.info.bb_counters[i] : 0);
3067 seq_puts(seq, " ]");
3068 if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
3069 seq_puts(seq, " Block bitmap corrupted!");
3070 seq_puts(seq, "\n");
3071
3072 return 0;
3073 }
3074
3075 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
3076 {
3077 }
3078
3079 const struct seq_operations ext4_mb_seq_groups_ops = {
3080 .start = ext4_mb_seq_groups_start,
3081 .next = ext4_mb_seq_groups_next,
3082 .stop = ext4_mb_seq_groups_stop,
3083 .show = ext4_mb_seq_groups_show,
3084 };
3085
3086 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
3087 {
3088 struct super_block *sb = seq->private;
3089 struct ext4_sb_info *sbi = EXT4_SB(sb);
3090
3091 seq_puts(seq, "mballoc:\n");
3092 if (!sbi->s_mb_stats) {
3093 seq_puts(seq, "\tmb stats collection turned off.\n");
3094 seq_puts(
3095 seq,
3096 "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
3097 return 0;
3098 }
3099 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
3100 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
3101
3102 seq_printf(seq, "\tgroups_scanned: %u\n",
3103 atomic_read(&sbi->s_bal_groups_scanned));
3104
3105 /* CR_POWER2_ALIGNED stats */
3106 seq_puts(seq, "\tcr_p2_aligned_stats:\n");
3107 seq_printf(seq, "\t\thits: %llu\n",
3108 atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED]));
3109 seq_printf(
3110 seq, "\t\tgroups_considered: %llu\n",
3111 atomic64_read(
3112 &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]));
3113 seq_printf(seq, "\t\textents_scanned: %u\n",
3114 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED]));
3115 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3116 atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED]));
3117 seq_printf(seq, "\t\tbad_suggestions: %u\n",
3118 atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions));
3119
3120 /* CR_GOAL_LEN_FAST stats */
3121 seq_puts(seq, "\tcr_goal_fast_stats:\n");
3122 seq_printf(seq, "\t\thits: %llu\n",
3123 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST]));
3124 seq_printf(seq, "\t\tgroups_considered: %llu\n",
3125 atomic64_read(
3126 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST]));
3127 seq_printf(seq, "\t\textents_scanned: %u\n",
3128 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST]));
3129 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3130 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST]));
3131 seq_printf(seq, "\t\tbad_suggestions: %u\n",
3132 atomic_read(&sbi->s_bal_goal_fast_bad_suggestions));
3133
3134 /* CR_BEST_AVAIL_LEN stats */
3135 seq_puts(seq, "\tcr_best_avail_stats:\n");
3136 seq_printf(seq, "\t\thits: %llu\n",
3137 atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN]));
3138 seq_printf(
3139 seq, "\t\tgroups_considered: %llu\n",
3140 atomic64_read(
3141 &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN]));
3142 seq_printf(seq, "\t\textents_scanned: %u\n",
3143 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN]));
3144 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3145 atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN]));
3146 seq_printf(seq, "\t\tbad_suggestions: %u\n",
3147 atomic_read(&sbi->s_bal_best_avail_bad_suggestions));
3148
3149 /* CR_GOAL_LEN_SLOW stats */
3150 seq_puts(seq, "\tcr_goal_slow_stats:\n");
3151 seq_printf(seq, "\t\thits: %llu\n",
3152 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW]));
3153 seq_printf(seq, "\t\tgroups_considered: %llu\n",
3154 atomic64_read(
3155 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW]));
3156 seq_printf(seq, "\t\textents_scanned: %u\n",
3157 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW]));
3158 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3159 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW]));
3160
3161 /* CR_ANY_FREE stats */
3162 seq_puts(seq, "\tcr_any_free_stats:\n");
3163 seq_printf(seq, "\t\thits: %llu\n",
3164 atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE]));
3165 seq_printf(
3166 seq, "\t\tgroups_considered: %llu\n",
3167 atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE]));
3168 seq_printf(seq, "\t\textents_scanned: %u\n",
3169 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE]));
3170 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3171 atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE]));
3172
3173 /* Aggregates */
3174 seq_printf(seq, "\textents_scanned: %u\n",
3175 atomic_read(&sbi->s_bal_ex_scanned));
3176 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
3177 seq_printf(seq, "\t\tlen_goal_hits: %u\n",
3178 atomic_read(&sbi->s_bal_len_goals));
3179 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
3180 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
3181 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
3182 seq_printf(seq, "\tbuddies_generated: %u/%u\n",
3183 atomic_read(&sbi->s_mb_buddies_generated),
3184 ext4_get_groups_count(sb));
3185 seq_printf(seq, "\tbuddies_time_used: %llu\n",
3186 atomic64_read(&sbi->s_mb_generation_time));
3187 seq_printf(seq, "\tpreallocated: %u\n",
3188 atomic_read(&sbi->s_mb_preallocated));
3189 seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded));
3190 return 0;
3191 }
3192
3193 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
3194 __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
3195 {
3196 struct super_block *sb = pde_data(file_inode(seq->file));
3197 unsigned long position;
3198
3199 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3200 return NULL;
3201 position = *pos + 1;
3202 return (void *) ((unsigned long) position);
3203 }
3204
3205 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
3206 {
3207 struct super_block *sb = pde_data(file_inode(seq->file));
3208 unsigned long position;
3209
3210 ++*pos;
3211 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3212 return NULL;
3213 position = *pos + 1;
3214 return (void *) ((unsigned long) position);
3215 }
3216
3217 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
3218 {
3219 struct super_block *sb = pde_data(file_inode(seq->file));
3220 struct ext4_sb_info *sbi = EXT4_SB(sb);
3221 unsigned long position = ((unsigned long) v);
3222 struct ext4_group_info *grp;
3223 unsigned int count;
3224
3225 position--;
3226 if (position >= MB_NUM_ORDERS(sb)) {
3227 position -= MB_NUM_ORDERS(sb);
3228 if (position == 0)
3229 seq_puts(seq, "avg_fragment_size_lists:\n");
3230
3231 count = 0;
3232 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
3233 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
3234 bb_avg_fragment_size_node)
3235 count++;
3236 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
3237 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3238 (unsigned int)position, count);
3239 return 0;
3240 }
3241
3242 if (position == 0) {
3243 seq_printf(seq, "optimize_scan: %d\n",
3244 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
3245 seq_puts(seq, "max_free_order_lists:\n");
3246 }
3247 count = 0;
3248 read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
3249 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3250 bb_largest_free_order_node)
3251 count++;
3252 read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
3253 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3254 (unsigned int)position, count);
3255
3256 return 0;
3257 }
3258
3259 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3260 {
3261 }
3262
3263 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3264 .start = ext4_mb_seq_structs_summary_start,
3265 .next = ext4_mb_seq_structs_summary_next,
3266 .stop = ext4_mb_seq_structs_summary_stop,
3267 .show = ext4_mb_seq_structs_summary_show,
3268 };
3269
3270 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3271 {
3272 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3273 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3274
3275 BUG_ON(!cachep);
3276 return cachep;
3277 }
3278
3279 /*
3280 * Allocate the top-level s_group_info array for the specified number
3281 * of groups
3282 */
3283 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3284 {
3285 struct ext4_sb_info *sbi = EXT4_SB(sb);
3286 unsigned size;
3287 struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3288
3289 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3290 EXT4_DESC_PER_BLOCK_BITS(sb);
3291 if (size <= sbi->s_group_info_size)
3292 return 0;
3293
3294 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3295 new_groupinfo = kvzalloc(size, GFP_KERNEL);
3296 if (!new_groupinfo) {
3297 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3298 return -ENOMEM;
3299 }
3300 rcu_read_lock();
3301 old_groupinfo = rcu_dereference(sbi->s_group_info);
3302 if (old_groupinfo)
3303 memcpy(new_groupinfo, old_groupinfo,
3304 sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3305 rcu_read_unlock();
3306 rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3307 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3308 if (old_groupinfo)
3309 ext4_kvfree_array_rcu(old_groupinfo);
3310 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3311 sbi->s_group_info_size);
3312 return 0;
3313 }
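
/*
 * Worked example (editorial illustration, assuming 4k blocks, 32-byte
 * group descriptors and 64-bit pointers): EXT4_DESC_PER_BLOCK is 128,
 * so 1000 groups need ceil(1000 / 128) = 8 second-level tables; the
 * 8-pointer array occupies 64 bytes, already a power of two, and
 * s_group_info_size ends up as 64 / 8 = 8.
 */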
3314
3315 /* Create and initialize ext4_group_info data for the given group. */
3316 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3317 struct ext4_group_desc *desc)
3318 {
3319 int i;
3320 int metalen = 0;
3321 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3322 struct ext4_sb_info *sbi = EXT4_SB(sb);
3323 struct ext4_group_info **meta_group_info;
3324 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3325
3326 /*
3327 * First check if this group is the first of a reserved block.
3328 * If it's true, we have to allocate a new table of pointers
3329 * to ext4_group_info structures
3330 */
3331 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3332 metalen = sizeof(*meta_group_info) <<
3333 EXT4_DESC_PER_BLOCK_BITS(sb);
3334 meta_group_info = kmalloc(metalen, GFP_NOFS);
3335 if (meta_group_info == NULL) {
3336 ext4_msg(sb, KERN_ERR, "can't allocate mem "
3337 "for a buddy group");
3338 return -ENOMEM;
3339 }
3340 rcu_read_lock();
3341 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3342 rcu_read_unlock();
3343 }
3344
3345 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3346 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3347
3348 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3349 if (meta_group_info[i] == NULL) {
3350 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3351 goto exit_group_info;
3352 }
3353 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3354 &(meta_group_info[i]->bb_state));
3355
3356 /*
3357 * initialize bb_free to be able to skip
3358 * empty groups without initialization
3359 */
3360 if (ext4_has_group_desc_csum(sb) &&
3361 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3362 meta_group_info[i]->bb_free =
3363 ext4_free_clusters_after_init(sb, group, desc);
3364 } else {
3365 meta_group_info[i]->bb_free =
3366 ext4_free_group_clusters(sb, desc);
3367 }
3368
3369 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3370 init_rwsem(&meta_group_info[i]->alloc_sem);
3371 meta_group_info[i]->bb_free_root = RB_ROOT;
3372 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3373 INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3374 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
3375 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */
3376 meta_group_info[i]->bb_group = group;
3377
3378 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3379 return 0;
3380
3381 exit_group_info:
3382 /* If a meta_group_info table has been allocated, release it now */
3383 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3384 struct ext4_group_info ***group_info;
3385
3386 rcu_read_lock();
3387 group_info = rcu_dereference(sbi->s_group_info);
3388 kfree(group_info[idx]);
3389 group_info[idx] = NULL;
3390 rcu_read_unlock();
3391 }
3392 return -ENOMEM;
3393 } /* ext4_mb_add_groupinfo */
3394
3395 static int ext4_mb_init_backend(struct super_block *sb)
3396 {
3397 ext4_group_t ngroups = ext4_get_groups_count(sb);
3398 ext4_group_t i;
3399 struct ext4_sb_info *sbi = EXT4_SB(sb);
3400 int err;
3401 struct ext4_group_desc *desc;
3402 struct ext4_group_info ***group_info;
3403 struct kmem_cache *cachep;
3404
3405 err = ext4_mb_alloc_groupinfo(sb, ngroups);
3406 if (err)
3407 return err;
3408
3409 sbi->s_buddy_cache = new_inode(sb);
3410 if (sbi->s_buddy_cache == NULL) {
3411 ext4_msg(sb, KERN_ERR, "can't get new inode");
3412 goto err_freesgi;
3413 }
3414 /* To avoid potentially colliding with a valid on-disk inode number,
3415 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
3416 * not in the inode hash, so it should never be found by iget(), but
3417 * this will avoid confusion if it ever shows up during debugging. */
3418 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3419 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3420 for (i = 0; i < ngroups; i++) {
3421 cond_resched();
3422 desc = ext4_get_group_desc(sb, i, NULL);
3423 if (desc == NULL) {
3424 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3425 goto err_freebuddy;
3426 }
3427 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3428 goto err_freebuddy;
3429 }
3430
3431 if (ext4_has_feature_flex_bg(sb)) {
3432 /* a single flex group is supposed to be read by a single IO.
3433 * s_mb_prefetch is an unsigned integer, so shifting it by 32 or
3434 * more would overflow; reject such s_log_groups_per_flex values.
3435 */
3436 if (sbi->s_es->s_log_groups_per_flex >= 32) {
3437 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3438 goto err_freebuddy;
3439 }
3440 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3441 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3442 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3443 } else {
3444 sbi->s_mb_prefetch = 32;
3445 }
3446 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3447 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3448 /* how many real IOs to prefetch within a single allocation at cr=0.
3449 * Given that cr=0 is a CPU-related optimization we shouldn't try to
3450 * load too many groups; at some point we should start to use what
3451 * we've got in memory.
3452 * With an average random access time of 5ms, it'd take a second to get
3453 * 200 groups (* N with flex_bg), so let's make this limit 4
3454 */
3455 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3456 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3457 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3458
3459 return 0;
3460
3461 err_freebuddy:
3462 cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3463 while (i-- > 0) {
3464 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3465
3466 if (grp)
3467 kmem_cache_free(cachep, grp);
3468 }
3469 i = sbi->s_group_info_size;
3470 rcu_read_lock();
3471 group_info = rcu_dereference(sbi->s_group_info);
3472 while (i-- > 0)
3473 kfree(group_info[i]);
3474 rcu_read_unlock();
3475 iput(sbi->s_buddy_cache);
3476 err_freesgi:
3477 rcu_read_lock();
3478 kvfree(rcu_dereference(sbi->s_group_info));
3479 rcu_read_unlock();
3480 return -ENOMEM;
3481 }
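/*
 * A worked example of the prefetch sizing above, assuming 4 KiB blocks
 * (s_blocksize_bits = 12) and s_log_groups_per_flex = 4: one flex group
 * is 1 << 4 = 16 groups, BLK_MAX_SEGMENT_SIZE >> (12 - 9) comfortably
 * exceeds that, so s_mb_prefetch = 16 * 8 = 128 groups and
 * s_mb_prefetch_limit = 512, both later clamped to the group count.
 */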
3482
3483 static void ext4_groupinfo_destroy_slabs(void)
3484 {
3485 int i;
3486
3487 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3488 kmem_cache_destroy(ext4_groupinfo_caches[i]);
3489 ext4_groupinfo_caches[i] = NULL;
3490 }
3491 }
3492
3493 static int ext4_groupinfo_create_slab(size_t size)
3494 {
3495 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3496 int slab_size;
3497 int blocksize_bits = order_base_2(size);
3498 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3499 struct kmem_cache *cachep;
3500
3501 if (cache_index >= NR_GRPINFO_CACHES)
3502 return -EINVAL;
3503
3504 if (unlikely(cache_index < 0))
3505 cache_index = 0;
3506
3507 mutex_lock(&ext4_grpinfo_slab_create_mutex);
3508 if (ext4_groupinfo_caches[cache_index]) {
3509 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3510 return 0; /* Already created */
3511 }
3512
3513 slab_size = offsetof(struct ext4_group_info,
3514 bb_counters[blocksize_bits + 2]);
3515
3516 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3517 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3518 NULL);
3519
3520 ext4_groupinfo_caches[cache_index] = cachep;
3521
3522 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3523 if (!cachep) {
3524 printk(KERN_EMERG
3525 "EXT4-fs: no memory for groupinfo slab cache\n");
3526 return -ENOMEM;
3527 }
3528
3529 return 0;
3530 }
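/*
 * For example, with a 4 KiB block size order_base_2(4096) = 12, so the
 * slab is sized to hold a struct ext4_group_info plus
 * bb_counters[0..13]: one counter for each buddy order the bitmap of a
 * 4 KiB buddy block can track.
 */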
3531
3532 static void ext4_discard_work(struct work_struct *work)
3533 {
3534 struct ext4_sb_info *sbi = container_of(work,
3535 struct ext4_sb_info, s_discard_work);
3536 struct super_block *sb = sbi->s_sb;
3537 struct ext4_free_data *fd, *nfd;
3538 struct ext4_buddy e4b;
3539 LIST_HEAD(discard_list);
3540 ext4_group_t grp, load_grp;
3541 int err = 0;
3542
3543 spin_lock(&sbi->s_md_lock);
3544 list_splice_init(&sbi->s_discard_list, &discard_list);
3545 spin_unlock(&sbi->s_md_lock);
3546
3547 load_grp = UINT_MAX;
3548 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3549 /*
3550 * If filesystem is umounting or no memory or suffering
3551 * from no space, give up the discard
3552 */
3553 if ((sb->s_flags & SB_ACTIVE) && !err &&
3554 !atomic_read(&sbi->s_retry_alloc_pending)) {
3555 grp = fd->efd_group;
3556 if (grp != load_grp) {
3557 if (load_grp != UINT_MAX)
3558 ext4_mb_unload_buddy(&e4b);
3559
3560 err = ext4_mb_load_buddy(sb, grp, &e4b);
3561 if (err) {
3562 kmem_cache_free(ext4_free_data_cachep, fd);
3563 load_grp = UINT_MAX;
3564 continue;
3565 } else {
3566 load_grp = grp;
3567 }
3568 }
3569
3570 ext4_lock_group(sb, grp);
3571 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3572 fd->efd_start_cluster + fd->efd_count - 1, 1);
3573 ext4_unlock_group(sb, grp);
3574 }
3575 kmem_cache_free(ext4_free_data_cachep, fd);
3576 }
3577
3578 if (load_grp != UINT_MAX)
3579 ext4_mb_unload_buddy(&e4b);
3580 }
3581
3582 int ext4_mb_init(struct super_block *sb)
3583 {
3584 struct ext4_sb_info *sbi = EXT4_SB(sb);
3585 unsigned i, j;
3586 unsigned offset, offset_incr;
3587 unsigned max;
3588 int ret;
3589
3590 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3591
3592 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3593 if (sbi->s_mb_offsets == NULL) {
3594 ret = -ENOMEM;
3595 goto out;
3596 }
3597
3598 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3599 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3600 if (sbi->s_mb_maxs == NULL) {
3601 ret = -ENOMEM;
3602 goto out;
3603 }
3604
3605 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3606 if (ret < 0)
3607 goto out;
3608
3609 /* order 0 is regular bitmap */
3610 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3611 sbi->s_mb_offsets[0] = 0;
3612
3613 i = 1;
3614 offset = 0;
3615 offset_incr = 1 << (sb->s_blocksize_bits - 1);
3616 max = sb->s_blocksize << 2;
3617 do {
3618 sbi->s_mb_offsets[i] = offset;
3619 sbi->s_mb_maxs[i] = max;
3620 offset += offset_incr;
3621 offset_incr = offset_incr >> 1;
3622 max = max >> 1;
3623 i++;
3624 } while (i < MB_NUM_ORDERS(sb));
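/*
 * For example, with 4 KiB blocks the loop above yields
 *	offsets[1] = 0,    maxs[1] = 16384
 *	offsets[2] = 2048, maxs[2] = 8192
 *	offsets[3] = 3072, maxs[3] = 4096, ...
 * i.e. each order's bitmap starts (byte offset) where the previous one
 * ended and tracks half as many bits.
 */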
3625
3626 sbi->s_mb_avg_fragment_size =
3627 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3628 GFP_KERNEL);
3629 if (!sbi->s_mb_avg_fragment_size) {
3630 ret = -ENOMEM;
3631 goto out;
3632 }
3633 sbi->s_mb_avg_fragment_size_locks =
3634 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3635 GFP_KERNEL);
3636 if (!sbi->s_mb_avg_fragment_size_locks) {
3637 ret = -ENOMEM;
3638 goto out;
3639 }
3640 for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3641 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3642 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3643 }
3644 sbi->s_mb_largest_free_orders =
3645 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3646 GFP_KERNEL);
3647 if (!sbi->s_mb_largest_free_orders) {
3648 ret = -ENOMEM;
3649 goto out;
3650 }
3651 sbi->s_mb_largest_free_orders_locks =
3652 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3653 GFP_KERNEL);
3654 if (!sbi->s_mb_largest_free_orders_locks) {
3655 ret = -ENOMEM;
3656 goto out;
3657 }
3658 for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3659 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3660 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3661 }
3662
3663 spin_lock_init(&sbi->s_md_lock);
3664 sbi->s_mb_free_pending = 0;
3665 INIT_LIST_HEAD(&sbi->s_freed_data_list);
3666 INIT_LIST_HEAD(&sbi->s_discard_list);
3667 INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3668 atomic_set(&sbi->s_retry_alloc_pending, 0);
3669
3670 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3671 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3672 sbi->s_mb_stats = MB_DEFAULT_STATS;
3673 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3674 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3675 sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;
3676
3677 /*
3678 * The default group preallocation is 512, which for 4k block
3679 * sizes translates to 2 megabytes. However for bigalloc file
3680 * systems, this is probably too big (i.e, if the cluster size
3681 * is 1 megabyte, then group preallocation size becomes half a
3682 * gigabyte!). As a default, we will keep a two megabyte
3683 * group prealloc size for cluster sizes up to 64k, and after
3684 * that, we will force a minimum group preallocation size of
3685 * 32 clusters. This translates to 8 megs when the cluster
3686 * size is 256k, and 32 megs when the cluster size is 1 meg,
3687 * which seems reasonable as a default.
3688 */
3689 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3690 sbi->s_cluster_bits, 32);
3691 /*
3692 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3693 * to the lowest multiple of s_stripe which is bigger than
3694 * the s_mb_group_prealloc as determined above. We want
3695 * the preallocation size to be an exact multiple of the
3696 * RAID stripe size so that preallocations don't fragment
3697 * the stripes.
3698 */
3699 if (sbi->s_stripe > 1) {
3700 sbi->s_mb_group_prealloc = roundup(
3701 sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
3702 }
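/*
 * Two worked examples of the defaults above: with 4 KiB blocks and no
 * bigalloc (s_cluster_bits = 0), s_mb_group_prealloc =
 * max(512 >> 0, 32) = 512 clusters = 2 MiB; with 1 MiB clusters
 * (s_cluster_bits = 8) it is max(512 >> 8, 32) = 32 clusters = 32 MiB,
 * matching the figures in the comment above.
 */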
3703
3704 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3705 if (sbi->s_locality_groups == NULL) {
3706 ret = -ENOMEM;
3707 goto out;
3708 }
3709 for_each_possible_cpu(i) {
3710 struct ext4_locality_group *lg;
3711 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3712 mutex_init(&lg->lg_mutex);
3713 for (j = 0; j < PREALLOC_TB_SIZE; j++)
3714 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3715 spin_lock_init(&lg->lg_prealloc_lock);
3716 }
3717
3718 if (bdev_nonrot(sb->s_bdev))
3719 sbi->s_mb_max_linear_groups = 0;
3720 else
3721 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3722 /* init file for buddy data */
3723 ret = ext4_mb_init_backend(sb);
3724 if (ret != 0)
3725 goto out_free_locality_groups;
3726
3727 return 0;
3728
3729 out_free_locality_groups:
3730 free_percpu(sbi->s_locality_groups);
3731 sbi->s_locality_groups = NULL;
3732 out:
3733 kfree(sbi->s_mb_avg_fragment_size);
3734 kfree(sbi->s_mb_avg_fragment_size_locks);
3735 kfree(sbi->s_mb_largest_free_orders);
3736 kfree(sbi->s_mb_largest_free_orders_locks);
3737 kfree(sbi->s_mb_offsets);
3738 sbi->s_mb_offsets = NULL;
3739 kfree(sbi->s_mb_maxs);
3740 sbi->s_mb_maxs = NULL;
3741 return ret;
3742 }
3743
3744 /* needs to be called with the ext4 group lock held */
3745 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3746 {
3747 struct ext4_prealloc_space *pa;
3748 struct list_head *cur, *tmp;
3749 int count = 0;
3750
3751 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3752 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3753 list_del(&pa->pa_group_list);
3754 count++;
3755 kmem_cache_free(ext4_pspace_cachep, pa);
3756 }
3757 return count;
3758 }
3759
3760 int ext4_mb_release(struct super_block *sb)
3761 {
3762 ext4_group_t ngroups = ext4_get_groups_count(sb);
3763 ext4_group_t i;
3764 int num_meta_group_infos;
3765 struct ext4_group_info *grinfo, ***group_info;
3766 struct ext4_sb_info *sbi = EXT4_SB(sb);
3767 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3768 int count;
3769
3770 if (test_opt(sb, DISCARD)) {
3771 /*
3772 * wait for the discard work to drain all of the ext4_free_data entries
3773 */
3774 flush_work(&sbi->s_discard_work);
3775 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3776 }
3777
3778 if (sbi->s_group_info) {
3779 for (i = 0; i < ngroups; i++) {
3780 cond_resched();
3781 grinfo = ext4_get_group_info(sb, i);
3782 if (!grinfo)
3783 continue;
3784 mb_group_bb_bitmap_free(grinfo);
3785 ext4_lock_group(sb, i);
3786 count = ext4_mb_cleanup_pa(grinfo);
3787 if (count)
3788 mb_debug(sb, "mballoc: %d PAs left\n",
3789 count);
3790 ext4_unlock_group(sb, i);
3791 kmem_cache_free(cachep, grinfo);
3792 }
3793 num_meta_group_infos = (ngroups +
3794 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3795 EXT4_DESC_PER_BLOCK_BITS(sb);
3796 rcu_read_lock();
3797 group_info = rcu_dereference(sbi->s_group_info);
3798 for (i = 0; i < num_meta_group_infos; i++)
3799 kfree(group_info[i]);
3800 kvfree(group_info);
3801 rcu_read_unlock();
3802 }
3803 kfree(sbi->s_mb_avg_fragment_size);
3804 kfree(sbi->s_mb_avg_fragment_size_locks);
3805 kfree(sbi->s_mb_largest_free_orders);
3806 kfree(sbi->s_mb_largest_free_orders_locks);
3807 kfree(sbi->s_mb_offsets);
3808 kfree(sbi->s_mb_maxs);
3809 iput(sbi->s_buddy_cache);
3810 if (sbi->s_mb_stats) {
3811 ext4_msg(sb, KERN_INFO,
3812 "mballoc: %u blocks %u reqs (%u success)",
3813 atomic_read(&sbi->s_bal_allocated),
3814 atomic_read(&sbi->s_bal_reqs),
3815 atomic_read(&sbi->s_bal_success));
3816 ext4_msg(sb, KERN_INFO,
3817 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3818 "%u 2^N hits, %u breaks, %u lost",
3819 atomic_read(&sbi->s_bal_ex_scanned),
3820 atomic_read(&sbi->s_bal_groups_scanned),
3821 atomic_read(&sbi->s_bal_goals),
3822 atomic_read(&sbi->s_bal_2orders),
3823 atomic_read(&sbi->s_bal_breaks),
3824 atomic_read(&sbi->s_mb_lost_chunks));
3825 ext4_msg(sb, KERN_INFO,
3826 "mballoc: %u generated and it took %llu",
3827 atomic_read(&sbi->s_mb_buddies_generated),
3828 atomic64_read(&sbi->s_mb_generation_time));
3829 ext4_msg(sb, KERN_INFO,
3830 "mballoc: %u preallocated, %u discarded",
3831 atomic_read(&sbi->s_mb_preallocated),
3832 atomic_read(&sbi->s_mb_discarded));
3833 }
3834
3835 free_percpu(sbi->s_locality_groups);
3836
3837 return 0;
3838 }
3839
3840 static inline int ext4_issue_discard(struct super_block *sb,
3841 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3842 struct bio **biop)
3843 {
3844 ext4_fsblk_t discard_block;
3845
3846 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3847 ext4_group_first_block_no(sb, block_group));
3848 count = EXT4_C2B(EXT4_SB(sb), count);
3849 trace_ext4_discard_blocks(sb,
3850 (unsigned long long) discard_block, count);
3851 if (biop) {
3852 return __blkdev_issue_discard(sb->s_bdev,
3853 (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3854 (sector_t)count << (sb->s_blocksize_bits - 9),
3855 GFP_NOFS, biop);
3856 } else
3857 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3858 }
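/*
 * The shifts above convert filesystem blocks to 512-byte sectors: e.g.
 * with 4 KiB blocks (s_blocksize_bits = 12) each block is
 * 1 << (12 - 9) = 8 sectors, so block 1000 starts at sector 8000.
 */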
3859
3860 static void ext4_free_data_in_buddy(struct super_block *sb,
3861 struct ext4_free_data *entry)
3862 {
3863 struct ext4_buddy e4b;
3864 struct ext4_group_info *db;
3865 int err, count = 0;
3866
3867 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3868 entry->efd_count, entry->efd_group, entry);
3869
3870 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3871 /* we expect to find existing buddy because it's pinned */
3872 BUG_ON(err != 0);
3873
3874 spin_lock(&EXT4_SB(sb)->s_md_lock);
3875 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3876 spin_unlock(&EXT4_SB(sb)->s_md_lock);
3877
3878 db = e4b.bd_info;
3879 /* there are blocks to put in buddy to make them really free */
3880 count += entry->efd_count;
3881 ext4_lock_group(sb, entry->efd_group);
3882 /* Take it out of per group rb tree */
3883 rb_erase(&entry->efd_node, &(db->bb_free_root));
3884 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3885
3886 /*
3887 * Clear the trimmed flag for the group so that the next
3888 * ext4_trim_fs can trim it.
3889 * If the volume is mounted with -o discard, online discard
3890 * is supported and the free blocks will be trimmed online.
3891 */
3892 if (!test_opt(sb, DISCARD))
3893 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3894
3895 if (!db->bb_free_root.rb_node) {
3896 /* No more items in the per group rb tree
3897 * balance refcounts from ext4_mb_free_metadata()
3898 */
3899 put_page(e4b.bd_buddy_page);
3900 put_page(e4b.bd_bitmap_page);
3901 }
3902 ext4_unlock_group(sb, entry->efd_group);
3903 ext4_mb_unload_buddy(&e4b);
3904
3905 mb_debug(sb, "freed %d blocks in 1 structures\n", count);
3906 }
3907
3908 /*
3909 * This function is called by the jbd2 layer once the commit has finished,
3910 * so we know we can free the blocks that were released with that commit.
3911 */
3912 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3913 {
3914 struct ext4_sb_info *sbi = EXT4_SB(sb);
3915 struct ext4_free_data *entry, *tmp;
3916 LIST_HEAD(freed_data_list);
3917 struct list_head *cut_pos = NULL;
3918 bool wake;
3919
3920 spin_lock(&sbi->s_md_lock);
3921 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3922 if (entry->efd_tid != commit_tid)
3923 break;
3924 cut_pos = &entry->efd_list;
3925 }
3926 if (cut_pos)
3927 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3928 cut_pos);
3929 spin_unlock(&sbi->s_md_lock);
3930
3931 list_for_each_entry(entry, &freed_data_list, efd_list)
3932 ext4_free_data_in_buddy(sb, entry);
3933
3934 if (test_opt(sb, DISCARD)) {
3935 spin_lock(&sbi->s_md_lock);
3936 wake = list_empty(&sbi->s_discard_list);
3937 list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3938 spin_unlock(&sbi->s_md_lock);
3939 if (wake)
3940 queue_work(system_unbound_wq, &sbi->s_discard_work);
3941 } else {
3942 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3943 kmem_cache_free(ext4_free_data_cachep, entry);
3944 }
3945 }
3946
3947 int __init ext4_init_mballoc(void)
3948 {
3949 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3950 SLAB_RECLAIM_ACCOUNT);
3951 if (ext4_pspace_cachep == NULL)
3952 goto out;
3953
3954 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3955 SLAB_RECLAIM_ACCOUNT);
3956 if (ext4_ac_cachep == NULL)
3957 goto out_pa_free;
3958
3959 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3960 SLAB_RECLAIM_ACCOUNT);
3961 if (ext4_free_data_cachep == NULL)
3962 goto out_ac_free;
3963
3964 return 0;
3965
3966 out_ac_free:
3967 kmem_cache_destroy(ext4_ac_cachep);
3968 out_pa_free:
3969 kmem_cache_destroy(ext4_pspace_cachep);
3970 out:
3971 return -ENOMEM;
3972 }
3973
3974 void ext4_exit_mballoc(void)
3975 {
3976 /*
3977 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3978 * before destroying the slab cache.
3979 */
3980 rcu_barrier();
3981 kmem_cache_destroy(ext4_pspace_cachep);
3982 kmem_cache_destroy(ext4_ac_cachep);
3983 kmem_cache_destroy(ext4_free_data_cachep);
3984 ext4_groupinfo_destroy_slabs();
3985 }
3986
3987
3988 /*
3989 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3990 * Returns 0 if success or error code
3991 */
3992 static noinline_for_stack int
3993 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3994 handle_t *handle, unsigned int reserv_clstrs)
3995 {
3996 struct buffer_head *bitmap_bh = NULL;
3997 struct ext4_group_desc *gdp;
3998 struct buffer_head *gdp_bh;
3999 struct ext4_sb_info *sbi;
4000 struct super_block *sb;
4001 ext4_fsblk_t block;
4002 int err, len;
4003
4004 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4005 BUG_ON(ac->ac_b_ex.fe_len <= 0);
4006
4007 sb = ac->ac_sb;
4008 sbi = EXT4_SB(sb);
4009
4010 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
4011 if (IS_ERR(bitmap_bh)) {
4012 return PTR_ERR(bitmap_bh);
4013 }
4014
4015 BUFFER_TRACE(bitmap_bh, "getting write access");
4016 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
4017 EXT4_JTR_NONE);
4018 if (err)
4019 goto out_err;
4020
4021 err = -EIO;
4022 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
4023 if (!gdp)
4024 goto out_err;
4025
4026 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
4027 ext4_free_group_clusters(sb, gdp));
4028
4029 BUFFER_TRACE(gdp_bh, "get_write_access");
4030 err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
4031 if (err)
4032 goto out_err;
4033
4034 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4035
4036 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4037 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
4038 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
4039 "fs metadata", block, block+len);
4040 /* The file system is mounted not to panic on error,
4041 * so fix the bitmap and return EFSCORRUPTED.
4042 * We leak some of the blocks here.
4043 */
4044 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
4045 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
4046 ac->ac_b_ex.fe_len);
4047 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
4048 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4049 if (!err)
4050 err = -EFSCORRUPTED;
4051 goto out_err;
4052 }
4053
4054 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
4055 #ifdef AGGRESSIVE_CHECK
4056 {
4057 int i;
4058 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
4059 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
4060 bitmap_bh->b_data));
4061 }
4062 }
4063 #endif
4064 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
4065 ac->ac_b_ex.fe_len);
4066 if (ext4_has_group_desc_csum(sb) &&
4067 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
4068 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
4069 ext4_free_group_clusters_set(sb, gdp,
4070 ext4_free_clusters_after_init(sb,
4071 ac->ac_b_ex.fe_group, gdp));
4072 }
4073 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
4074 ext4_free_group_clusters_set(sb, gdp, len);
4075 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
4076 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
4077
4078 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
4079 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
4080 /*
4081 * Now reduce the dirty block count also. Should not go negative
4082 */
4083 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
4084 /* release all the reserved blocks if non delalloc */
4085 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4086 reserv_clstrs);
4087
4088 if (sbi->s_log_groups_per_flex) {
4089 ext4_group_t flex_group = ext4_flex_group(sbi,
4090 ac->ac_b_ex.fe_group);
4091 atomic64_sub(ac->ac_b_ex.fe_len,
4092 &sbi_array_rcu_deref(sbi, s_flex_groups,
4093 flex_group)->free_clusters);
4094 }
4095
4096 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4097 if (err)
4098 goto out_err;
4099 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
4100
4101 out_err:
4102 brelse(bitmap_bh);
4103 return err;
4104 }
4105
4106 /*
4107 * Idempotent helper for Ext4 fast commit replay path to set the state of
4108 * blocks in bitmaps and update counters.
4109 */
4110 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
4111 int len, int state)
4112 {
4113 struct buffer_head *bitmap_bh = NULL;
4114 struct ext4_group_desc *gdp;
4115 struct buffer_head *gdp_bh;
4116 struct ext4_sb_info *sbi = EXT4_SB(sb);
4117 ext4_group_t group;
4118 ext4_grpblk_t blkoff;
4119 int i, err = 0;
4120 int already;
4121 unsigned int clen, clen_changed, thisgrp_len;
4122
4123 while (len > 0) {
4124 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
4125
4126 /*
4127 * Check to see if we are freeing blocks across a group
4128 * boundary.
4129 * With flex_bg, (block, len) may span more than one group. In
4130 * that case we need to get the corresponding group metadata to
4131 * work with, which is why the range is processed one group per
4132 * iteration of this loop.
4133 */
4134 thisgrp_len = min_t(unsigned int, (unsigned int)len,
4135 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
4136 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
4137
4138 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
4139 ext4_error(sb, "Marking blocks in system zone - "
4140 "Block = %llu, len = %u",
4141 block, thisgrp_len);
4142 bitmap_bh = NULL;
4143 break;
4144 }
4145
4146 bitmap_bh = ext4_read_block_bitmap(sb, group);
4147 if (IS_ERR(bitmap_bh)) {
4148 err = PTR_ERR(bitmap_bh);
4149 bitmap_bh = NULL;
4150 break;
4151 }
4152
4153 err = -EIO;
4154 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
4155 if (!gdp)
4156 break;
4157
4158 ext4_lock_group(sb, group);
4159 already = 0;
4160 for (i = 0; i < clen; i++)
4161 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
4162 !state)
4163 already++;
4164
4165 clen_changed = clen - already;
4166 if (state)
4167 mb_set_bits(bitmap_bh->b_data, blkoff, clen);
4168 else
4169 mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
4170 if (ext4_has_group_desc_csum(sb) &&
4171 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
4172 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
4173 ext4_free_group_clusters_set(sb, gdp,
4174 ext4_free_clusters_after_init(sb, group, gdp));
4175 }
4176 if (state)
4177 clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
4178 else
4179 clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
4180
4181 ext4_free_group_clusters_set(sb, gdp, clen);
4182 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
4183 ext4_group_desc_csum_set(sb, group, gdp);
4184
4185 ext4_unlock_group(sb, group);
4186
4187 if (sbi->s_log_groups_per_flex) {
4188 ext4_group_t flex_group = ext4_flex_group(sbi, group);
4189 struct flex_groups *fg = sbi_array_rcu_deref(sbi,
4190 s_flex_groups, flex_group);
4191
4192 if (state)
4193 atomic64_sub(clen_changed, &fg->free_clusters);
4194 else
4195 atomic64_add(clen_changed, &fg->free_clusters);
4196
4197 }
4198
4199 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
4200 if (err)
4201 break;
4202 sync_dirty_buffer(bitmap_bh);
4203 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
4204 sync_dirty_buffer(gdp_bh);
4205 if (err)
4206 break;
4207
4208 block += thisgrp_len;
4209 len -= thisgrp_len;
4210 brelse(bitmap_bh);
4211 BUG_ON(len < 0);
4212 }
4213
4214 if (err)
4215 brelse(bitmap_bh);
4216 }
4217
4218 /*
4219 * here we normalize request for locality group
4220 * Group requests are normalized to s_mb_group_prealloc, which is
4221 * rounded up to a multiple of s_stripe if a stripe size was set via mount option.
4222 * s_mb_group_prealloc can be configured via
4223 * /sys/fs/ext4/<partition>/mb_group_prealloc
4224 *
4225 * XXX: should we try to preallocate more than the group has now?
4226 */
4227 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4228 {
4229 struct super_block *sb = ac->ac_sb;
4230 struct ext4_locality_group *lg = ac->ac_lg;
4231
4232 BUG_ON(lg == NULL);
4233 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4234 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4235 }
4236
4237 /*
4238 * This function returns the next element to look at during inode
4239 * PA rbtree walk. We assume that we have held the inode PA rbtree lock
4240 * (ei->i_prealloc_lock)
4241 *
4242 * new_start The start of the range we want to compare
4243 * cur_start The existing start that we are comparing against
4244 * node The node of the rb_tree
4245 */
4246 static inline struct rb_node*
4247 ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
4248 {
4249 if (new_start < cur_start)
4250 return node->rb_left;
4251 else
4252 return node->rb_right;
4253 }
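/*
 * Usage sketch: callers descend the tree with
 *
 *	for (iter = root->rb_node; iter;
 *	     iter = ext4_mb_pa_rb_next_iter(new_start, cur_start, iter))
 *
 * going left when the new start sorts below the current node's start
 * and right otherwise, i.e. a plain binary search keyed on pa_lstart.
 */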
4254
4255 static inline void
4256 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
4257 ext4_lblk_t start, loff_t end)
4258 {
4259 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4260 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4261 struct ext4_prealloc_space *tmp_pa;
4262 ext4_lblk_t tmp_pa_start;
4263 loff_t tmp_pa_end;
4264 struct rb_node *iter;
4265
4266 read_lock(&ei->i_prealloc_lock);
4267 for (iter = ei->i_prealloc_node.rb_node; iter;
4268 iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
4269 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4270 pa_node.inode_node);
4271 tmp_pa_start = tmp_pa->pa_lstart;
4272 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4273
4274 spin_lock(&tmp_pa->pa_lock);
4275 if (tmp_pa->pa_deleted == 0)
4276 BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
4277 spin_unlock(&tmp_pa->pa_lock);
4278 }
4279 read_unlock(&ei->i_prealloc_lock);
4280 }
4281
4282 /*
4283 * Given an allocation context "ac" and a range "start", "end", check
4284 * and adjust boundaries if the range overlaps with any of the existing
4285 * preallocations stored in the corresponding inode of the allocation context.
4286 *
4287 * Parameters:
4288 * ac allocation context
4289 * start start of the new range
4290 * end end of the new range
4291 */
4292 static inline void
4293 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
4294 ext4_lblk_t *start, loff_t *end)
4295 {
4296 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4297 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4298 struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
4299 struct rb_node *iter;
4300 ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
4301 loff_t new_end, tmp_pa_end, left_pa_end = -1;
4302
4303 new_start = *start;
4304 new_end = *end;
4305
4306 /*
4307 * Adjust the normalized range so that it doesn't overlap with any
4308 * existing preallocated blocks(PAs). Make sure to hold the rbtree lock
4309 * so it doesn't change underneath us.
4310 */
4311 read_lock(&ei->i_prealloc_lock);
4312
4313 /* Step 1: find any one immediate neighboring PA of the normalized range */
4314 for (iter = ei->i_prealloc_node.rb_node; iter;
4315 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4316 tmp_pa_start, iter)) {
4317 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4318 pa_node.inode_node);
4319 tmp_pa_start = tmp_pa->pa_lstart;
4320 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4321
4322 /* PA must not overlap original request */
4323 spin_lock(&tmp_pa->pa_lock);
4324 if (tmp_pa->pa_deleted == 0)
4325 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
4326 ac->ac_o_ex.fe_logical < tmp_pa_start));
4327 spin_unlock(&tmp_pa->pa_lock);
4328 }
4329
4330 /*
4331 * Step 2: check if the found PA is left or right neighbor and
4332 * get the other neighbor
4333 */
4334 if (tmp_pa) {
4335 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
4336 struct rb_node *tmp;
4337
4338 left_pa = tmp_pa;
4339 tmp = rb_next(&left_pa->pa_node.inode_node);
4340 if (tmp) {
4341 right_pa = rb_entry(tmp,
4342 struct ext4_prealloc_space,
4343 pa_node.inode_node);
4344 }
4345 } else {
4346 struct rb_node *tmp;
4347
4348 right_pa = tmp_pa;
4349 tmp = rb_prev(&right_pa->pa_node.inode_node);
4350 if (tmp) {
4351 left_pa = rb_entry(tmp,
4352 struct ext4_prealloc_space,
4353 pa_node.inode_node);
4354 }
4355 }
4356 }
4357
4358 /* Step 3: get the non deleted neighbors */
4359 if (left_pa) {
4360 for (iter = &left_pa->pa_node.inode_node;;
4361 iter = rb_prev(iter)) {
4362 if (!iter) {
4363 left_pa = NULL;
4364 break;
4365 }
4366
4367 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4368 pa_node.inode_node);
4369 left_pa = tmp_pa;
4370 spin_lock(&tmp_pa->pa_lock);
4371 if (tmp_pa->pa_deleted == 0) {
4372 spin_unlock(&tmp_pa->pa_lock);
4373 break;
4374 }
4375 spin_unlock(&tmp_pa->pa_lock);
4376 }
4377 }
4378
4379 if (right_pa) {
4380 for (iter = &right_pa->pa_node.inode_node;;
4381 iter = rb_next(iter)) {
4382 if (!iter) {
4383 right_pa = NULL;
4384 break;
4385 }
4386
4387 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4388 pa_node.inode_node);
4389 right_pa = tmp_pa;
4390 spin_lock(&tmp_pa->pa_lock);
4391 if (tmp_pa->pa_deleted == 0) {
4392 spin_unlock(&tmp_pa->pa_lock);
4393 break;
4394 }
4395 spin_unlock(&tmp_pa->pa_lock);
4396 }
4397 }
4398
4399 if (left_pa) {
4400 left_pa_end = pa_logical_end(sbi, left_pa);
4401 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
4402 }
4403
4404 if (right_pa) {
4405 right_pa_start = right_pa->pa_lstart;
4406 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
4407 }
4408
4409 /* Step 4: trim our normalized range to not overlap with the neighbors */
4410 if (left_pa) {
4411 if (left_pa_end > new_start)
4412 new_start = left_pa_end;
4413 }
4414
4415 if (right_pa) {
4416 if (right_pa_start < new_end)
4417 new_end = right_pa_start;
4418 }
4419 read_unlock(&ei->i_prealloc_lock);
4420
4421 /* XXX: extra loop to check we really don't overlap preallocations */
4422 ext4_mb_pa_assert_overlap(ac, new_start, new_end);
4423
4424 *start = new_start;
4425 *end = new_end;
4426 }
4427
4428 /*
4429 * Normalization means making request better in terms of
4430 * size and alignment
4431 */
4432 static noinline_for_stack void
4433 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4434 struct ext4_allocation_request *ar)
4435 {
4436 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4437 struct ext4_super_block *es = sbi->s_es;
4438 int bsbits, max;
4439 loff_t size, start_off, end;
4440 loff_t orig_size __maybe_unused;
4441 ext4_lblk_t start;
4442
4443 /* only data requests are normalized; metadata requests
4444 do not need preallocation */
4445 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4446 return;
4447
4448 /* sometimes the caller may want exact blocks */
4449 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4450 return;
4451
4452 /* caller may indicate that preallocation isn't
4453 * required (it's a tail, for example) */
4454 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4455 return;
4456
4457 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4458 ext4_mb_normalize_group_request(ac);
4459 return;
4460 }
4461
4462 bsbits = ac->ac_sb->s_blocksize_bits;
4463
4464 /* first, let's learn actual file size
4465 * given current request is allocated */
4466 size = extent_logical_end(sbi, &ac->ac_o_ex);
4467 size = size << bsbits;
4468 if (size < i_size_read(ac->ac_inode))
4469 size = i_size_read(ac->ac_inode);
4470 orig_size = size;
4471
4472 /* max size of free chunks */
4473 max = 2 << bsbits;
4474
4475 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \
4476 (req <= (size) || max <= (chunk_size))
4477
4478 /* first, try to predict filesize */
4479 /* XXX: should this table be tunable? */
4480 start_off = 0;
4481 if (size <= 16 * 1024) {
4482 size = 16 * 1024;
4483 } else if (size <= 32 * 1024) {
4484 size = 32 * 1024;
4485 } else if (size <= 64 * 1024) {
4486 size = 64 * 1024;
4487 } else if (size <= 128 * 1024) {
4488 size = 128 * 1024;
4489 } else if (size <= 256 * 1024) {
4490 size = 256 * 1024;
4491 } else if (size <= 512 * 1024) {
4492 size = 512 * 1024;
4493 } else if (size <= 1024 * 1024) {
4494 size = 1024 * 1024;
4495 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4496 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4497 (21 - bsbits)) << 21;
4498 size = 2 * 1024 * 1024;
4499 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4500 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4501 (22 - bsbits)) << 22;
4502 size = 4 * 1024 * 1024;
4503 } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
4504 (8<<20)>>bsbits, max, 8 * 1024)) {
4505 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4506 (23 - bsbits)) << 23;
4507 size = 8 * 1024 * 1024;
4508 } else {
4509 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4510 size = (loff_t) EXT4_C2B(sbi,
4511 ac->ac_o_ex.fe_len) << bsbits;
4512 }
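/*
 * For example, a predicted size of 300 KiB lands in the 512 KiB bucket
 * above, while a 3 MiB prediction (with 4 KiB blocks, so
 * max = 2 << 12 = 8192) passes NRL_CHECK_SIZE(size, 4M, max, 2k) and is
 * normalized to a 2 MiB chunk whose start is aligned down to a 2 MiB
 * boundary.
 */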
4513 size = size >> bsbits;
4514 start = start_off >> bsbits;
4515
4516 /*
4517 * For tiny groups (smaller than 8MB) the chosen allocation
4518 * alignment may be larger than group size. Make sure the
4519 * alignment does not move allocation to a different group which
4520 * makes mballoc fail assertions later.
4521 */
4522 start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4523 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4524
4525 /* avoid unnecessary preallocation that may trigger assertions */
4526 if (start + size > EXT_MAX_BLOCKS)
4527 size = EXT_MAX_BLOCKS - start;
4528
4529 /* don't cover already allocated blocks in selected range */
4530 if (ar->pleft && start <= ar->lleft) {
4531 size -= ar->lleft + 1 - start;
4532 start = ar->lleft + 1;
4533 }
4534 if (ar->pright && start + size - 1 >= ar->lright)
4535 size -= start + size - ar->lright;
4536
4537 /*
4538 * Trim allocation request for filesystems with artificially small
4539 * groups.
4540 */
4541 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4542 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4543
4544 end = start + size;
4545
4546 ext4_mb_pa_adjust_overlap(ac, &start, &end);
4547
4548 size = end - start;
4549
4550 /*
4551 * In this function "start" and "size" are normalized for better
4552 * alignment and length such that we could preallocate more blocks.
4553 * This normalization is done such that original request of
4554 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4555 * "size" boundaries.
4556 * (Note fe_len can be relaxed since the FS block allocation API does not
4557 * guarantee the number of contiguous blocks allocated, since that
4558 * depends upon the free space left, etc).
4559 * In case of inode pa, later we use the allocated blocks
4560 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
4561 * range of goal/best blocks [start, size] to put it at the
4562 * ac_o_ex.fe_logical extent of this inode.
4563 * (See ext4_mb_use_inode_pa() for more details)
4564 */
4565 if (start + size <= ac->ac_o_ex.fe_logical ||
4566 start > ac->ac_o_ex.fe_logical) {
4567 ext4_msg(ac->ac_sb, KERN_ERR,
4568 "start %lu, size %lu, fe_logical %lu",
4569 (unsigned long) start, (unsigned long) size,
4570 (unsigned long) ac->ac_o_ex.fe_logical);
4571 BUG();
4572 }
4573 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4574
4575 /* now prepare goal request */
4576
4577 /* XXX: is it better to align blocks WRT logical
4578 * placement or satisfy big request as is */
4579 ac->ac_g_ex.fe_logical = start;
4580 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4581 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
4582
4583 /* define goal start in order to merge */
4584 if (ar->pright && (ar->lright == (start + size)) &&
4585 ar->pright >= size &&
4586 ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4587 /* merge to the right */
4588 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4589 &ac->ac_g_ex.fe_group,
4590 &ac->ac_g_ex.fe_start);
4591 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4592 }
4593 if (ar->pleft && (ar->lleft + 1 == start) &&
4594 ar->pleft + 1 < ext4_blocks_count(es)) {
4595 /* merge to the left */
4596 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4597 &ac->ac_g_ex.fe_group,
4598 &ac->ac_g_ex.fe_start);
4599 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4600 }
4601
4602 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4603 orig_size, start);
4604 }
4605
4606 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4607 {
4608 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4609
4610 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4611 atomic_inc(&sbi->s_bal_reqs);
4612 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4613 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4614 atomic_inc(&sbi->s_bal_success);
4615
4616 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4617 for (int i = 0; i < EXT4_MB_NUM_CRS; i++) {
4618 atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
4619 }
4620
4621 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4622 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4623 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4624 atomic_inc(&sbi->s_bal_goals);
4625 /* did we allocate as much as normalizer originally wanted? */
4626 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len)
4627 atomic_inc(&sbi->s_bal_len_goals);
4628
4629 if (ac->ac_found > sbi->s_mb_max_to_scan)
4630 atomic_inc(&sbi->s_bal_breaks);
4631 }
4632
4633 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4634 trace_ext4_mballoc_alloc(ac);
4635 else
4636 trace_ext4_mballoc_prealloc(ac);
4637 }
4638
4639 /*
4640 * Called on failure; free up any blocks from the inode PA for this
4641 * context. We don't need this for MB_GROUP_PA because we only change
4642 * pa_free in ext4_mb_release_context(), but on failure, we've already
4643 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4644 */
4645 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4646 {
4647 struct ext4_prealloc_space *pa = ac->ac_pa;
4648 struct ext4_buddy e4b;
4649 int err;
4650
4651 if (pa == NULL) {
4652 if (ac->ac_f_ex.fe_len == 0)
4653 return;
4654 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4655 if (WARN_RATELIMIT(err,
4656 "ext4: mb_load_buddy failed (%d)", err))
4657 /*
4658 * This should never happen since we pin the
4659 * pages in the ext4_allocation_context so
4660 * ext4_mb_load_buddy() should never fail.
4661 */
4662 return;
4663 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4664 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4665 ac->ac_f_ex.fe_len);
4666 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4667 ext4_mb_unload_buddy(&e4b);
4668 return;
4669 }
4670 if (pa->pa_type == MB_INODE_PA) {
4671 spin_lock(&pa->pa_lock);
4672 pa->pa_free += ac->ac_b_ex.fe_len;
4673 spin_unlock(&pa->pa_lock);
4674 }
4675 }
4676
4677 /*
4678 * use blocks preallocated to inode
4679 */
4680 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4681 struct ext4_prealloc_space *pa)
4682 {
4683 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4684 ext4_fsblk_t start;
4685 ext4_fsblk_t end;
4686 int len;
4687
4688 /* found preallocated blocks, use them */
4689 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4690 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4691 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4692 len = EXT4_NUM_B2C(sbi, end - start);
4693 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4694 &ac->ac_b_ex.fe_start);
4695 ac->ac_b_ex.fe_len = len;
4696 ac->ac_status = AC_STATUS_FOUND;
4697 ac->ac_pa = pa;
4698
4699 BUG_ON(start < pa->pa_pstart);
4700 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4701 BUG_ON(pa->pa_free < len);
4702 BUG_ON(ac->ac_b_ex.fe_len <= 0);
4703 pa->pa_free -= len;
4704
4705 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4706 }
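/*
 * A worked example, assuming one block per cluster: for a pa with
 * pa_lstart = 100, pa_pstart = 1000, pa_len = 64 and a request for 8
 * blocks at logical block 110, start = 1000 + (110 - 100) = 1010 and
 * end = min(1064, 1018) = 1018, so the request is served with 8 blocks
 * at physical block 1010 and pa_free drops by 8.
 */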
4707
4708 /*
4709 * use blocks preallocated to locality group
4710 */
4711 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4712 struct ext4_prealloc_space *pa)
4713 {
4714 unsigned int len = ac->ac_o_ex.fe_len;
4715
4716 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4717 &ac->ac_b_ex.fe_group,
4718 &ac->ac_b_ex.fe_start);
4719 ac->ac_b_ex.fe_len = len;
4720 ac->ac_status = AC_STATUS_FOUND;
4721 ac->ac_pa = pa;
4722
4723 /* we don't correct pa_pstart or pa_len here to avoid
4724 * possible race when the group is being loaded concurrently;
4725 * instead we correct pa later, after blocks are marked
4726 * in on-disk bitmap -- see ext4_mb_release_context()
4727 * Other CPUs are prevented from allocating from this pa by lg_mutex
4728 */
4729 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4730 pa->pa_lstart, len, pa);
4731 }
4732
4733 /*
4734 * Return the prealloc space that has the minimal distance
4735 * from the goal block. @cpa is the prealloc
4736 * space with the currently known minimal distance
4737 * from the goal block.
4738 */
4739 static struct ext4_prealloc_space *
4740 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4741 struct ext4_prealloc_space *pa,
4742 struct ext4_prealloc_space *cpa)
4743 {
4744 ext4_fsblk_t cur_distance, new_distance;
4745
4746 if (cpa == NULL) {
4747 atomic_inc(&pa->pa_count);
4748 return pa;
4749 }
4750 cur_distance = abs(goal_block - cpa->pa_pstart);
4751 new_distance = abs(goal_block - pa->pa_pstart);
4752
4753 if (cur_distance <= new_distance)
4754 return cpa;
4755
4756 /* drop the previous reference */
4757 atomic_dec(&cpa->pa_count);
4758 atomic_inc(&pa->pa_count);
4759 return pa;
4760 }
4761
4762 /*
4763 * check if found pa meets EXT4_MB_HINT_GOAL_ONLY
4764 */
4765 static bool
4766 ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
4767 struct ext4_prealloc_space *pa)
4768 {
4769 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4770 ext4_fsblk_t start;
4771
4772 if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
4773 return true;
4774
4775 /*
4776 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
4777 * in ext4_mb_normalize_request and will stay the same as ac_o_ex
4778 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep
4779 * consistent with ext4_mb_find_by_goal.
4780 */
4781 start = pa->pa_pstart +
4782 (ac->ac_g_ex.fe_logical - pa->pa_lstart);
4783 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4784 return false;
4785
4786 if (ac->ac_g_ex.fe_len > pa->pa_len -
4787 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
4788 return false;
4789
4790 return true;
4791 }
4792
4793 /*
4794 * search goal blocks in preallocated space
4795 */
4796 static noinline_for_stack bool
4797 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4798 {
4799 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4800 int order, i;
4801 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4802 struct ext4_locality_group *lg;
4803 struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
4804 struct rb_node *iter;
4805 ext4_fsblk_t goal_block;
4806
4807 /* only data can be preallocated */
4808 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4809 return false;
4810
4811 /*
4812 * first, try per-file preallocation by searching the inode pa rbtree.
4813 *
4814 * Here, we can't do a direct traversal of the tree because
4815 * ext4_mb_discard_group_preallocations() can concurrently mark the pa
4816 * deleted and that can cause direct traversal to skip some entries.
4817 */
4818 read_lock(&ei->i_prealloc_lock);
4819
4820 if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
4821 goto try_group_pa;
4822 }
4823
4824 /*
4825 * Step 1: Find a pa with logical start immediately adjacent to the
4826 * original logical start. This could be on the left or right.
4827 *
4828 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4829 */
4830 for (iter = ei->i_prealloc_node.rb_node; iter;
4831 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4832 tmp_pa->pa_lstart, iter)) {
4833 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4834 pa_node.inode_node);
4835 }
4836
4837 /*
4838 * Step 2: The adjacent pa might be to the right of logical start, find
4839 * the left adjacent pa. After this step we'd have a valid tmp_pa whose
4840 * logical start is towards the left of original request's logical start
4841 */
4842 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4843 struct rb_node *tmp;
4844 tmp = rb_prev(&tmp_pa->pa_node.inode_node);
4845
4846 if (tmp) {
4847 tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
4848 pa_node.inode_node);
4849 } else {
4850 /*
4851 * If there is no adjacent pa to the left then finding
4852 * an overlapping pa is not possible hence stop searching
4853 * inode pa tree
4854 */
4855 goto try_group_pa;
4856 }
4857 }
4858
4859 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4860
4861 /*
4862 * Step 3: If the left adjacent pa is deleted, keep moving left to find
4863 * the first non deleted adjacent pa. After this step we should have a
4864 * valid tmp_pa which is guaranteed to be non deleted.
4865 */
4866 for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4867 if (!iter) {
4868 /*
4869 * no non deleted left adjacent pa, so stop searching
4870 * inode pa tree
4871 */
4872 goto try_group_pa;
4873 }
4874 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4875 pa_node.inode_node);
4876 spin_lock(&tmp_pa->pa_lock);
4877 if (tmp_pa->pa_deleted == 0) {
4878 /*
4879 * We will keep holding the pa_lock from
4880 * this point on because we don't want group discard
4881 * to delete this pa underneath us. Since group
4882 * discard is anyway an ENOSPC operation it
4883 * should be okay for it to wait a few more cycles.
4884 */
4885 break;
4886 } else {
4887 spin_unlock(&tmp_pa->pa_lock);
4888 }
4889 }
4890
4891 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4892 BUG_ON(tmp_pa->pa_deleted == 1);
4893
4894 /*
4895 * Step 4: We now have the non deleted left adjacent pa. Only this
4896 * pa can possibly satisfy the request hence check if it overlaps
4897 * original logical start and stop searching if it doesn't.
4898 */
4899 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
4900 spin_unlock(&tmp_pa->pa_lock);
4901 goto try_group_pa;
4902 }
4903
4904 /* non-extent files can't have physical blocks past 2^32 */
4905 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4906 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4907 EXT4_MAX_BLOCK_FILE_PHYS)) {
4908 /*
4909 * Since PAs don't overlap, we won't find any other PA to
4910 * satisfy this.
4911 */
4912 spin_unlock(&tmp_pa->pa_lock);
4913 goto try_group_pa;
4914 }
4915
4916 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4917 atomic_inc(&tmp_pa->pa_count);
4918 ext4_mb_use_inode_pa(ac, tmp_pa);
4919 spin_unlock(&tmp_pa->pa_lock);
4920 read_unlock(&ei->i_prealloc_lock);
4921 return true;
4922 } else {
4923 /*
4924 * We found a valid overlapping pa but couldn't use it because
4925 * it had no free blocks. This should ideally never happen
4926 * because:
4927 *
4928 * 1. When a new inode pa is added to rbtree it must have
4929 * pa_free > 0 since otherwise we won't actually need
4930 * preallocation.
4931 *
4932 * 2. An inode pa that is in the rbtree can only have its
4933 * pa_free become zero when another thread calls:
4934 * ext4_mb_new_blocks
4935 * ext4_mb_use_preallocated
4936 * ext4_mb_use_inode_pa
4937 *
4938 * 3. Further, after the above calls make pa_free == 0, we will
4939 * immediately remove it from the rbtree in:
4940 * ext4_mb_new_blocks
4941 * ext4_mb_release_context
4942 * ext4_mb_put_pa
4943 *
4944 * 4. Since pa_free becoming 0 and the pa getting removed
4945 * from the tree both happen in ext4_mb_new_blocks, which is always
4946 * called with i_data_sem held for data allocations, we can be
4947 * sure that another process will never see a pa in rbtree with
4948 * pa_free == 0.
4949 */
4950 WARN_ON_ONCE(tmp_pa->pa_free == 0);
4951 }
4952 spin_unlock(&tmp_pa->pa_lock);
4953 try_group_pa:
4954 read_unlock(&ei->i_prealloc_lock);
4955
4956 /* can we use group allocation? */
4957 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4958 return false;
4959
4960 /* inode may have no locality group for some reason */
4961 lg = ac->ac_lg;
4962 if (lg == NULL)
4963 return false;
4964 order = fls(ac->ac_o_ex.fe_len) - 1;
4965 if (order > PREALLOC_TB_SIZE - 1)
4966 /* The max size of hash table is PREALLOC_TB_SIZE */
4967 order = PREALLOC_TB_SIZE - 1;
4968
4969 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4970 /*
4971 * search for the prealloc space that has the
4972 * minimal distance from the goal block.
4973 */
4974 for (i = order; i < PREALLOC_TB_SIZE; i++) {
4975 rcu_read_lock();
4976 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
4977 pa_node.lg_list) {
4978 spin_lock(&tmp_pa->pa_lock);
4979 if (tmp_pa->pa_deleted == 0 &&
4980 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
4981
4982 cpa = ext4_mb_check_group_pa(goal_block,
4983 tmp_pa, cpa);
4984 }
4985 spin_unlock(&tmp_pa->pa_lock);
4986 }
4987 rcu_read_unlock();
4988 }
4989 if (cpa) {
4990 ext4_mb_use_group_pa(ac, cpa);
4991 return true;
4992 }
4993 return false;
4994 }
4995
4996 /*
4997 * the function goes through all preallocations in this group and marks them
4998 * used in the in-core bitmap. the buddy must be generated from this bitmap
4999 * Needs to be called with the ext4 group lock held
5000 */
5001 static noinline_for_stack
5002 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
5003 ext4_group_t group)
5004 {
5005 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
5006 struct ext4_prealloc_space *pa;
5007 struct list_head *cur;
5008 ext4_group_t groupnr;
5009 ext4_grpblk_t start;
5010 int preallocated = 0;
5011 int len;
5012
5013 if (!grp)
5014 return;
5015
5016 /* all forms of preallocation discard first load the group,
5017 * so the only competing code is preallocation use.
5018 * we don't need any locking here.
5019 * notice we do NOT ignore preallocations with pa_deleted,
5020 * otherwise we could leave used blocks available for
5021 * allocation in buddy when concurrent ext4_mb_put_pa()
5022 * is dropping preallocation
5023 */
5024 list_for_each(cur, &grp->bb_prealloc_list) {
5025 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
5026 spin_lock(&pa->pa_lock);
5027 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5028 &groupnr, &start);
5029 len = pa->pa_len;
5030 spin_unlock(&pa->pa_lock);
5031 if (unlikely(len == 0))
5032 continue;
5033 BUG_ON(groupnr != group);
5034 mb_set_bits(bitmap, start, len);
5035 preallocated += len;
5036 }
5037 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
5038 }
5039
5040 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
5041 struct ext4_prealloc_space *pa)
5042 {
5043 struct ext4_inode_info *ei;
5044
5045 if (pa->pa_deleted) {
5046 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
5047 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
5048 pa->pa_len);
5049 return;
5050 }
5051
5052 pa->pa_deleted = 1;
5053
5054 if (pa->pa_type == MB_INODE_PA) {
5055 ei = EXT4_I(pa->pa_inode);
5056 atomic_dec(&ei->i_prealloc_active);
5057 }
5058 }
5059
5060 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
5061 {
5062 BUG_ON(!pa);
5063 BUG_ON(atomic_read(&pa->pa_count));
5064 BUG_ON(pa->pa_deleted == 0);
5065 kmem_cache_free(ext4_pspace_cachep, pa);
5066 }
5067
5068 static void ext4_mb_pa_callback(struct rcu_head *head)
5069 {
5070 struct ext4_prealloc_space *pa;
5071
5072 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
5073 ext4_mb_pa_free(pa);
5074 }
5075
5076 /*
5077 * drops a reference to preallocated space descriptor
5078 * if this was the last reference and the space is consumed
5079 */
5080 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
5081 struct super_block *sb, struct ext4_prealloc_space *pa)
5082 {
5083 ext4_group_t grp;
5084 ext4_fsblk_t grp_blk;
5085 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
5086
5087 /* in this short window concurrent discard can set pa_deleted */
5088 spin_lock(&pa->pa_lock);
5089 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5090 spin_unlock(&pa->pa_lock);
5091 return;
5092 }
5093
5094 if (pa->pa_deleted == 1) {
5095 spin_unlock(&pa->pa_lock);
5096 return;
5097 }
5098
5099 ext4_mb_mark_pa_deleted(sb, pa);
5100 spin_unlock(&pa->pa_lock);
5101
5102 grp_blk = pa->pa_pstart;
5103 /*
5104 * If doing group-based preallocation, pa_pstart may be in the
5105 * next group when pa is used up
5106 */
5107 if (pa->pa_type == MB_GROUP_PA)
5108 grp_blk--;
5109
5110 grp = ext4_get_group_number(sb, grp_blk);
5111
5112 /*
5113 * possible race:
5114 *
5115 * P1 (buddy init) P2 (regular allocation)
5116 * find block B in PA
5117 * copy on-disk bitmap to buddy
5118 * mark B in on-disk bitmap
5119 * drop PA from group
5120 * mark all PAs in buddy
5121 *
5122 * thus, P1 initializes buddy with B available. to prevent this
5123 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
5124 * against that pair
5125 */
5126 ext4_lock_group(sb, grp);
5127 list_del(&pa->pa_group_list);
5128 ext4_unlock_group(sb, grp);
5129
5130 if (pa->pa_type == MB_INODE_PA) {
5131 write_lock(pa->pa_node_lock.inode_lock);
5132 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5133 write_unlock(pa->pa_node_lock.inode_lock);
5134 ext4_mb_pa_free(pa);
5135 } else {
5136 spin_lock(pa->pa_node_lock.lg_lock);
5137 list_del_rcu(&pa->pa_node.lg_list);
5138 spin_unlock(pa->pa_node_lock.lg_lock);
5139 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5140 }
5141 }
5142
5143 static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
5144 {
5145 struct rb_node **iter = &root->rb_node, *parent = NULL;
5146 struct ext4_prealloc_space *iter_pa, *new_pa;
5147 ext4_lblk_t iter_start, new_start;
5148
5149 while (*iter) {
5150 iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
5151 pa_node.inode_node);
5152 new_pa = rb_entry(new, struct ext4_prealloc_space,
5153 pa_node.inode_node);
5154 iter_start = iter_pa->pa_lstart;
5155 new_start = new_pa->pa_lstart;
5156
5157 parent = *iter;
5158 if (new_start < iter_start)
5159 iter = &((*iter)->rb_left);
5160 else
5161 iter = &((*iter)->rb_right);
5162 }
5163
5164 rb_link_node(new, parent, iter);
5165 rb_insert_color(new, root);
5166 }
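/*
 * An illustrative shape (made-up pa_lstart values): inserting PAs with
 * pa_lstart 200, 50 and then 200 again yields
 *
 *        200
 *       /   \
 *     50     200
 *
 * since equal keys fall through to the right subtree. Lookups walking
 * i_prealloc_node therefore have to tolerate duplicate pa_lstart keys.
 */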
5167
5168 /*
5169 * creates new preallocated space for given inode
5170 */
5171 static noinline_for_stack void
5172 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
5173 {
5174 struct super_block *sb = ac->ac_sb;
5175 struct ext4_sb_info *sbi = EXT4_SB(sb);
5176 struct ext4_prealloc_space *pa;
5177 struct ext4_group_info *grp;
5178 struct ext4_inode_info *ei;
5179
5180 /* preallocate only when found space is larger than requested */
5181 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5182 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5183 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5184 BUG_ON(ac->ac_pa == NULL);
5185
5186 pa = ac->ac_pa;
5187
5188 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
5189 struct ext4_free_extent ex = {
5190 .fe_logical = ac->ac_g_ex.fe_logical,
5191 .fe_len = ac->ac_orig_goal_len,
5192 };
5193 loff_t orig_goal_end = extent_logical_end(sbi, &ex);
5194 loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
5195
5196 /*
5197 * We can't allocate as much as normalizer wants, so we try
5198 * to get proper lstart to cover the original request, except
5199 * when the goal doesn't cover the original request as below:
5200 *
5201 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
5202 * best_ex:0/200(200) -> adjusted: 1848/2048(200)
5203 */
5204 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
5205 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
5206
5207 /*
5208 * Use the below logic for adjusting best extent as it keeps
5209 * fragmentation in check while ensuring logical range of best
5210 * extent doesn't overflow out of goal extent:
5211 *
5212 * 1. Check if best ex can be kept at end of goal (before
5213 * cr_best_avail trimmed it) and still cover original start
5214 * 2. Else, check if best ex can be kept at start of goal and
5215 * still cover original end
5216 * 3. Else, keep the best ex at start of original request.
5217 */
5218 ex.fe_len = ac->ac_b_ex.fe_len;
5219
5220 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
5221 if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
5222 goto adjust_bex;
5223
5224 ex.fe_logical = ac->ac_g_ex.fe_logical;
5225 if (o_ex_end <= extent_logical_end(sbi, &ex))
5226 goto adjust_bex;
5227
5228 ex.fe_logical = ac->ac_o_ex.fe_logical;
5229 adjust_bex:
5230 ac->ac_b_ex.fe_logical = ex.fe_logical;
5231
5232 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
5233 BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
5234 }
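/*
 * Reusing the numbers from the comment above as a worked example
 * (assuming one block per cluster): with goal 0/2048, original request
 * 2045/2055(10) and a best length of 200, step 1 tries
 * fe_logical = 2048 - 200 = 1848. The original start 2045 >= 1848, so
 * the best extent becomes 1848/2048(200): it covers the original start,
 * and (as noted above) the goal itself cannot cover the original end
 * in this case.
 */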
5235
5236 pa->pa_lstart = ac->ac_b_ex.fe_logical;
5237 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5238 pa->pa_len = ac->ac_b_ex.fe_len;
5239 pa->pa_free = pa->pa_len;
5240 spin_lock_init(&pa->pa_lock);
5241 INIT_LIST_HEAD(&pa->pa_group_list);
5242 pa->pa_deleted = 0;
5243 pa->pa_type = MB_INODE_PA;
5244
5245 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5246 pa->pa_len, pa->pa_lstart);
5247 trace_ext4_mb_new_inode_pa(ac, pa);
5248
5249 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
5250 ext4_mb_use_inode_pa(ac, pa);
5251
5252 ei = EXT4_I(ac->ac_inode);
5253 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5254 if (!grp)
5255 return;
5256
5257 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
5258 pa->pa_inode = ac->ac_inode;
5259
5260 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5261
5262 write_lock(pa->pa_node_lock.inode_lock);
5263 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
5264 write_unlock(pa->pa_node_lock.inode_lock);
5265 atomic_inc(&ei->i_prealloc_active);
5266 }
5267
5268 /*
5269 * creates new preallocated space for the locality group this inode belongs to
5270 */
5271 static noinline_for_stack void
5272 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
5273 {
5274 struct super_block *sb = ac->ac_sb;
5275 struct ext4_locality_group *lg;
5276 struct ext4_prealloc_space *pa;
5277 struct ext4_group_info *grp;
5278
5279 /* preallocate only when found space is larger than requested */
5280 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5281 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5282 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5283 BUG_ON(ac->ac_pa == NULL);
5284
5285 pa = ac->ac_pa;
5286
5287 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5288 pa->pa_lstart = pa->pa_pstart;
5289 pa->pa_len = ac->ac_b_ex.fe_len;
5290 pa->pa_free = pa->pa_len;
5291 spin_lock_init(&pa->pa_lock);
5292 INIT_LIST_HEAD(&pa->pa_node.lg_list);
5293 INIT_LIST_HEAD(&pa->pa_group_list);
5294 pa->pa_deleted = 0;
5295 pa->pa_type = MB_GROUP_PA;
5296
5297 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5298 pa->pa_len, pa->pa_lstart);
5299 trace_ext4_mb_new_group_pa(ac, pa);
5300
5301 ext4_mb_use_group_pa(ac, pa);
5302 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5303
5304 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5305 if (!grp)
5306 return;
5307 lg = ac->ac_lg;
5308 BUG_ON(lg == NULL);
5309
5310 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
5311 pa->pa_inode = NULL;
5312
5313 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5314
5315 /*
5316 * We will later add the new pa to the right bucket
5317 * after updating the pa_free in ext4_mb_release_context
5318 */
5319 }
5320
5321 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
5322 {
5323 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5324 ext4_mb_new_group_pa(ac);
5325 else
5326 ext4_mb_new_inode_pa(ac);
5327 }
5328
5329 /*
5330 * finds all unused blocks in on-disk bitmap, frees them in
5331 * in-core bitmap and buddy.
5332 * @pa must be unlinked from inode and group lists, so that
5333 * nobody else can find/use it.
5334 * the caller MUST hold group/inode locks.
5335 * TODO: optimize the case when there are no in-core structures yet
5336 */
5337 static noinline_for_stack int
5338 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
5339 struct ext4_prealloc_space *pa)
5340 {
5341 struct super_block *sb = e4b->bd_sb;
5342 struct ext4_sb_info *sbi = EXT4_SB(sb);
5343 unsigned int end;
5344 unsigned int next;
5345 ext4_group_t group;
5346 ext4_grpblk_t bit;
5347 unsigned long long grp_blk_start;
5348 int free = 0;
5349
5350 BUG_ON(pa->pa_deleted == 0);
5351 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5352 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
5353 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
5354 end = bit + pa->pa_len;
5355
5356 while (bit < end) {
5357 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
5358 if (bit >= end)
5359 break;
5360 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
5361 mb_debug(sb, "free preallocated %u/%u in group %u\n",
5362 (unsigned) ext4_group_first_block_no(sb, group) + bit,
5363 (unsigned) next - bit, (unsigned) group);
5364 free += next - bit;
5365
5366 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
5367 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
5368 EXT4_C2B(sbi, bit)),
5369 next - bit);
5370 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
5371 bit = next + 1;
5372 }
5373 if (free != pa->pa_free) {
5374 ext4_msg(e4b->bd_sb, KERN_CRIT,
5375 "pa %p: logic %lu, phys. %lu, len %d",
5376 pa, (unsigned long) pa->pa_lstart,
5377 (unsigned long) pa->pa_pstart,
5378 pa->pa_len);
5379 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
5380 free, pa->pa_free);
5381 /*
5382 * pa is already deleted so we use the value obtained
5383 * from the bitmap and continue.
5384 */
5385 }
5386 atomic_add(free, &sbi->s_mb_discarded);
5387
5388 return 0;
5389 }
5390
5391 static noinline_for_stack int
5392 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
5393 struct ext4_prealloc_space *pa)
5394 {
5395 struct super_block *sb = e4b->bd_sb;
5396 ext4_group_t group;
5397 ext4_grpblk_t bit;
5398
5399 trace_ext4_mb_release_group_pa(sb, pa);
5400 BUG_ON(pa->pa_deleted == 0);
5401 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5402 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
5403 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
5404 e4b->bd_group, group, pa->pa_pstart);
5405 return 0;
5406 }
5407 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
5408 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
5409 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
5410
5411 return 0;
5412 }
5413
5414 /*
5415 * releases all preallocations in given group
5416 *
5417 * first, we need to decide discard policy:
5418 * - when do we discard
5419 * 1) ENOSPC
5420 * - how many do we discard
5421 * 1) how many requested
5422 */
5423 static noinline_for_stack int
5424 ext4_mb_discard_group_preallocations(struct super_block *sb,
5425 ext4_group_t group, int *busy)
5426 {
5427 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
5428 struct buffer_head *bitmap_bh = NULL;
5429 struct ext4_prealloc_space *pa, *tmp;
5430 LIST_HEAD(list);
5431 struct ext4_buddy e4b;
5432 struct ext4_inode_info *ei;
5433 int err;
5434 int free = 0;
5435
5436 if (!grp)
5437 return 0;
5438 mb_debug(sb, "discard preallocation for group %u\n", group);
5439 if (list_empty(&grp->bb_prealloc_list))
5440 goto out_dbg;
5441
5442 bitmap_bh = ext4_read_block_bitmap(sb, group);
5443 if (IS_ERR(bitmap_bh)) {
5444 err = PTR_ERR(bitmap_bh);
5445 ext4_error_err(sb, -err,
5446 "Error %d reading block bitmap for %u",
5447 err, group);
5448 goto out_dbg;
5449 }
5450
5451 err = ext4_mb_load_buddy(sb, group, &e4b);
5452 if (err) {
5453 ext4_warning(sb, "Error %d loading buddy information for %u",
5454 err, group);
5455 put_bh(bitmap_bh);
5456 goto out_dbg;
5457 }
5458
5459 ext4_lock_group(sb, group);
5460 list_for_each_entry_safe(pa, tmp,
5461 &grp->bb_prealloc_list, pa_group_list) {
5462 spin_lock(&pa->pa_lock);
5463 if (atomic_read(&pa->pa_count)) {
5464 spin_unlock(&pa->pa_lock);
5465 *busy = 1;
5466 continue;
5467 }
5468 if (pa->pa_deleted) {
5469 spin_unlock(&pa->pa_lock);
5470 continue;
5471 }
5472
5473 /* seems this one can be freed ... */
5474 ext4_mb_mark_pa_deleted(sb, pa);
5475
5476 if (!free)
5477 this_cpu_inc(discard_pa_seq);
5478
5479 /* we can trust pa_free ... */
5480 free += pa->pa_free;
5481
5482 spin_unlock(&pa->pa_lock);
5483
5484 list_del(&pa->pa_group_list);
5485 list_add(&pa->u.pa_tmp_list, &list);
5486 }
5487
5488 /* now free all selected PAs */
5489 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5490
5491 /* remove from object (inode or locality group) */
5492 if (pa->pa_type == MB_GROUP_PA) {
5493 spin_lock(pa->pa_node_lock.lg_lock);
5494 list_del_rcu(&pa->pa_node.lg_list);
5495 spin_unlock(pa->pa_node_lock.lg_lock);
5496 } else {
5497 write_lock(pa->pa_node_lock.inode_lock);
5498 ei = EXT4_I(pa->pa_inode);
5499 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5500 write_unlock(pa->pa_node_lock.inode_lock);
5501 }
5502
5503 list_del(&pa->u.pa_tmp_list);
5504
5505 if (pa->pa_type == MB_GROUP_PA) {
5506 ext4_mb_release_group_pa(&e4b, pa);
5507 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5508 } else {
5509 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5510 ext4_mb_pa_free(pa);
5511 }
5512 }
5513
5514 ext4_unlock_group(sb, group);
5515 ext4_mb_unload_buddy(&e4b);
5516 put_bh(bitmap_bh);
5517 out_dbg:
5518 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
5519 free, group, grp->bb_free);
5520 return free;
5521 }
5522
5523 /*
5524 * releases all non-used preallocated blocks for given inode
5525 *
5526 * It's important to discard preallocations under i_data_sem
5527 * We don't want another block to be served from the prealloc
5528 * space when we are discarding the inode prealloc space.
5529 *
5530 * FIXME!! Make sure it is valid at all the call sites
5531 */
5532 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
5533 {
5534 struct ext4_inode_info *ei = EXT4_I(inode);
5535 struct super_block *sb = inode->i_sb;
5536 struct buffer_head *bitmap_bh = NULL;
5537 struct ext4_prealloc_space *pa, *tmp;
5538 ext4_group_t group = 0;
5539 LIST_HEAD(list);
5540 struct ext4_buddy e4b;
5541 struct rb_node *iter;
5542 int err;
5543
5544 if (!S_ISREG(inode->i_mode)) {
5545 return;
5546 }
5547
5548 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5549 return;
5550
5551 mb_debug(sb, "discard preallocation for inode %lu\n",
5552 inode->i_ino);
5553 trace_ext4_discard_preallocations(inode,
5554 atomic_read(&ei->i_prealloc_active), needed);
5555
5556 if (needed == 0)
5557 needed = UINT_MAX;
5558
5559 repeat:
5560 /* first, collect all pa's in the inode */
5561 write_lock(&ei->i_prealloc_lock);
5562 for (iter = rb_first(&ei->i_prealloc_node); iter && needed;
5563 iter = rb_next(iter)) {
5564 pa = rb_entry(iter, struct ext4_prealloc_space,
5565 pa_node.inode_node);
5566 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
5567
5568 spin_lock(&pa->pa_lock);
5569 if (atomic_read(&pa->pa_count)) {
5570 /* this shouldn't happen often - nobody should
5571 * use preallocation while we're discarding it */
5572 spin_unlock(&pa->pa_lock);
5573 write_unlock(&ei->i_prealloc_lock);
5574 ext4_msg(sb, KERN_ERR,
5575 "uh-oh! used pa while discarding");
5576 WARN_ON(1);
5577 schedule_timeout_uninterruptible(HZ);
5578 goto repeat;
5579
5580 }
5581 if (pa->pa_deleted == 0) {
5582 ext4_mb_mark_pa_deleted(sb, pa);
5583 spin_unlock(&pa->pa_lock);
5584 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5585 list_add(&pa->u.pa_tmp_list, &list);
5586 needed--;
5587 continue;
5588 }
5589
5590 /* someone is deleting pa right now */
5591 spin_unlock(&pa->pa_lock);
5592 write_unlock(&ei->i_prealloc_lock);
5593
5594 /* we have to wait here because pa_deleted
5595 * doesn't mean pa is already unlinked from
5596 * the list. since we might be called from
5597 * ->clear_inode(), the inode would get freed
5598 * and a concurrent thread unlinking the pa
5599 * from the inode's list could access already
5600 * freed memory, bad-bad-bad */
5601
5602 /* XXX: if this happens too often, we can
5603 * add a flag to force wait only in case
5604 * of ->clear_inode(), but not in case of
5605 * regular truncate */
5606 schedule_timeout_uninterruptible(HZ);
5607 goto repeat;
5608 }
5609 write_unlock(&ei->i_prealloc_lock);
5610
5611 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5612 BUG_ON(pa->pa_type != MB_INODE_PA);
5613 group = ext4_get_group_number(sb, pa->pa_pstart);
5614
5615 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5616 GFP_NOFS|__GFP_NOFAIL);
5617 if (err) {
5618 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5619 err, group);
5620 continue;
5621 }
5622
5623 bitmap_bh = ext4_read_block_bitmap(sb, group);
5624 if (IS_ERR(bitmap_bh)) {
5625 err = PTR_ERR(bitmap_bh);
5626 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5627 err, group);
5628 ext4_mb_unload_buddy(&e4b);
5629 continue;
5630 }
5631
5632 ext4_lock_group(sb, group);
5633 list_del(&pa->pa_group_list);
5634 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5635 ext4_unlock_group(sb, group);
5636
5637 ext4_mb_unload_buddy(&e4b);
5638 put_bh(bitmap_bh);
5639
5640 list_del(&pa->u.pa_tmp_list);
5641 ext4_mb_pa_free(pa);
5642 }
5643 }
5644
5645 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5646 {
5647 struct ext4_prealloc_space *pa;
5648
5649 BUG_ON(ext4_pspace_cachep == NULL);
5650 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5651 if (!pa)
5652 return -ENOMEM;
5653 atomic_set(&pa->pa_count, 1);
5654 ac->ac_pa = pa;
5655 return 0;
5656 }
5657
5658 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac)
5659 {
5660 struct ext4_prealloc_space *pa = ac->ac_pa;
5661
5662 BUG_ON(!pa);
5663 ac->ac_pa = NULL;
5664 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5665 /*
5666 * the current function is only called due to an error or because the
5667 * len of found blocks < len of requested blocks, hence the PA has not
5668 * been added to grp->bb_prealloc_list. So we don't need to lock it
5669 */
5670 pa->pa_deleted = 1;
5671 ext4_mb_pa_free(pa);
5672 }
5673
5674 #ifdef CONFIG_EXT4_DEBUG
5675 static inline void ext4_mb_show_pa(struct super_block *sb)
5676 {
5677 ext4_group_t i, ngroups;
5678
5679 if (ext4_forced_shutdown(sb))
5680 return;
5681
5682 ngroups = ext4_get_groups_count(sb);
5683 mb_debug(sb, "groups: ");
5684 for (i = 0; i < ngroups; i++) {
5685 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5686 struct ext4_prealloc_space *pa;
5687 ext4_grpblk_t start;
5688 struct list_head *cur;
5689
5690 if (!grp)
5691 continue;
5692 ext4_lock_group(sb, i);
5693 list_for_each(cur, &grp->bb_prealloc_list) {
5694 pa = list_entry(cur, struct ext4_prealloc_space,
5695 pa_group_list);
5696 spin_lock(&pa->pa_lock);
5697 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5698 NULL, &start);
5699 spin_unlock(&pa->pa_lock);
5700 mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5701 pa->pa_len);
5702 }
5703 ext4_unlock_group(sb, i);
5704 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5705 grp->bb_fragments);
5706 }
5707 }
5708
5709 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5710 {
5711 struct super_block *sb = ac->ac_sb;
5712
5713 if (ext4_forced_shutdown(sb))
5714 return;
5715
5716 mb_debug(sb, "Can't allocate:"
5717 " Allocation context details:");
5718 mb_debug(sb, "status %u flags 0x%x",
5719 ac->ac_status, ac->ac_flags);
5720 mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5721 "goal %lu/%lu/%lu@%lu, "
5722 "best %lu/%lu/%lu@%lu cr %d",
5723 (unsigned long)ac->ac_o_ex.fe_group,
5724 (unsigned long)ac->ac_o_ex.fe_start,
5725 (unsigned long)ac->ac_o_ex.fe_len,
5726 (unsigned long)ac->ac_o_ex.fe_logical,
5727 (unsigned long)ac->ac_g_ex.fe_group,
5728 (unsigned long)ac->ac_g_ex.fe_start,
5729 (unsigned long)ac->ac_g_ex.fe_len,
5730 (unsigned long)ac->ac_g_ex.fe_logical,
5731 (unsigned long)ac->ac_b_ex.fe_group,
5732 (unsigned long)ac->ac_b_ex.fe_start,
5733 (unsigned long)ac->ac_b_ex.fe_len,
5734 (unsigned long)ac->ac_b_ex.fe_logical,
5735 (int)ac->ac_criteria);
5736 mb_debug(sb, "%u found", ac->ac_found);
5737 mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no");
5738 if (ac->ac_pa)
5739 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
5740 "group pa" : "inode pa");
5741 ext4_mb_show_pa(sb);
5742 }
5743 #else
5744 static inline void ext4_mb_show_pa(struct super_block *sb)
5745 {
5746 }
5747 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5748 {
5749 ext4_mb_show_pa(ac->ac_sb);
5750 }
5751 #endif
5752
5753 /*
5754 * We use locality group preallocation for small files. The size of the
5755 * file is determined by the current size or the resulting size after
5756 * allocation, whichever is larger.
5757 *
5758 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5759 */
5760 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5761 {
5762 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5763 int bsbits = ac->ac_sb->s_blocksize_bits;
5764 loff_t size, isize;
5765 bool inode_pa_eligible, group_pa_eligible;
5766
5767 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5768 return;
5769
5770 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5771 return;
5772
5773 group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5774 inode_pa_eligible = true;
5775 size = extent_logical_end(sbi, &ac->ac_o_ex);
5776 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5777 >> bsbits;
5778
5779 /* No point in using inode preallocation for closed files */
5780 if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5781 !inode_is_open_for_write(ac->ac_inode))
5782 inode_pa_eligible = false;
5783
5784 size = max(size, isize);
5785 /* Don't use group allocation for large files */
5786 if (size > sbi->s_mb_stream_request)
5787 group_pa_eligible = false;
5788
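/*
 * A concrete reading of the checks above (assuming the default
 * s_mb_stream_request of 16 blocks): a write leaving the file at
 * 10 blocks keeps group preallocation eligible, while one growing
 * it to 64 blocks clears group_pa_eligible and takes the stream
 * (inode-PA) path below.
 */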
5789 if (!group_pa_eligible) {
5790 if (inode_pa_eligible)
5791 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5792 else
5793 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5794 return;
5795 }
5796
5797 BUG_ON(ac->ac_lg != NULL);
5798 /*
5799 * locality group prealloc space is per cpu. The reason for having
5800 * a per-cpu locality group is to reduce the contention between block
5801 * requests from multiple CPUs.
5802 */
5803 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5804
5805 /* we're going to use group allocation */
5806 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5807
5808 /* serialize all allocations in the group */
5809 mutex_lock(&ac->ac_lg->lg_mutex);
5810 }
5811
5812 static noinline_for_stack void
5813 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5814 struct ext4_allocation_request *ar)
5815 {
5816 struct super_block *sb = ar->inode->i_sb;
5817 struct ext4_sb_info *sbi = EXT4_SB(sb);
5818 struct ext4_super_block *es = sbi->s_es;
5819 ext4_group_t group;
5820 unsigned int len;
5821 ext4_fsblk_t goal;
5822 ext4_grpblk_t block;
5823
5824 /* we can't allocate > group size */
5825 len = ar->len;
5826
5827 /* just a dirty hack to filter too big requests */
5828 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5829 len = EXT4_CLUSTERS_PER_GROUP(sb);
5830
5831 /* start searching from the goal */
5832 goal = ar->goal;
5833 if (goal < le32_to_cpu(es->s_first_data_block) ||
5834 goal >= ext4_blocks_count(es))
5835 goal = le32_to_cpu(es->s_first_data_block);
5836 ext4_get_group_no_and_offset(sb, goal, &group, &block);
5837
5838 /* set up allocation goals */
5839 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5840 ac->ac_status = AC_STATUS_CONTINUE;
5841 ac->ac_sb = sb;
5842 ac->ac_inode = ar->inode;
5843 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5844 ac->ac_o_ex.fe_group = group;
5845 ac->ac_o_ex.fe_start = block;
5846 ac->ac_o_ex.fe_len = len;
5847 ac->ac_g_ex = ac->ac_o_ex;
5848 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
5849 ac->ac_flags = ar->flags;
5850
5851 /* we have to define context: we'll work with a file or
5852 * locality group. this is a policy, actually */
5853 ext4_mb_group_or_file(ac);
5854
5855 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5856 "left: %u/%u, right %u/%u to %swritable\n",
5857 (unsigned) ar->len, (unsigned) ar->logical,
5858 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5859 (unsigned) ar->lleft, (unsigned) ar->pleft,
5860 (unsigned) ar->lright, (unsigned) ar->pright,
5861 inode_is_open_for_write(ar->inode) ? "" : "non-");
5862 }
5863
5864 static noinline_for_stack void
5865 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5866 struct ext4_locality_group *lg,
5867 int order, int total_entries)
5868 {
5869 ext4_group_t group = 0;
5870 struct ext4_buddy e4b;
5871 LIST_HEAD(discard_list);
5872 struct ext4_prealloc_space *pa, *tmp;
5873
5874 mb_debug(sb, "discard locality group preallocation\n");
5875
5876 spin_lock(&lg->lg_prealloc_lock);
5877 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5878 pa_node.lg_list,
5879 lockdep_is_held(&lg->lg_prealloc_lock)) {
5880 spin_lock(&pa->pa_lock);
5881 if (atomic_read(&pa->pa_count)) {
5882 /*
5883 * This is the pa that we just used
5884 * for block allocation, so don't
5885 * free it
5886 */
5887 spin_unlock(&pa->pa_lock);
5888 continue;
5889 }
5890 if (pa->pa_deleted) {
5891 spin_unlock(&pa->pa_lock);
5892 continue;
5893 }
5894 /* only lg prealloc space */
5895 BUG_ON(pa->pa_type != MB_GROUP_PA);
5896
5897 /* seems this one can be freed ... */
5898 ext4_mb_mark_pa_deleted(sb, pa);
5899 spin_unlock(&pa->pa_lock);
5900
5901 list_del_rcu(&pa->pa_node.lg_list);
5902 list_add(&pa->u.pa_tmp_list, &discard_list);
5903
5904 total_entries--;
5905 if (total_entries <= 5) {
5906 /*
5907 * we want to keep only 5 entries,
5908 * allowing the list to grow to 8. This
5909 * makes sure we don't call discard
5910 * again soon for this list.
5911 */
5912 break;
5913 }
5914 }
5915 spin_unlock(&lg->lg_prealloc_lock);
5916
5917 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5918 int err;
5919
5920 group = ext4_get_group_number(sb, pa->pa_pstart);
5921 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5922 GFP_NOFS|__GFP_NOFAIL);
5923 if (err) {
5924 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5925 err, group);
5926 continue;
5927 }
5928 ext4_lock_group(sb, group);
5929 list_del(&pa->pa_group_list);
5930 ext4_mb_release_group_pa(&e4b, pa);
5931 ext4_unlock_group(sb, group);
5932
5933 ext4_mb_unload_buddy(&e4b);
5934 list_del(&pa->u.pa_tmp_list);
5935 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5936 }
5937 }
5938
5939 /*
5940 * We have incremented pa_count. So it cannot be freed at this
5941 * point. Also we hold lg_mutex. So no parallel allocation is
5942 * possible from this lg. That means pa_free cannot be updated.
5943 *
5944 * A parallel ext4_mb_discard_group_preallocations is possible,
5945 * which can cause the lg_prealloc_list to be updated.
5946 */
5947
5948 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5949 {
5950 int order, added = 0, lg_prealloc_count = 1;
5951 struct super_block *sb = ac->ac_sb;
5952 struct ext4_locality_group *lg = ac->ac_lg;
5953 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5954
5955 order = fls(pa->pa_free) - 1;
5956 if (order > PREALLOC_TB_SIZE - 1)
5957 /* The max size of hash table is PREALLOC_TB_SIZE */
5958 order = PREALLOC_TB_SIZE - 1;
5959 /* Add the prealloc space to lg */
5960 spin_lock(&lg->lg_prealloc_lock);
5961 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5962 pa_node.lg_list,
5963 lockdep_is_held(&lg->lg_prealloc_lock)) {
5964 spin_lock(&tmp_pa->pa_lock);
5965 if (tmp_pa->pa_deleted) {
5966 spin_unlock(&tmp_pa->pa_lock);
5967 continue;
5968 }
5969 if (!added && pa->pa_free < tmp_pa->pa_free) {
5970 /* Add to the tail of the previous entry */
5971 list_add_tail_rcu(&pa->pa_node.lg_list,
5972 &tmp_pa->pa_node.lg_list);
5973 added = 1;
5974 /*
5975 * we want to count the total
5976 * number of entries in the list
5977 */
5978 }
5979 spin_unlock(&tmp_pa->pa_lock);
5980 lg_prealloc_count++;
5981 }
5982 if (!added)
5983 list_add_tail_rcu(&pa->pa_node.lg_list,
5984 &lg->lg_prealloc_list[order]);
5985 spin_unlock(&lg->lg_prealloc_lock);
5986
5987 /* Now trim the list to be not more than 8 elements */
5988 if (lg_prealloc_count > 8)
5989 ext4_mb_discard_lg_preallocations(sb, lg,
5990 order, lg_prealloc_count);
5991 }
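/*
 * Net effect of the trim above, as a worked example: the bucket may
 * grow to 8 entries; on counting a 9th, the discard pass removes
 * unused entries until only 5 remain, so the 5..8 hysteresis keeps
 * back-to-back insertions from triggering a discard every time.
 */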
5992
5993 /*
5994 * release all resource we used in allocation
5995 */
5996 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5997 {
5998 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5999 struct ext4_prealloc_space *pa = ac->ac_pa;
6000 if (pa) {
6001 if (pa->pa_type == MB_GROUP_PA) {
6002 /* see comment in ext4_mb_use_group_pa() */
6003 spin_lock(&pa->pa_lock);
6004 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
6005 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
6006 pa->pa_free -= ac->ac_b_ex.fe_len;
6007 pa->pa_len -= ac->ac_b_ex.fe_len;
6008 spin_unlock(&pa->pa_lock);
6009
6010 /*
6011 * We want to add the pa to the right bucket.
6012 * Remove it from the list and while adding
6013 * make sure the list to which we are adding
6014 * doesn't grow big.
6015 */
6016 if (likely(pa->pa_free)) {
6017 spin_lock(pa->pa_node_lock.lg_lock);
6018 list_del_rcu(&pa->pa_node.lg_list);
6019 spin_unlock(pa->pa_node_lock.lg_lock);
6020 ext4_mb_add_n_trim(ac);
6021 }
6022 }
6023
6024 ext4_mb_put_pa(ac, ac->ac_sb, pa);
6025 }
6026 if (ac->ac_bitmap_page)
6027 put_page(ac->ac_bitmap_page);
6028 if (ac->ac_buddy_page)
6029 put_page(ac->ac_buddy_page);
6030 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
6031 mutex_unlock(&ac->ac_lg->lg_mutex);
6032 ext4_mb_collect_stats(ac);
6033 return 0;
6034 }
6035
6036 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
6037 {
6038 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
6039 int ret;
6040 int freed = 0, busy = 0;
6041 int retry = 0;
6042
6043 trace_ext4_mb_discard_preallocations(sb, needed);
6044
6045 if (needed == 0)
6046 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
6047 repeat:
6048 for (i = 0; i < ngroups && needed > 0; i++) {
6049 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
6050 freed += ret;
6051 needed -= ret;
6052 cond_resched();
6053 }
6054
6055 if (needed > 0 && busy && ++retry < 3) {
6056 busy = 0;
6057 goto repeat;
6058 }
6059
6060 return freed;
6061 }
6062
6063 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
6064 struct ext4_allocation_context *ac, u64 *seq)
6065 {
6066 int freed;
6067 u64 seq_retry = 0;
6068 bool ret = false;
6069
6070 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
6071 if (freed) {
6072 ret = true;
6073 goto out_dbg;
6074 }
6075 seq_retry = ext4_get_discard_pa_seq_sum();
6076 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
6077 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
6078 *seq = seq_retry;
6079 ret = true;
6080 }
6081
6082 out_dbg:
6083 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
6084 return ret;
6085 }
6086
6087 /*
6088 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
6089 * linearly starting at the goal block and also excludes the blocks which
6090 * are going to be in use after fast commit replay.
6091 */
6092 static ext4_fsblk_t
6093 ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
6094 {
6095 struct buffer_head *bitmap_bh;
6096 struct super_block *sb = ar->inode->i_sb;
6097 struct ext4_sb_info *sbi = EXT4_SB(sb);
6098 ext4_group_t group, nr;
6099 ext4_grpblk_t blkoff;
6100 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
6101 ext4_grpblk_t i = 0;
6102 ext4_fsblk_t goal, block;
6103 struct ext4_super_block *es = sbi->s_es;
6104
6105 goal = ar->goal;
6106 if (goal < le32_to_cpu(es->s_first_data_block) ||
6107 goal >= ext4_blocks_count(es))
6108 goal = le32_to_cpu(es->s_first_data_block);
6109
6110 ar->len = 0;
6111 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
6112 for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
6113 bitmap_bh = ext4_read_block_bitmap(sb, group);
6114 if (IS_ERR(bitmap_bh)) {
6115 *errp = PTR_ERR(bitmap_bh);
6116 pr_warn("Failed to read block bitmap\n");
6117 return 0;
6118 }
6119
6120 while (1) {
6121 i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
6122 blkoff);
6123 if (i >= max)
6124 break;
6125 if (ext4_fc_replay_check_excluded(sb,
6126 ext4_group_first_block_no(sb, group) +
6127 EXT4_C2B(sbi, i))) {
6128 blkoff = i + 1;
6129 } else
6130 break;
6131 }
6132 brelse(bitmap_bh);
6133 if (i < max)
6134 break;
6135
6136 if (++group >= ext4_get_groups_count(sb))
6137 group = 0;
6138
6139 blkoff = 0;
6140 }
6141
6142 if (i >= max) {
6143 *errp = -ENOSPC;
6144 return 0;
6145 }
6146
6147 block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
6148 ext4_mb_mark_bb(sb, block, 1, 1);
6149 ar->len = 1;
6150
6151 *errp = 0;
6152 return block;
6153 }
6154
6155 /*
6156 * Main entry point into mballoc to allocate blocks
6157 * it tries to use preallocation first, then falls back
6158 * to usual allocation
6159 */
6160 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6161 struct ext4_allocation_request *ar, int *errp)
6162 {
6163 struct ext4_allocation_context *ac = NULL;
6164 struct ext4_sb_info *sbi;
6165 struct super_block *sb;
6166 ext4_fsblk_t block = 0;
6167 unsigned int inquota = 0;
6168 unsigned int reserv_clstrs = 0;
6169 int retries = 0;
6170 u64 seq;
6171
6172 might_sleep();
6173 sb = ar->inode->i_sb;
6174 sbi = EXT4_SB(sb);
6175
6176 trace_ext4_request_blocks(ar);
6177 if (sbi->s_mount_state & EXT4_FC_REPLAY)
6178 return ext4_mb_new_blocks_simple(ar, errp);
6179
6180 /* Allow to use superuser reservation for quota file */
6181 if (ext4_is_quota_file(ar->inode))
6182 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
6183
6184 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
6185 /* Without delayed allocation we need to verify
6186 * there are enough free blocks to do block allocation
6187 * and verify allocation doesn't exceed the quota limits.
6188 */
6189 while (ar->len &&
6190 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
6191
6192 /* let others free the space */
6193 cond_resched();
6194 ar->len = ar->len >> 1;
6195 }
6196 if (!ar->len) {
6197 ext4_mb_show_pa(sb);
6198 *errp = -ENOSPC;
6199 return 0;
6200 }
6201 reserv_clstrs = ar->len;
6202 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
6203 dquot_alloc_block_nofail(ar->inode,
6204 EXT4_C2B(sbi, ar->len));
6205 } else {
6206 while (ar->len &&
6207 dquot_alloc_block(ar->inode,
6208 EXT4_C2B(sbi, ar->len))) {
6209
6210 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
6211 ar->len--;
6212 }
6213 }
6214 inquota = ar->len;
6215 if (ar->len == 0) {
6216 *errp = -EDQUOT;
6217 goto out;
6218 }
6219 }
6220
6221 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
6222 if (!ac) {
6223 ar->len = 0;
6224 *errp = -ENOMEM;
6225 goto out;
6226 }
6227
6228 ext4_mb_initialize_context(ac, ar);
6229
6230 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
6231 seq = this_cpu_read(discard_pa_seq);
6232 if (!ext4_mb_use_preallocated(ac)) {
6233 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
6234 ext4_mb_normalize_request(ac, ar);
6235
6236 *errp = ext4_mb_pa_alloc(ac);
6237 if (*errp)
6238 goto errout;
6239 repeat:
6240 /* allocate space in core */
6241 *errp = ext4_mb_regular_allocator(ac);
6242 /*
6243 * The pa allocated above is added to grp->bb_prealloc_list only
6244 * when we were able to allocate some blocks, i.e. when
6245 * ac->ac_status == AC_STATUS_FOUND.
6246 * An error from above means ac->ac_status != AC_STATUS_FOUND,
6247 * so we have to free this pa here.
6248 */
6249 if (*errp) {
6250 ext4_mb_pa_put_free(ac);
6251 ext4_discard_allocated_blocks(ac);
6252 goto errout;
6253 }
6254 if (ac->ac_status == AC_STATUS_FOUND &&
6255 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
6256 ext4_mb_pa_put_free(ac);
6257 }
6258 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
6259 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
6260 if (*errp) {
6261 ext4_discard_allocated_blocks(ac);
6262 goto errout;
6263 } else {
6264 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
6265 ar->len = ac->ac_b_ex.fe_len;
6266 }
6267 } else {
6268 if (++retries < 3 &&
6269 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
6270 goto repeat;
6271 /*
6272 * If block allocation fails then the pa allocated above
6273 * needs to be freed here.
6274 */
6275 ext4_mb_pa_put_free(ac);
6276 *errp = -ENOSPC;
6277 }
6278
6279 if (*errp) {
6280 errout:
6281 ac->ac_b_ex.fe_len = 0;
6282 ar->len = 0;
6283 ext4_mb_show_ac(ac);
6284 }
6285 ext4_mb_release_context(ac);
6286 kmem_cache_free(ext4_ac_cachep, ac);
6287 out:
6288 if (inquota && ar->len < inquota)
6289 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
6290 if (!ar->len) {
6291 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
6292 /* release all the reserved blocks if non delalloc */
6293 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
6294 reserv_clstrs);
6295 }
6296
6297 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
6298
6299 return block;
6300 }
6301
6302 /*
6303 * We can merge two free data extents only if the physical blocks
6304 * are contiguous, AND the extents were freed by the same transaction,
6305 * AND the blocks are associated with the same group.
6306 */
6307 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
6308 struct ext4_free_data *entry,
6309 struct ext4_free_data *new_entry,
6310 struct rb_root *entry_rb_root)
6311 {
6312 if ((entry->efd_tid != new_entry->efd_tid) ||
6313 (entry->efd_group != new_entry->efd_group))
6314 return;
6315 if (entry->efd_start_cluster + entry->efd_count ==
6316 new_entry->efd_start_cluster) {
6317 new_entry->efd_start_cluster = entry->efd_start_cluster;
6318 new_entry->efd_count += entry->efd_count;
6319 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
6320 entry->efd_start_cluster) {
6321 new_entry->efd_count += entry->efd_count;
6322 } else
6323 return;
6324 spin_lock(&sbi->s_md_lock);
6325 list_del(&entry->efd_list);
6326 spin_unlock(&sbi->s_md_lock);
6327 rb_erase(&entry->efd_node, entry_rb_root);
6328 kmem_cache_free(ext4_free_data_cachep, entry);
6329 }
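/*
 * A small worked example (hypothetical clusters): with extent
 * [100, 120) already in the tree, a new entry [120, 130) from the
 * same tid and group takes the first branch above and grows to
 * [100, 130); the old node is then unlinked from s_freed_data_list
 * and the rbtree and freed.
 */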
6330
6331 static noinline_for_stack void
6332 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
6333 struct ext4_free_data *new_entry)
6334 {
6335 ext4_group_t group = e4b->bd_group;
6336 ext4_grpblk_t cluster;
6337 ext4_grpblk_t clusters = new_entry->efd_count;
6338 struct ext4_free_data *entry;
6339 struct ext4_group_info *db = e4b->bd_info;
6340 struct super_block *sb = e4b->bd_sb;
6341 struct ext4_sb_info *sbi = EXT4_SB(sb);
6342 struct rb_node **n = &db->bb_free_root.rb_node, *node;
6343 struct rb_node *parent = NULL, *new_node;
6344
6345 BUG_ON(!ext4_handle_valid(handle));
6346 BUG_ON(e4b->bd_bitmap_page == NULL);
6347 BUG_ON(e4b->bd_buddy_page == NULL);
6348
6349 new_node = &new_entry->efd_node;
6350 cluster = new_entry->efd_start_cluster;
6351
6352 if (!*n) {
6353 /* first free block extent. We need to
6354 * protect the buddy cache from being freed,
6355 * otherwise we'll refresh it from the
6356 * on-disk bitmap and lose not-yet-available
6357 * blocks */
6358 get_page(e4b->bd_buddy_page);
6359 get_page(e4b->bd_bitmap_page);
6360 }
6361 while (*n) {
6362 parent = *n;
6363 entry = rb_entry(parent, struct ext4_free_data, efd_node);
6364 if (cluster < entry->efd_start_cluster)
6365 n = &(*n)->rb_left;
6366 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
6367 n = &(*n)->rb_right;
6368 else {
6369 ext4_grp_locked_error(sb, group, 0,
6370 ext4_group_first_block_no(sb, group) +
6371 EXT4_C2B(sbi, cluster),
6372 "Block already on to-be-freed list");
6373 kmem_cache_free(ext4_free_data_cachep, new_entry);
6374 return;
6375 }
6376 }
6377
6378 rb_link_node(new_node, parent, n);
6379 rb_insert_color(new_node, &db->bb_free_root);
6380
6381 /* Now try to see if the extent can be merged to the left and right */
6382 node = rb_prev(new_node);
6383 if (node) {
6384 entry = rb_entry(node, struct ext4_free_data, efd_node);
6385 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6386 &(db->bb_free_root));
6387 }
6388
6389 node = rb_next(new_node);
6390 if (node) {
6391 entry = rb_entry(node, struct ext4_free_data, efd_node);
6392 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6393 &(db->bb_free_root));
6394 }
6395
6396 spin_lock(&sbi->s_md_lock);
6397 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
6398 sbi->s_mb_free_pending += clusters;
6399 spin_unlock(&sbi->s_md_lock);
6400 }
6401
6402 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
6403 unsigned long count)
6404 {
6405 struct buffer_head *bitmap_bh;
6406 struct super_block *sb = inode->i_sb;
6407 struct ext4_group_desc *gdp;
6408 struct buffer_head *gdp_bh;
6409 ext4_group_t group;
6410 ext4_grpblk_t blkoff;
6411 int already_freed = 0, err, i;
6412
6413 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
6414 bitmap_bh = ext4_read_block_bitmap(sb, group);
6415 if (IS_ERR(bitmap_bh)) {
6416 pr_warn("Failed to read block bitmap\n");
6417 return;
6418 }
6419 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
6420 if (!gdp)
6421 goto err_out;
6422
6423 for (i = 0; i < count; i++) {
6424 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
6425 already_freed++;
6426 }
6427 mb_clear_bits(bitmap_bh->b_data, blkoff, count);
6428 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
6429 if (err)
6430 goto err_out;
6431 ext4_free_group_clusters_set(
6432 sb, gdp, ext4_free_group_clusters(sb, gdp) +
6433 count - already_freed);
6434 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
6435 ext4_group_desc_csum_set(sb, group, gdp);
6436 ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
6437 sync_dirty_buffer(bitmap_bh);
6438 sync_dirty_buffer(gdp_bh);
6439
6440 err_out:
6441 brelse(bitmap_bh);
6442 }
6443
6444 /**
6445 * ext4_mb_clear_bb() -- helper function for freeing blocks.
6446 * Used by ext4_free_blocks()
6447 * @handle: handle for this transaction
6448 * @inode: inode
6449 * @block: starting physical block to be freed
6450 * @count: number of blocks to be freed
6451 * @flags: flags used by ext4_free_blocks
6452 */
6453 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
6454 ext4_fsblk_t block, unsigned long count,
6455 int flags)
6456 {
6457 struct buffer_head *bitmap_bh = NULL;
6458 struct super_block *sb = inode->i_sb;
6459 struct ext4_group_desc *gdp;
6460 struct ext4_group_info *grp;
6461 unsigned int overflow;
6462 ext4_grpblk_t bit;
6463 struct buffer_head *gd_bh;
6464 ext4_group_t block_group;
6465 struct ext4_sb_info *sbi;
6466 struct ext4_buddy e4b;
6467 unsigned int count_clusters;
6468 int err = 0;
6469 int ret;
6470
6471 sbi = EXT4_SB(sb);
6472
6473 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6474 !ext4_inode_block_valid(inode, block, count)) {
6475 ext4_error(sb, "Freeing blocks in system zone - "
6476 "Block = %llu, count = %lu", block, count);
6477 /* err = 0. ext4_std_error should be a no op */
6478 goto error_return;
6479 }
6480 flags |= EXT4_FREE_BLOCKS_VALIDATED;
6481
6482 do_more:
6483 overflow = 0;
6484 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6485
6486 grp = ext4_get_group_info(sb, block_group);
6487 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
6488 return;
6489
6490 /*
6491 * Check to see if we are freeing blocks across a group
6492 * boundary.
6493 */
6494 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
6495 overflow = EXT4_C2B(sbi, bit) + count -
6496 EXT4_BLOCKS_PER_GROUP(sb);
6497 count -= overflow;
6498 /* The range changed so it's no longer validated */
6499 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6500 }
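/*
 * Worked example (non-bigalloc, 32768 blocks per group): freeing
 * count = 16 starting at bit 32760 gives overflow = 32760 + 16 -
 * 32768 = 8, so this pass frees only the 8 blocks in this group;
 * the do_more loop at the bottom then restarts with block += count
 * and count = overflow for the next group.
 */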
6501 count_clusters = EXT4_NUM_B2C(sbi, count);
6502 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6503 if (IS_ERR(bitmap_bh)) {
6504 err = PTR_ERR(bitmap_bh);
6505 bitmap_bh = NULL;
6506 goto error_return;
6507 }
6508 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
6509 if (!gdp) {
6510 err = -EIO;
6511 goto error_return;
6512 }
6513
6514 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6515 !ext4_inode_block_valid(inode, block, count)) {
6516 ext4_error(sb, "Freeing blocks in system zone - "
6517 "Block = %llu, count = %lu", block, count);
6518 /* err = 0. ext4_std_error should be a no op */
6519 goto error_return;
6520 }
6521
6522 BUFFER_TRACE(bitmap_bh, "getting write access");
6523 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6524 EXT4_JTR_NONE);
6525 if (err)
6526 goto error_return;
6527
6528 /*
6529 * We are about to modify some metadata. Call the journal APIs
6530 * to unshare ->b_data if a currently-committing transaction is
6531 * using it
6532 */
6533 BUFFER_TRACE(gd_bh, "get_write_access");
6534 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6535 if (err)
6536 goto error_return;
6537 #ifdef AGGRESSIVE_CHECK
6538 {
6539 int i;
6540 for (i = 0; i < count_clusters; i++)
6541 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
6542 }
6543 #endif
6544 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6545
6546 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6547 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6548 GFP_NOFS|__GFP_NOFAIL);
6549 if (err)
6550 goto error_return;
6551
6552 /*
6553 * We need to make sure we don't reuse the freed block until after the
6554 * transaction is committed. We make an exception if the inode is to be
6555 * written in writeback mode since writeback mode has weak data
6556 * consistency guarantees.
6557 */
6558 if (ext4_handle_valid(handle) &&
6559 ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6560 !ext4_should_writeback_data(inode))) {
6561 struct ext4_free_data *new_entry;
6562 /*
6563 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6564 * to fail.
6565 */
6566 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6567 GFP_NOFS|__GFP_NOFAIL);
6568 new_entry->efd_start_cluster = bit;
6569 new_entry->efd_group = block_group;
6570 new_entry->efd_count = count_clusters;
6571 new_entry->efd_tid = handle->h_transaction->t_tid;
6572
6573 ext4_lock_group(sb, block_group);
6574 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6575 ext4_mb_free_metadata(handle, &e4b, new_entry);
6576 } else {
6577 /* need to update group_info->bb_free and bitmap
6578 * with the group lock held. generate_buddy looks at
6579 * them with the group lock held
6580 */
6581 if (test_opt(sb, DISCARD)) {
6582 err = ext4_issue_discard(sb, block_group, bit,
6583 count_clusters, NULL);
6584 if (err && err != -EOPNOTSUPP)
6585 ext4_msg(sb, KERN_WARNING, "discard request in"
6586 " group:%u block:%d count:%lu failed"
6587 " with %d", block_group, bit, count,
6588 err);
6589 } else
6590 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6591
6592 ext4_lock_group(sb, block_group);
6593 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6594 mb_free_blocks(inode, &e4b, bit, count_clusters);
6595 }
6596
6597 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
6598 ext4_free_group_clusters_set(sb, gdp, ret);
6599 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
6600 ext4_group_desc_csum_set(sb, block_group, gdp);
6601 ext4_unlock_group(sb, block_group);
6602
6603 if (sbi->s_log_groups_per_flex) {
6604 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6605 atomic64_add(count_clusters,
6606 &sbi_array_rcu_deref(sbi, s_flex_groups,
6607 flex_group)->free_clusters);
6608 }
6609
6610 /*
6611 * on a bigalloc file system, defer the s_freeclusters_counter
6612 * update to the caller (ext4_remove_space and friends) so they
6613 * can determine if a cluster freed here should be rereserved
6614 */
6615 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6616 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6617 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6618 percpu_counter_add(&sbi->s_freeclusters_counter,
6619 count_clusters);
6620 }
6621
6622 ext4_mb_unload_buddy(&e4b);
6623
6624 /* We dirtied the bitmap block */
6625 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6626 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6627
6628 /* And the group descriptor block */
6629 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6630 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6631 if (!err)
6632 err = ret;
6633
6634 if (overflow && !err) {
6635 block += count;
6636 count = overflow;
6637 put_bh(bitmap_bh);
6638 /* The range changed so it's no longer validated */
6639 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6640 goto do_more;
6641 }
6642 error_return:
6643 brelse(bitmap_bh);
6644 ext4_std_error(sb, err);
6645 }
6646
6647 /**
6648 * ext4_free_blocks() -- Free given blocks and update quota
6649 * @handle: handle for this transaction
6650 * @inode: inode
6651 * @bh: optional buffer of the block to be freed
6652 * @block: starting physical block to be freed
6653 * @count: number of blocks to be freed
6654 * @flags: flags used by ext4_free_blocks
6655 */
6656 void ext4_free_blocks(handle_t *handle, struct inode *inode,
6657 struct buffer_head *bh, ext4_fsblk_t block,
6658 unsigned long count, int flags)
6659 {
6660 struct super_block *sb = inode->i_sb;
6661 unsigned int overflow;
6662 struct ext4_sb_info *sbi;
6663
6664 sbi = EXT4_SB(sb);
6665
6666 if (bh) {
6667 if (block)
6668 BUG_ON(block != bh->b_blocknr);
6669 else
6670 block = bh->b_blocknr;
6671 }
6672
6673 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6674 ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
6675 return;
6676 }
6677
6678 might_sleep();
6679
6680 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6681 !ext4_inode_block_valid(inode, block, count)) {
6682 ext4_error(sb, "Freeing blocks not in datazone - "
6683 "block = %llu, count = %lu", block, count);
6684 return;
6685 }
6686 flags |= EXT4_FREE_BLOCKS_VALIDATED;
6687
6688 ext4_debug("freeing block %llu\n", block);
6689 trace_ext4_free_blocks(inode, block, count, flags);
6690
6691 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6692 BUG_ON(count > 1);
6693
6694 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6695 inode, bh, block);
6696 }
6697
6698 /*
6699 * If the extent to be freed does not begin on a cluster
6700 * boundary, we need to deal with partial clusters at the
6701 * beginning and end of the extent. Normally we will free
6702 * blocks at the beginning or the end unless we are explicitly
6703 * requested to avoid doing so.
6704 */
6705 overflow = EXT4_PBLK_COFF(sbi, block);
6706 if (overflow) {
6707 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6708 overflow = sbi->s_cluster_ratio - overflow;
6709 block += overflow;
6710 if (count > overflow)
6711 count -= overflow;
6712 else
6713 return;
6714 } else {
6715 block -= overflow;
6716 count += overflow;
6717 }
6718 /* The range changed so it's no longer validated */
6719 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6720 }
6721 overflow = EXT4_LBLK_COFF(sbi, count);
6722 if (overflow) {
6723 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6724 if (count > overflow)
6725 count -= overflow;
6726 else
6727 return;
6728 } else
6729 count += sbi->s_cluster_ratio - overflow;
6730 /* The range changed so it's no longer validated */
6731 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6732 }
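/*
 * Worked example assuming a bigalloc ratio of 16 blocks per cluster:
 * block = 1001, count = 30 has head offset EXT4_PBLK_COFF = 9, so
 * (without NOFREE_FIRST_CLUSTER) the range widens to 992/39; the
 * tail offset EXT4_LBLK_COFF(39) = 7 then widens it to 992/48,
 * i.e. whole clusters 62..64 are freed.
 */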
6733
6734 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6735 int i;
6736 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6737
6738 for (i = 0; i < count; i++) {
6739 cond_resched();
6740 if (is_metadata)
6741 bh = sb_find_get_block(inode->i_sb, block + i);
6742 ext4_forget(handle, is_metadata, inode, bh, block + i);
6743 }
6744 }
6745
6746 ext4_mb_clear_bb(handle, inode, block, count, flags);
6747 }
6748
6749 /**
6750 * ext4_group_add_blocks() -- Add given blocks to an existing group
6751 * @handle: handle to this transaction
6752 * @sb: super block
6753 * @block: start physical block to add to the block group
6754 * @count: number of blocks to free
6755 *
6756 * This marks the blocks as free in the bitmap and buddy.
6757 */
6758 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6759 ext4_fsblk_t block, unsigned long count)
6760 {
6761 struct buffer_head *bitmap_bh = NULL;
6762 struct buffer_head *gd_bh;
6763 ext4_group_t block_group;
6764 ext4_grpblk_t bit;
6765 unsigned int i;
6766 struct ext4_group_desc *desc;
6767 struct ext4_sb_info *sbi = EXT4_SB(sb);
6768 struct ext4_buddy e4b;
6769 int err = 0, ret, free_clusters_count;
6770 ext4_grpblk_t clusters_freed;
6771 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6772 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6773 unsigned long cluster_count = last_cluster - first_cluster + 1;
6774
6775 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6776
6777 if (count == 0)
6778 return 0;
6779
6780 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6781 /*
6782 * Check to see if we are freeing blocks across a group
6783 * boundary.
6784 */
6785 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6786 ext4_warning(sb, "too many blocks added to group %u",
6787 block_group);
6788 err = -EINVAL;
6789 goto error_return;
6790 }

	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}

	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!desc) {
		err = -EIO;
		goto error_return;
	}

	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
		ext4_error(sb, "Adding blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);
		err = -EINVAL;
		goto error_return;
	}

	BUFFER_TRACE(bitmap_bh, "getting write access");
	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
					    EXT4_JTR_NONE);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it.
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
	if (err)
		goto error_return;

	for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
			ext4_error(sb, "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			clusters_freed++;
		}
	}

	err = ext4_mb_load_buddy(sb, block_group, &e4b);
	if (err)
		goto error_return;

	/*
	 * We need to update group_info->bb_free and the bitmap with the
	 * group lock held; ext4_mb_generate_buddy() looks at them with
	 * the group lock held as well.
	 */
	ext4_lock_group(sb, block_group);
	mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
	mb_free_blocks(NULL, &e4b, bit, cluster_count);
	free_clusters_count = clusters_freed +
			      ext4_free_group_clusters(sb, desc);
	ext4_free_group_clusters_set(sb, desc, free_clusters_count);
	ext4_block_bitmap_csum_set(sb, desc, bitmap_bh);
	ext4_group_desc_csum_set(sb, block_group, desc);
	ext4_unlock_group(sb, block_group);
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   clusters_freed);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		atomic64_add(clusters_freed,
			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
	}

	ext4_mb_unload_buddy(&e4b);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
	if (!err)
		err = ret;

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return err;
}

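/*
 * Illustrative caller sketch (the real user is the online-resize path in
 * resize.c, e.g. ext4_group_extend_no_check(); names of the local
 * variables below follow that caller): after the last group is extended,
 * the newly visible blocks are handed back roughly like this:
 *
 *	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
 *	if (err)
 *		goto exit_journal;
 */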
/**
 * ext4_trim_extent -- function to TRIM one single free extent in the group
 * @sb:		super block for the file system
 * @start:	starting block of the free extent in the alloc. group
 * @count:	number of blocks to TRIM
 * @e4b:	ext4 buddy for the group
 *
 * Trim "count" blocks starting at "start" in the "group". To assure that no
 * one will allocate those blocks, mark them as used in the buddy bitmap.
 * This must be called under the group lock.
 */
static int ext4_trim_extent(struct super_block *sb,
			    int start, int count, struct ext4_buddy *e4b)
__releases(bitlock)
__acquires(bitlock)
{
	struct ext4_free_extent ex;
	ext4_group_t group = e4b->bd_group;
	int ret = 0;

	trace_ext4_trim_extent(sb, group, start, count);

	assert_spin_locked(ext4_group_lock_ptr(sb, group));

	ex.fe_start = start;
	ex.fe_group = group;
	ex.fe_len = count;

	/*
	 * Mark blocks used, so no one can reuse them while
	 * being trimmed.
	 */
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, group);
	ret = ext4_issue_discard(sb, group, start, count, NULL);
	ext4_lock_group(sb, group);
	mb_free_blocks(NULL, e4b, start, ex.fe_len);
	return ret;
}

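/*
 * The mark-used/unlock/discard/lock/free dance above matters because
 * ext4_issue_discard() may sleep.  A rough timeline (illustrative):
 *
 *	trimmer				allocator
 *	-------				---------
 *	mb_mark_used(extent)
 *	ext4_unlock_group()
 *	discard in flight...		sees the extent as in use in the
 *					buddy, picks other blocks
 *	ext4_lock_group()
 *	mb_free_blocks(extent)
 *
 * Without the temporary mark, a concurrent allocation could hand out
 * blocks the device is simultaneously discarding.
 */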
static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
					   ext4_group_t grp)
{
	unsigned long nr_clusters_in_group;

	if (grp < (ext4_get_groups_count(sb) - 1))
		nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
	else
		nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
					ext4_group_first_block_no(sb, grp))
				       >> EXT4_CLUSTER_BITS(sb);

	return nr_clusters_in_group - 1;
}

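/*
 * Worked example (illustrative numbers): on a 4 KiB-block, non-bigalloc
 * filesystem with 32768 blocks per group and ext4_blocks_count() = 100000,
 * the last group (#3) starts at block 3 * 32768 = 98304, so it holds
 * 100000 - 98304 = 1696 clusters (EXT4_CLUSTER_BITS() = 0) and
 * ext4_last_grp_cluster() returns 1695.  Any earlier group returns
 * 32768 - 1 = 32767.
 */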
static bool ext4_trim_interrupted(void)
{
	return fatal_signal_pending(current) || freezing(current);
}

static int ext4_try_to_trim_range(struct super_block *sb,
				  struct ext4_buddy *e4b, ext4_grpblk_t start,
				  ext4_grpblk_t max, ext4_grpblk_t minblocks)
__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
	ext4_grpblk_t next, count, free_count, last, origin_start;
	bool set_trimmed = false;
	void *bitmap;

	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		return 0;

	last = ext4_last_grp_cluster(sb, e4b->bd_group);
	bitmap = e4b->bd_bitmap;
	if (start == 0 && max >= last)
		set_trimmed = true;
	origin_start = start;
	start = max(e4b->bd_info->bb_first_free, start);
	count = 0;
	free_count = 0;

	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;

		next = mb_find_next_bit(bitmap, last + 1, start);
		if (origin_start == 0 && next >= last)
			set_trimmed = true;

		if ((next - start) >= minblocks) {
			int ret = ext4_trim_extent(sb, start, next - start, e4b);

			if (ret && ret != -EOPNOTSUPP)
				return count;
			count += next - start;
		}
		free_count += next - start;
		start = next + 1;

		if (ext4_trim_interrupted())
			return count;

		if (need_resched()) {
			ext4_unlock_group(sb, e4b->bd_group);
			cond_resched();
			ext4_lock_group(sb, e4b->bd_group);
		}

		if ((e4b->bd_info->bb_free - free_count) < minblocks)
			break;
	}

	if (set_trimmed)
		EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);

	return count;
}

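/*
 * Illustrative pass: with minblocks = 4 and a group bitmap beginning
 * 1100001111 0011111... (1 = in use, 0 = free), the loop above first
 * finds the free run at clusters 2-5 (4 clusters, >= minblocks, so it is
 * trimmed and counted), then the run at clusters 10-11 (2 clusters, too
 * short, added only to free_count), and bails out early once the
 * remaining free clusters (bb_free - free_count) can no longer form a
 * minblocks-sized extent.
 */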
/**
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * @sb:		super block for file system
 * @group:	group to be trimmed
 * @start:	first group block to examine
 * @max:	last group block to examine
 * @minblocks:	minimum extent block count
 *
 * ext4_trim_all_free walks through the group's block bitmap searching for
 * free extents. When a free extent is found, it is marked as used in the
 * group buddy bitmap. Then a TRIM command is issued on this extent and the
 * extent is freed again in the group buddy bitmap.
 */
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
		   ext4_grpblk_t start, ext4_grpblk_t max,
		   ext4_grpblk_t minblocks)
{
	struct ext4_buddy e4b;
	int ret;

	trace_ext4_trim_all_free(sb, group, start, max);

	ret = ext4_mb_load_buddy(sb, group, &e4b);
	if (ret) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     ret, group);
		return ret;
	}

	ext4_lock_group(sb, group);

	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
	else
		ret = 0;

	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	ext4_debug("trimmed %d blocks in the group %d\n",
		   ret, group);

	return ret;
}

/**
 * ext4_trim_fs() -- handler function for the FITRIM ioctl
 * @sb:		superblock for filesystem
 * @range:	fstrim_range structure
 *
 * start:	first byte to trim
 * len:		number of bytes to trim from start
 * minlen:	minimum extent length in bytes
 *
 * ext4_trim_fs goes through all allocation groups containing bytes from
 * start to start+len. For each such group the ext4_trim_all_free function
 * is invoked to trim all free space.
 */
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
	struct ext4_group_info *grp;
	ext4_group_t group, first_group, last_group;
	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
	uint64_t start, end, minlen, trimmed = 0;
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	int ret = 0;

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);

	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
	    start >= max_blks ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	/* No point in trying to trim less than the discard granularity */
	if (range->minlen < discard_granularity) {
		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
				      discard_granularity >> sb->s_blocksize_bits);
		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
			goto out;
	}
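	/*
	 * Example of the conversions above (illustrative numbers): with
	 * 4 KiB blocks (s_blocksize_bits = 12), a non-bigalloc layout and
	 * a device discard granularity of 1 MiB, a range->minlen of 64 KiB
	 * is first converted to 64 KiB >> 12 = 16 clusters, then raised to
	 * the granularity: 1 MiB >> 12 = 256 clusters.
	 */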
	if (end >= max_blks - 1)
		end = max_blks - 1;
	if (end <= first_data_blk)
		goto out;
	if (start < first_data_blk)
		start = first_data_blk;

	/* Determine first and last group to examine based on start and end */
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
				     &first_group, &first_cluster);
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
				     &last_group, &last_cluster);

	/* end now represents the last cluster to discard in this group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	for (group = first_group; group <= last_group; group++) {
		if (ext4_trim_interrupted())
			break;
		grp = ext4_get_group_info(sb, group);
		if (!grp)
			continue;
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
			if (ret)
				break;
		}

		/*
		 * For all groups except the last one, the last cluster will
		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need
		 * to change it for the last group; note that last_cluster
		 * was already computed earlier by
		 * ext4_get_group_no_and_offset().
		 */
		if (group == last_group)
			end = last_cluster;
		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		EXT4_SB(sb)->s_last_trim_minblks = minlen;

out:
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
	return ret;
}

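/*
 * Userspace reaches ext4_trim_fs() through the FITRIM ioctl.  A minimal
 * sketch (illustrative; headers and error handling pared down):
 *
 *	#include <fcntl.h>
 *	#include <linux/fs.h>
 *	#include <sys/ioctl.h>
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,	// whole filesystem
 *		.minlen = 0,		// raised to the discard granularity
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *	if (ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n", range.len);
 *
 * On return range.len holds the number of bytes trimmed, as written back
 * at the "out:" label above.
 */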
/* Iterate all the free extents in the group. */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void *bitmap;
	ext4_grpblk_t next;
	struct ext4_buddy e4b;
	int error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	start = max(e4b.bd_info->bb_first_free, start);
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	while (start <= end) {
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next + 1;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);

	return error;
}

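/*
 * Illustrative formatter callback (hedged: the name and the counting
 * logic are made up for this sketch; the in-tree user is the FSMAP code
 * in fsmap.c):
 *
 *	static int count_free_extents_fn(struct super_block *sb,
 *					 ext4_group_t group,
 *					 ext4_grpblk_t start,
 *					 ext4_grpblk_t len, void *priv)
 *	{
 *		unsigned long *nr = priv;
 *
 *		(*nr)++;	// one record per free extent
 *		return 0;	// non-zero aborts the walk
 *	}
 *
 * Note that the group lock is dropped around each callback, so the
 * formatter may sleep but must not assume the bitmap is unchanged
 * between invocations.
 */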