
Lines Matching +full:locality +full:- +full:specific

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
18 #include <linux/backing-dev.h>
24 * - test ext4_ext_search_left() and ext4_ext_search_right()
25 * - search for metadata in few groups
28 * - normalization should take into account whether file is still open
29 * - discard preallocations if no free space left (policy?)
30 * - don't normalize tails
31 * - quota
32 * - reservation for superuser
35 * - bitmap read-ahead (proposed by Oleg Drokin aka green)
36 * - track min/max extents in each group for better group selection
37 * - mb_mark_used() may allocate chunk right after splitting buddy
38 * - tree of groups sorted by number of free blocks
39 * - error handling
50 * is larger. If the size is less than sbi->s_mb_stream_request we
60 * ext4_inode_info->i_prealloc_list, which contains list of prealloc
64 * pa_lstart -> the logical start block for this prealloc space
65 * pa_pstart -> the physical start block for this prealloc space
66 * pa_len -> length for this prealloc space (in clusters)
67 * pa_free -> free space available in this prealloc space (in clusters)
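 * [added sketch] the four fields above live in struct ext4_prealloc_space
 * (fs/ext4/mballoc.h); a trimmed view, with the list linkage, lock and
 * refcount members omitted:
 *
 *	struct ext4_prealloc_space {
 *		ext4_lblk_t	pa_lstart;	(logical start block)
 *		ext4_fsblk_t	pa_pstart;	(physical start block)
 *		ext4_grpblk_t	pa_len;		(length, in clusters)
 *		ext4_grpblk_t	pa_free;	(clusters still free in this PA)
 *	};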
79 * have the group allocation flag set then we look at the locality group
84 * The reason for having a per cpu locality group is to reduce the contention
87 * The locality group prealloc space is used by looking at whether we have
90 * If we can't allocate blocks via inode prealloc and/or locality group
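 * [added sketch] the per-CPU object referred to above is struct
 * ext4_locality_group (fs/ext4/ext4.h), roughly:
 *
 *	struct ext4_locality_group {
 *		struct mutex		lg_mutex;	(serializes allocations)
 *		struct list_head	lg_prealloc_list[PREALLOC_TB_SIZE];
 *		spinlock_t		lg_prealloc_lock;
 *	};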
122 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
123 * dependent on the cluster size; for non-bigalloc file systems, it is
126 * terms of number of blocks. If we have mounted the file system with -O
128 * smallest multiple of the stripe value (sbi->s_stripe) which is
134 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
136 * Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
141 * number of buddy bitmap orders possible) number of lists. Group-infos are
144 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
146 * Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
148 * This is an array of lists where the i-th list contains groups with
150 * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
192 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
195 * stripe size (sbi->s_stripe), we try to search for contiguous block in
197 * not, we search in the specific group using bitmap for best extents. The
210 * non-linear fashion. While that may not matter on non-rotational devices, for
225 * - on-disk bitmap
226 * - in-core buddy (actually includes buddy and bitmap)
227 * - preallocation descriptors (PAs)
230 * - inode
231 * assigned to a specific inode and can be used for this inode only.
232 * it describes part of inode's space preallocated to a specific
239 * - locality group
240 * assigned to specific locality group which does not translate to
246 * in-core buddy = on-disk bitmap + preallocation descriptors
249 * - allocated blocks (persistent)
250 * - preallocated blocks (non-persistent)
254 * literally -- time is discrete and delimited by locks.
257 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
260 * - init buddy: buddy = on-disk + PAs
261 * - new PA: buddy += N; PA = N
262 * - use inode PA: on-disk += N; PA -= N
263 * - discard inode PA:		buddy -= on-disk - PA; PA = 0
264 * - use locality group PA:		on-disk += N; PA -= N
265 * - discard locality group PA:	buddy -= PA; PA = 0
266 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
268 * bits from PA, only from on-disk bitmap
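 * [added worked example] tracking the rules above through one inode PA,
 * counting used clusters:
 *	start:			on-disk = 100, PA = 0, buddy = 100
 *	new PA of 8:		on-disk = 100, PA = 8, buddy = 108
 *	use 3 from the PA:	on-disk = 103, PA = 5, buddy = 108
 *	discard the PA:		on-disk = 103, PA = 0, buddy = 103
 * at every step buddy = on-disk + PA, i.e. the in-core buddy already
 * accounts for preallocated-but-unused clusters.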
272 * killing performance on high-end SMP hardware. let's try to relax it using
276 * nobody can re-allocate that block
277 * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
279 * on-disk bitmap if buddy has the same bit set and/or PA covers the corresponding
283 * - init buddy vs.
284 * - new PA
287 * - use inode PA
288 * we need to make sure that either on-disk bitmap or PA has uptodate data
289 * given (3) we care that PA-=N operation doesn't interfere with init
290 * - discard inode PA
292 * - use locality group PA
293 * again PA-=N must be serialized with init
294 * - discard locality group PA
296 * - new PA vs.
297 * - use inode PA
299 * - discard inode PA
301 * - use locality group PA
303 * - discard locality group PA
305 * - use inode PA
306 * - use inode PA
308 * - discard inode PA
310 * - use locality group PA
311 * nothing wrong here -- they're different PAs covering different blocks
312 * - discard locality group PA
316 * - PA is referenced and, while it is, no discard is possible
317 * - PA is referenced until its blocks are marked in the on-disk bitmap
318 * - PA changes only after on-disk bitmap
319 * - discard must not compete with init. either init is done before
321 * - buddy init as sum of on-disk bitmap and PAs is done atomically
331 * - allocation:
334 * mark bits in on-disk bitmap
337 * - use preallocation:
338 * find proper PA (per-inode or group)
340 * mark bits in on-disk bitmap
344 * - free:
346 * mark bits in on-disk bitmap
349 * - discard preallocations in group:
352 * load on-disk bitmap
354 * remove PA from object (inode or locality group)
355 * mark free blocks in-core
357 * - discard inode's preallocations:
364 * - bitlock on a group (group)
365 * - object (inode/locality) (object)
366 * - per-pa lock (pa)
367 * - cr_power2_aligned lists lock (cr_power2_aligned)
368 * - cr_goal_len_fast lists lock (cr_goal_len_fast)
371 * - new pa
375 * - find and use pa:
378 * - release consumed pa:
383 * - generate in-core bitmap:
387 * - discard all for given object (inode, locality group):
392 * - discard all for given group:
398 * - allocation path (ext4_mb_regular_allocator)
445 * sure that all the PAs on grp->bb_prealloc_list got freed or that it is empty.
507 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; in mb_find_next_zero_bit()
520 ret = ext4_find_next_bit(addr, tmpmax, start) - fix; in mb_find_next_bit()
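/* [added note] both helpers above wrap ext4_find_next_{zero_,}bit():
 * mb_correct_addr_and_bit() rounds 'addr' down to a word-aligned boundary
 * (required by the generic bitops on some architectures) and records the
 * displaced bit count in 'fix'; the search limit and the result are then
 * shifted by 'fix', so callers may pass unaligned bitmap slices.
 */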
530 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); in mb_find_buddy()
533 if (order > e4b->bd_blkbits + 1) { in mb_find_buddy()
540 *max = 1 << (e4b->bd_blkbits + 3); in mb_find_buddy()
541 return e4b->bd_bitmap; in mb_find_buddy()
544 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; in mb_find_buddy()
545 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; in mb_find_buddy()
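/* [added note] buddy layout assumed by mb_find_buddy(): order 0 is the
 * block bitmap itself (bd_bitmap, 1 << (bd_blkbits + 3) bits); orders >= 1
 * live in the separate bd_buddy block, each starting at byte offset
 * s_mb_offsets[order] with s_mb_maxs[order] bits, every order half the
 * size of the one below it; one bit at order o covers 2^o clusters.
 */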
555 struct super_block *sb = e4b->bd_sb; in mb_free_blocks_double()
557 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) in mb_free_blocks_double()
559 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); in mb_free_blocks_double()
561 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { in mb_free_blocks_double()
564 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); in mb_free_blocks_double()
566 ext4_grp_locked_error(sb, e4b->bd_group, in mb_free_blocks_double()
567 inode ? inode->i_ino : 0, in mb_free_blocks_double()
572 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, in mb_free_blocks_double()
575 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); in mb_free_blocks_double()
583 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) in mb_mark_used_double()
585 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); in mb_mark_used_double()
587 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); in mb_mark_used_double()
588 mb_set_bit(first + i, e4b->bd_info->bb_bitmap); in mb_mark_used_double()
594 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) in mb_cmp_bitmaps()
596 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { in mb_cmp_bitmaps()
599 b1 = (unsigned char *) e4b->bd_info->bb_bitmap; in mb_cmp_bitmaps()
601 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { in mb_cmp_bitmaps()
603 ext4_msg(e4b->bd_sb, KERN_ERR, in mb_cmp_bitmaps()
607 e4b->bd_group, i, i * 8, b1[i], b2[i]); in mb_cmp_bitmaps()
619 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS); in mb_group_bb_bitmap_alloc()
620 if (!grp->bb_bitmap) in mb_group_bb_bitmap_alloc()
625 kfree(grp->bb_bitmap); in mb_group_bb_bitmap_alloc()
626 grp->bb_bitmap = NULL; in mb_group_bb_bitmap_alloc()
630 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize); in mb_group_bb_bitmap_alloc()
636 kfree(grp->bb_bitmap); in mb_group_bb_bitmap_free()
682 struct super_block *sb = e4b->bd_sb; in __mb_check_buddy()
683 int order = e4b->bd_blkbits + 1; in __mb_check_buddy()
697 if (e4b->bd_info->bb_check_counter++ % 10) in __mb_check_buddy()
703 buddy2 = mb_find_buddy(e4b, order - 1, &max2); in __mb_check_buddy()
727 !mb_test_bit(k, e4b->bd_bitmap)); in __mb_check_buddy()
731 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); in __mb_check_buddy()
732 order--; in __mb_check_buddy()
735 fstart = -1; in __mb_check_buddy()
739 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); in __mb_check_buddy()
740 if (fstart == -1) { in __mb_check_buddy()
746 fstart = -1; in __mb_check_buddy()
748 for (j = 0; j < e4b->bd_blkbits + 1; j++) { in __mb_check_buddy()
755 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); in __mb_check_buddy()
756 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); in __mb_check_buddy()
758 grp = ext4_get_group_info(sb, e4b->bd_group); in __mb_check_buddy()
761 list_for_each(cur, &grp->bb_prealloc_list) { in __mb_check_buddy()
765 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); in __mb_check_buddy()
766 MB_CHECK_ASSERT(groupnr == e4b->bd_group); in __mb_check_buddy()
767 for (i = 0; i < pa->pa_len; i++) in __mb_check_buddy()
797 border = 2 << sb->s_blocksize_bits; in ext4_mb_mark_free_simple()
801 max = ffs(first | border) - 1; in ext4_mb_mark_free_simple()
804 min = fls(len) - 1; in ext4_mb_mark_free_simple()
811 grp->bb_counters[min]++; in ext4_mb_mark_free_simple()
814 buddy + sbi->s_mb_offsets[min]); in ext4_mb_mark_free_simple()
816 len -= chunk; in ext4_mb_mark_free_simple()
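/* [added worked example] freeing first = 5, len = 7 (clusters 5..11):
 *	pass 1: max = ffs(5) - 1 = 0, min = fls(7) - 1 = 2 -> chunk = 1 (5)
 *	pass 2: max = ffs(6) - 1 = 1, min = fls(6) - 1 = 2 -> chunk = 2 (6..7)
 *	pass 3: max = ffs(8) - 1 = 3, min = fls(4) - 1 = 2 -> chunk = 4 (8..11)
 * the range decomposes into maximal aligned power-of-two chunks, bumping
 * bb_counters[0], bb_counters[1] and bb_counters[2] once each.
 */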
829 order = fls(len) - 2; in mb_avg_fragment_size_order()
833 order--; in mb_avg_fragment_size_order()
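/* [added worked example] a group with bb_free = 1024 and bb_fragments = 4
 * averages 256 clusters per free extent; fls(256) - 2 = 7, so the group is
 * filed on s_mb_avg_fragment_size[7], the list for averages in [256, 512).
 */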
844 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0) in mb_update_avg_fragment_size()
848 grp->bb_free / grp->bb_fragments); in mb_update_avg_fragment_size()
849 if (new_order == grp->bb_avg_fragment_size_order) in mb_update_avg_fragment_size()
852 if (grp->bb_avg_fragment_size_order != -1) { in mb_update_avg_fragment_size()
853 write_lock(&sbi->s_mb_avg_fragment_size_locks[ in mb_update_avg_fragment_size()
854 grp->bb_avg_fragment_size_order]); in mb_update_avg_fragment_size()
855 list_del(&grp->bb_avg_fragment_size_node); in mb_update_avg_fragment_size()
856 write_unlock(&sbi->s_mb_avg_fragment_size_locks[ in mb_update_avg_fragment_size()
857 grp->bb_avg_fragment_size_order]); in mb_update_avg_fragment_size()
859 grp->bb_avg_fragment_size_order = new_order; in mb_update_avg_fragment_size()
860 write_lock(&sbi->s_mb_avg_fragment_size_locks[ in mb_update_avg_fragment_size()
861 grp->bb_avg_fragment_size_order]); in mb_update_avg_fragment_size()
862 list_add_tail(&grp->bb_avg_fragment_size_node, in mb_update_avg_fragment_size()
863 &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]); in mb_update_avg_fragment_size()
864 write_unlock(&sbi->s_mb_avg_fragment_size_locks[ in mb_update_avg_fragment_size()
865 grp->bb_avg_fragment_size_order]); in mb_update_avg_fragment_size()
875 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_choose_next_group_p2_aligned()
879 if (ac->ac_status == AC_STATUS_FOUND) in ext4_mb_choose_next_group_p2_aligned()
882 if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED)) in ext4_mb_choose_next_group_p2_aligned()
883 atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions); in ext4_mb_choose_next_group_p2_aligned()
885 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) { in ext4_mb_choose_next_group_p2_aligned()
886 if (list_empty(&sbi->s_mb_largest_free_orders[i])) in ext4_mb_choose_next_group_p2_aligned()
888 read_lock(&sbi->s_mb_largest_free_orders_locks[i]); in ext4_mb_choose_next_group_p2_aligned()
889 if (list_empty(&sbi->s_mb_largest_free_orders[i])) { in ext4_mb_choose_next_group_p2_aligned()
890 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); in ext4_mb_choose_next_group_p2_aligned()
893 list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i], in ext4_mb_choose_next_group_p2_aligned()
895 if (sbi->s_mb_stats) in ext4_mb_choose_next_group_p2_aligned()
896 atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]); in ext4_mb_choose_next_group_p2_aligned()
897 if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) { in ext4_mb_choose_next_group_p2_aligned()
898 *group = iter->bb_group; in ext4_mb_choose_next_group_p2_aligned()
899 ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED; in ext4_mb_choose_next_group_p2_aligned()
900 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); in ext4_mb_choose_next_group_p2_aligned()
904 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); in ext4_mb_choose_next_group_p2_aligned()
917 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_find_good_group_avg_frag_lists()
918 struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order]; in ext4_mb_find_good_group_avg_frag_lists()
919 rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order]; in ext4_mb_find_good_group_avg_frag_lists()
921 enum criteria cr = ac->ac_criteria; in ext4_mb_find_good_group_avg_frag_lists()
931 if (sbi->s_mb_stats) in ext4_mb_find_good_group_avg_frag_lists()
932 atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]); in ext4_mb_find_good_group_avg_frag_lists()
933 if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) { in ext4_mb_find_good_group_avg_frag_lists()
949 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_choose_next_group_goal_fast()
953 if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) { in ext4_mb_choose_next_group_goal_fast()
954 if (sbi->s_mb_stats) in ext4_mb_choose_next_group_goal_fast()
955 atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions); in ext4_mb_choose_next_group_goal_fast()
958 for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); in ext4_mb_choose_next_group_goal_fast()
959 i < MB_NUM_ORDERS(ac->ac_sb); i++) { in ext4_mb_choose_next_group_goal_fast()
962 *group = grp->bb_group; in ext4_mb_choose_next_group_goal_fast()
963 ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED; in ext4_mb_choose_next_group_goal_fast()
972 * request len. However, allocation request for non-regular in ext4_mb_choose_next_group_goal_fast()
976 if (ac->ac_flags & EXT4_MB_HINT_DATA) in ext4_mb_choose_next_group_goal_fast()
994 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_choose_next_group_best_avail()
999 if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) { in ext4_mb_choose_next_group_best_avail()
1000 if (sbi->s_mb_stats) in ext4_mb_choose_next_group_best_avail()
1001 atomic_inc(&sbi->s_bal_best_avail_bad_suggestions); in ext4_mb_choose_next_group_best_avail()
1010 order = fls(ac->ac_g_ex.fe_len) - 1; in ext4_mb_choose_next_group_best_avail()
1011 min_order = order - sbi->s_mb_best_avail_max_trim_order; in ext4_mb_choose_next_group_best_avail()
1015 if (sbi->s_stripe > 0) { in ext4_mb_choose_next_group_best_avail()
1020 num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe); in ext4_mb_choose_next_group_best_avail()
1026 min_order = fls(num_stripe_clusters) - 1; in ext4_mb_choose_next_group_best_avail()
1029 if (1 << min_order < ac->ac_o_ex.fe_len) in ext4_mb_choose_next_group_best_avail()
1030 min_order = fls(ac->ac_o_ex.fe_len); in ext4_mb_choose_next_group_best_avail()
1032 for (i = order; i >= min_order; i--) { in ext4_mb_choose_next_group_best_avail()
1039 ac->ac_g_ex.fe_len = 1 << i; in ext4_mb_choose_next_group_best_avail()
1047 ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len, in ext4_mb_choose_next_group_best_avail()
1051 frag_order = mb_avg_fragment_size_order(ac->ac_sb, in ext4_mb_choose_next_group_best_avail()
1052 ac->ac_g_ex.fe_len); in ext4_mb_choose_next_group_best_avail()
1056 *group = grp->bb_group; in ext4_mb_choose_next_group_best_avail()
1057 ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED; in ext4_mb_choose_next_group_best_avail()
1063 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; in ext4_mb_choose_next_group_best_avail()
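/* [added worked example, assuming no stripe and the default
 * s_mb_best_avail_max_trim_order of 3] normalized goal 512 clusters,
 * original request 100: order = fls(512) - 1 = 9, min_order = 9 - 3 = 6,
 * but 1 << 6 = 64 < 100 so min_order = fls(100) = 7; the loop offers
 * trimmed goals of 512, 256 and 128 clusters, and on failure fe_len is
 * restored to the original goal above.
 */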
1069 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN))) in should_optimize_scan()
1071 if (ac->ac_criteria >= CR_GOAL_LEN_SLOW) in should_optimize_scan()
1073 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) in should_optimize_scan()
1089 if (ac->ac_groups_linear_remaining) { in next_linear_group()
1090 ac->ac_groups_linear_remaining--; in next_linear_group()
1097 * Artificially restricted ngroups for non-extent in next_linear_group()
1119 *new_cr = ac->ac_criteria; in ext4_mb_choose_next_group()
1121 if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) { in ext4_mb_choose_next_group()
1151 for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) in mb_set_largest_free_order()
1152 if (grp->bb_counters[i] > 0) in mb_set_largest_free_order()
1156 i == grp->bb_largest_free_order) { in mb_set_largest_free_order()
1157 grp->bb_largest_free_order = i; in mb_set_largest_free_order()
1161 if (grp->bb_largest_free_order >= 0) { in mb_set_largest_free_order()
1162 write_lock(&sbi->s_mb_largest_free_orders_locks[ in mb_set_largest_free_order()
1163 grp->bb_largest_free_order]); in mb_set_largest_free_order()
1164 list_del_init(&grp->bb_largest_free_order_node); in mb_set_largest_free_order()
1165 write_unlock(&sbi->s_mb_largest_free_orders_locks[ in mb_set_largest_free_order()
1166 grp->bb_largest_free_order]); in mb_set_largest_free_order()
1168 grp->bb_largest_free_order = i; in mb_set_largest_free_order()
1169 if (grp->bb_largest_free_order >= 0 && grp->bb_free) { in mb_set_largest_free_order()
1170 write_lock(&sbi->s_mb_largest_free_orders_locks[ in mb_set_largest_free_order()
1171 grp->bb_largest_free_order]); in mb_set_largest_free_order()
1172 list_add_tail(&grp->bb_largest_free_order_node, in mb_set_largest_free_order()
1173 &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); in mb_set_largest_free_order()
1174 write_unlock(&sbi->s_mb_largest_free_orders_locks[ in mb_set_largest_free_order()
1175 grp->bb_largest_free_order]); in mb_set_largest_free_order()
1194 * of on-disk bitmap and preallocations */ in ext4_mb_generate_buddy()
1196 grp->bb_first_free = i; in ext4_mb_generate_buddy()
1201 len = i - first; in ext4_mb_generate_buddy()
1206 grp->bb_counters[0]++; in ext4_mb_generate_buddy()
1210 grp->bb_fragments = fragments; in ext4_mb_generate_buddy()
1212 if (free != grp->bb_free) { in ext4_mb_generate_buddy()
1216 free, grp->bb_free); in ext4_mb_generate_buddy()
1221 grp->bb_free = free; in ext4_mb_generate_buddy()
1228 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); in ext4_mb_generate_buddy()
1230 period = get_cycles() - period; in ext4_mb_generate_buddy()
1231 atomic_inc(&sbi->s_mb_buddies_generated); in ext4_mb_generate_buddy()
1232 atomic64_add(period, &sbi->s_mb_generation_time); in ext4_mb_generate_buddy()
1244 e4b->bd_info->bb_fragments = 0; in mb_regenerate_buddy()
1245 memset(e4b->bd_info->bb_counters, 0, in mb_regenerate_buddy()
1246 sizeof(*e4b->bd_info->bb_counters) * in mb_regenerate_buddy()
1247 (e4b->bd_sb->s_blocksize_bits + 2)); in mb_regenerate_buddy()
1249 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, in mb_regenerate_buddy()
1250 e4b->bd_bitmap, e4b->bd_group, e4b->bd_info); in mb_regenerate_buddy()
1291 inode = page->mapping->host; in ext4_mb_init_cache()
1292 sb = inode->i_sb; in ext4_mb_init_cache()
1297 mb_debug(sb, "init page %lu\n", page->index); in ext4_mb_init_cache()
1308 return -ENOMEM; in ext4_mb_init_cache()
1312 first_group = page->index * blocks_per_page / 2; in ext4_mb_init_cache()
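/* [added worked example] every group owns two consecutive blocks in the
 * buddy cache inode: block 2*g holds the bitmap copy and block 2*g + 1 the
 * buddy; with 1 KiB blocks and 4 KiB pages, blocks_per_page = 4 and page 0
 * carries bitmap(g0), buddy(g0), bitmap(g1), buddy(g1), hence first_group
 * = page->index * blocks_per_page / 2.
 */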
1352 first_block = page->index * blocks_per_page; in ext4_mb_init_cache()
1358 if (!bh[group - first_group]) in ext4_mb_init_cache()
1362 if (!buffer_verified(bh[group - first_group])) in ext4_mb_init_cache()
1374 bitmap = bh[group - first_group]->b_data; in ext4_mb_init_cache()
1382 err = -EFSCORRUPTED; in ext4_mb_init_cache()
1389 group, page->index, i * blocksize); in ext4_mb_init_cache()
1391 grinfo->bb_fragments = 0; in ext4_mb_init_cache()
1392 memset(grinfo->bb_counters, 0, in ext4_mb_init_cache()
1393 sizeof(*grinfo->bb_counters) * in ext4_mb_init_cache()
1408 group, page->index, i * blocksize); in ext4_mb_init_cache()
1415 /* mark all preallocated blks used in in-core bitmap */ in ext4_mb_init_cache()
1417 WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root)); in ext4_mb_init_cache()
1442 * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
1447 struct inode *inode = EXT4_SB(sb)->s_buddy_cache; in ext4_mb_get_buddy_page_lock()
1452 e4b->bd_buddy_page = NULL; in ext4_mb_get_buddy_page_lock()
1453 e4b->bd_bitmap_page = NULL; in ext4_mb_get_buddy_page_lock()
1455 blocks_per_page = PAGE_SIZE / sb->s_blocksize; in ext4_mb_get_buddy_page_lock()
1464 page = find_or_create_page(inode->i_mapping, pnum, gfp); in ext4_mb_get_buddy_page_lock()
1466 return -ENOMEM; in ext4_mb_get_buddy_page_lock()
1467 BUG_ON(page->mapping != inode->i_mapping); in ext4_mb_get_buddy_page_lock()
1468 e4b->bd_bitmap_page = page; in ext4_mb_get_buddy_page_lock()
1469 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); in ext4_mb_get_buddy_page_lock()
1478 page = find_or_create_page(inode->i_mapping, pnum, gfp); in ext4_mb_get_buddy_page_lock()
1480 return -ENOMEM; in ext4_mb_get_buddy_page_lock()
1481 BUG_ON(page->mapping != inode->i_mapping); in ext4_mb_get_buddy_page_lock()
1482 e4b->bd_buddy_page = page; in ext4_mb_get_buddy_page_lock()
1488 if (e4b->bd_bitmap_page) { in ext4_mb_put_buddy_page_lock()
1489 unlock_page(e4b->bd_bitmap_page); in ext4_mb_put_buddy_page_lock()
1490 put_page(e4b->bd_bitmap_page); in ext4_mb_put_buddy_page_lock()
1492 if (e4b->bd_buddy_page) { in ext4_mb_put_buddy_page_lock()
1493 unlock_page(e4b->bd_buddy_page); in ext4_mb_put_buddy_page_lock()
1494 put_page(e4b->bd_buddy_page); in ext4_mb_put_buddy_page_lock()
1516 return -EFSCORRUPTED; in ext4_mb_init_group()
1541 ret = -EIO; in ext4_mb_init_group()
1560 ret = -EIO; in ext4_mb_init_group()
1585 struct inode *inode = sbi->s_buddy_cache; in ext4_mb_load_buddy_gfp()
1590 blocks_per_page = PAGE_SIZE / sb->s_blocksize; in ext4_mb_load_buddy_gfp()
1593 return -EFSCORRUPTED; in ext4_mb_load_buddy_gfp()
1595 e4b->bd_blkbits = sb->s_blocksize_bits; in ext4_mb_load_buddy_gfp()
1596 e4b->bd_info = grp; in ext4_mb_load_buddy_gfp()
1597 e4b->bd_sb = sb; in ext4_mb_load_buddy_gfp()
1598 e4b->bd_group = group; in ext4_mb_load_buddy_gfp()
1599 e4b->bd_buddy_page = NULL; in ext4_mb_load_buddy_gfp()
1600 e4b->bd_bitmap_page = NULL; in ext4_mb_load_buddy_gfp()
1623 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); in ext4_mb_load_buddy_gfp()
1635 page = find_or_create_page(inode->i_mapping, pnum, gfp); in ext4_mb_load_buddy_gfp()
1637 if (WARN_RATELIMIT(page->mapping != inode->i_mapping, in ext4_mb_load_buddy_gfp()
1638 "ext4: bitmap's paging->mapping != inode->i_mapping\n")) { in ext4_mb_load_buddy_gfp()
1641 ret = -EINVAL; in ext4_mb_load_buddy_gfp()
1651 (poff * sb->s_blocksize)); in ext4_mb_load_buddy_gfp()
1657 ret = -ENOMEM; in ext4_mb_load_buddy_gfp()
1661 ret = -EIO; in ext4_mb_load_buddy_gfp()
1666 e4b->bd_bitmap_page = page; in ext4_mb_load_buddy_gfp()
1667 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); in ext4_mb_load_buddy_gfp()
1673 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); in ext4_mb_load_buddy_gfp()
1677 page = find_or_create_page(inode->i_mapping, pnum, gfp); in ext4_mb_load_buddy_gfp()
1679 if (WARN_RATELIMIT(page->mapping != inode->i_mapping, in ext4_mb_load_buddy_gfp()
1680 "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) { in ext4_mb_load_buddy_gfp()
1683 ret = -EINVAL; in ext4_mb_load_buddy_gfp()
1687 ret = ext4_mb_init_cache(page, e4b->bd_bitmap, in ext4_mb_load_buddy_gfp()
1698 ret = -ENOMEM; in ext4_mb_load_buddy_gfp()
1702 ret = -EIO; in ext4_mb_load_buddy_gfp()
1707 e4b->bd_buddy_page = page; in ext4_mb_load_buddy_gfp()
1708 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); in ext4_mb_load_buddy_gfp()
1715 if (e4b->bd_bitmap_page) in ext4_mb_load_buddy_gfp()
1716 put_page(e4b->bd_bitmap_page); in ext4_mb_load_buddy_gfp()
1718 e4b->bd_buddy = NULL; in ext4_mb_load_buddy_gfp()
1719 e4b->bd_bitmap = NULL; in ext4_mb_load_buddy_gfp()
1731 if (e4b->bd_bitmap_page) in ext4_mb_unload_buddy()
1732 put_page(e4b->bd_bitmap_page); in ext4_mb_unload_buddy()
1733 if (e4b->bd_buddy_page) in ext4_mb_unload_buddy()
1734 put_page(e4b->bd_buddy_page); in ext4_mb_unload_buddy()
1743 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); in mb_find_order_for_block()
1744 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); in mb_find_order_for_block()
1746 while (order <= e4b->bd_blkbits + 1) { in mb_find_order_for_block()
1763 if ((cur & 31) == 0 && (len - cur) >= 32) { in mb_clear_bits()
1776 * will return first found zero bit if any, -1 otherwise
1781 int zero_bit = -1; in mb_test_and_clear_bits()
1785 if ((cur & 31) == 0 && (len - cur) >= 32) { in mb_test_and_clear_bits()
1788 if (*addr != (__u32)(-1) && zero_bit == -1) in mb_test_and_clear_bits()
1794 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) in mb_test_and_clear_bits()
1808 if ((cur & 31) == 0 && (len - cur) >= 32) { in mb_set_bits()
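/* [added note] mb_clear_bits(), mb_test_and_clear_bits() and mb_set_bits()
 * share this fast path: once 'cur' is 32-bit aligned and at least 32 bits
 * remain, they touch a whole __u32 at a time, and only the unaligned head
 * and tail fall back to single-bit operations.
 */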
1824 (*bit) -= side; in mb_buddy_adjust_border()
1830 return -1; in mb_buddy_adjust_border()
1853 * (ASCII diagram of the buddy bitmap layers elided) in mb_buddy_mark_free()
1873 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); in mb_buddy_mark_free()
1875 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); in mb_buddy_mark_free()
1882 mb_clear_bits(buddy, first, last - first + 1); in mb_buddy_mark_free()
1883 e4b->bd_info->bb_counters[order - 1] += last - first + 1; in mb_buddy_mark_free()
1898 int last = first + count - 1; in mb_free_blocks()
1899 struct super_block *sb = e4b->bd_sb; in mb_free_blocks()
1903 BUG_ON(last >= (sb->s_blocksize << 3)); in mb_free_blocks()
1904 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); in mb_free_blocks()
1906 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) in mb_free_blocks()
1916 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); in mb_free_blocks()
1917 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); in mb_free_blocks()
1918 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) in mb_free_blocks()
1919 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); in mb_free_blocks()
1921 if (unlikely(block != -1)) { in mb_free_blocks()
1929 if (sbi->s_mount_state & EXT4_FC_REPLAY) { in mb_free_blocks()
1934 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); in mb_free_blocks()
1936 ext4_grp_locked_error(sb, e4b->bd_group, in mb_free_blocks()
1937 inode ? inode->i_ino : 0, blocknr, in mb_free_blocks()
1940 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, in mb_free_blocks()
1946 e4b->bd_info->bb_free += count; in mb_free_blocks()
1947 if (first < e4b->bd_info->bb_first_free) in mb_free_blocks()
1948 e4b->bd_info->bb_first_free = first; in mb_free_blocks()
1952 e4b->bd_info->bb_fragments--; in mb_free_blocks()
1954 e4b->bd_info->bb_fragments++; in mb_free_blocks()
1964 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; in mb_free_blocks()
1967 last -= !right_is_free; in mb_free_blocks()
1968 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1; in mb_free_blocks()
1974 mb_set_largest_free_order(sb, e4b->bd_info); in mb_free_blocks()
1975 mb_update_avg_fragment_size(sb, e4b->bd_info); in mb_free_blocks()
1987 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); in mb_find_extent()
1994 ex->fe_len = 0; in mb_find_extent()
1995 ex->fe_start = 0; in mb_find_extent()
1996 ex->fe_group = 0; in mb_find_extent()
2004 ex->fe_len = 1 << order; in mb_find_extent()
2005 ex->fe_start = block << order; in mb_find_extent()
2006 ex->fe_group = e4b->bd_group; in mb_find_extent()
2009 next = next - ex->fe_start; in mb_find_extent()
2010 ex->fe_len -= next; in mb_find_extent()
2011 ex->fe_start += next; in mb_find_extent()
2013 while (needed > ex->fe_len && in mb_find_extent()
2020 if (mb_test_bit(next, e4b->bd_bitmap)) in mb_find_extent()
2026 ex->fe_len += 1 << order; in mb_find_extent()
2029 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { in mb_find_extent()
2032 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, in mb_find_extent()
2035 block, order, needed, ex->fe_group, ex->fe_start, in mb_find_extent()
2036 ex->fe_len, ex->fe_logical); in mb_find_extent()
2037 ex->fe_len = 0; in mb_find_extent()
2038 ex->fe_start = 0; in mb_find_extent()
2039 ex->fe_group = 0; in mb_find_extent()
2041 return ex->fe_len; in mb_find_extent()
2050 int start = ex->fe_start; in mb_mark_used()
2051 int len = ex->fe_len; in mb_mark_used()
2057 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); in mb_mark_used()
2058 BUG_ON(e4b->bd_group != ex->fe_group); in mb_mark_used()
2059 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); in mb_mark_used()
2064 e4b->bd_info->bb_free -= len; in mb_mark_used()
2065 if (e4b->bd_info->bb_first_free == start) in mb_mark_used()
2066 e4b->bd_info->bb_first_free += len; in mb_mark_used()
2070 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); in mb_mark_used()
2071 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) in mb_mark_used()
2072 max = !mb_test_bit(start + len, e4b->bd_bitmap); in mb_mark_used()
2074 e4b->bd_info->bb_fragments++; in mb_mark_used()
2076 e4b->bd_info->bb_fragments--; in mb_mark_used()
2092 e4b->bd_info->bb_counters[ord]--; in mb_mark_used()
2094 len -= mlen; in mb_mark_used()
2107 e4b->bd_info->bb_counters[ord]--; in mb_mark_used()
2109 ord--; in mb_mark_used()
2114 e4b->bd_info->bb_counters[ord]++; in mb_mark_used()
2115 e4b->bd_info->bb_counters[ord]++; in mb_mark_used()
2118 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); in mb_mark_used()
2120 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); in mb_mark_used()
2121 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); in mb_mark_used()
2133 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_use_best_found()
2136 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); in ext4_mb_use_best_found()
2137 BUG_ON(ac->ac_status == AC_STATUS_FOUND); in ext4_mb_use_best_found()
2139 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); in ext4_mb_use_best_found()
2140 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; in ext4_mb_use_best_found()
2141 ret = mb_mark_used(e4b, &ac->ac_b_ex); in ext4_mb_use_best_found()
2145 ac->ac_f_ex = ac->ac_b_ex; in ext4_mb_use_best_found()
2147 ac->ac_status = AC_STATUS_FOUND; in ext4_mb_use_best_found()
2148 ac->ac_tail = ret & 0xffff; in ext4_mb_use_best_found()
2149 ac->ac_buddy = ret >> 16; in ext4_mb_use_best_found()
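/* [added note] the 'ret' decoded above is mb_mark_used()'s history hint,
 * packed as len | (ord << 16) at the first buddy split it performs;
 * ac_tail is thus the remaining length at that split and ac_buddy the
 * buddy order that satisfied it.
 */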
2158 ac->ac_bitmap_page = e4b->bd_bitmap_page; in ext4_mb_use_best_found()
2159 get_page(ac->ac_bitmap_page); in ext4_mb_use_best_found()
2160 ac->ac_buddy_page = e4b->bd_buddy_page; in ext4_mb_use_best_found()
2161 get_page(ac->ac_buddy_page); in ext4_mb_use_best_found()
2163 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { in ext4_mb_use_best_found()
2164 spin_lock(&sbi->s_md_lock); in ext4_mb_use_best_found()
2165 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; in ext4_mb_use_best_found()
2166 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; in ext4_mb_use_best_found()
2167 spin_unlock(&sbi->s_md_lock); in ext4_mb_use_best_found()
2174 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) in ext4_mb_use_best_found()
2183 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_check_limits()
2184 struct ext4_free_extent *bex = &ac->ac_b_ex; in ext4_mb_check_limits()
2185 struct ext4_free_extent *gex = &ac->ac_g_ex; in ext4_mb_check_limits()
2187 if (ac->ac_status == AC_STATUS_FOUND) in ext4_mb_check_limits()
2192 if (ac->ac_found > sbi->s_mb_max_to_scan && in ext4_mb_check_limits()
2193 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_check_limits()
2194 ac->ac_status = AC_STATUS_BREAK; in ext4_mb_check_limits()
2201 if (bex->fe_len < gex->fe_len) in ext4_mb_check_limits()
2204 if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) in ext4_mb_check_limits()
2222 * up to a max of sbi->s_mb_max_to_scan times (default 200). After
2226 * up to a max of sbi->s_mb_min_to_scan times (default 10) before
2236 struct ext4_free_extent *bex = &ac->ac_b_ex; in ext4_mb_measure_extent()
2237 struct ext4_free_extent *gex = &ac->ac_g_ex; in ext4_mb_measure_extent()
2239 BUG_ON(ex->fe_len <= 0); in ext4_mb_measure_extent()
2240 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); in ext4_mb_measure_extent()
2241 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); in ext4_mb_measure_extent()
2242 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); in ext4_mb_measure_extent()
2244 ac->ac_found++; in ext4_mb_measure_extent()
2245 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_measure_extent()
2248 * The special case - take what you catch first in ext4_mb_measure_extent()
2250 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_measure_extent()
2259 if (ex->fe_len == gex->fe_len) { in ext4_mb_measure_extent()
2268 if (bex->fe_len == 0) { in ext4_mb_measure_extent()
2276 if (bex->fe_len < gex->fe_len) { in ext4_mb_measure_extent()
2279 if (ex->fe_len > bex->fe_len) in ext4_mb_measure_extent()
2281 } else if (ex->fe_len > gex->fe_len) { in ext4_mb_measure_extent()
2285 if (ex->fe_len < bex->fe_len) in ext4_mb_measure_extent()
2296 struct ext4_free_extent ex = ac->ac_b_ex; in ext4_mb_try_best_found()
2302 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); in ext4_mb_try_best_found()
2306 ext4_lock_group(ac->ac_sb, group); in ext4_mb_try_best_found()
2307 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) in ext4_mb_try_best_found()
2313 ac->ac_b_ex = ex; in ext4_mb_try_best_found()
2318 ext4_unlock_group(ac->ac_sb, group); in ext4_mb_try_best_found()
2326 ext4_group_t group = ac->ac_g_ex.fe_group; in ext4_mb_find_by_goal()
2329 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_find_by_goal()
2330 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_find_by_goal()
2334 return -EFSCORRUPTED; in ext4_mb_find_by_goal()
2335 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) in ext4_mb_find_by_goal()
2337 if (grp->bb_free == 0) in ext4_mb_find_by_goal()
2340 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); in ext4_mb_find_by_goal()
2344 ext4_lock_group(ac->ac_sb, group); in ext4_mb_find_by_goal()
2345 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) in ext4_mb_find_by_goal()
2348 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, in ext4_mb_find_by_goal()
2349 ac->ac_g_ex.fe_len, &ex); in ext4_mb_find_by_goal()
2352 if (max >= ac->ac_g_ex.fe_len && in ext4_mb_find_by_goal()
2353 ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) { in ext4_mb_find_by_goal()
2356 start = ext4_grp_offs_to_block(ac->ac_sb, &ex); in ext4_mb_find_by_goal()
2357 /* use do_div to get remainder (would be 64-bit modulo) */ in ext4_mb_find_by_goal()
2358 if (do_div(start, sbi->s_stripe) == 0) { in ext4_mb_find_by_goal()
2359 ac->ac_found++; in ext4_mb_find_by_goal()
2360 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2363 } else if (max >= ac->ac_g_ex.fe_len) { in ext4_mb_find_by_goal()
2365 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); in ext4_mb_find_by_goal()
2366 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); in ext4_mb_find_by_goal()
2367 ac->ac_found++; in ext4_mb_find_by_goal()
2368 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2370 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { in ext4_mb_find_by_goal()
2374 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); in ext4_mb_find_by_goal()
2375 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); in ext4_mb_find_by_goal()
2376 ac->ac_found++; in ext4_mb_find_by_goal()
2377 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2381 ext4_unlock_group(ac->ac_sb, group); in ext4_mb_find_by_goal()
2395 struct super_block *sb = ac->ac_sb; in ext4_mb_simple_scan_group()
2396 struct ext4_group_info *grp = e4b->bd_info; in ext4_mb_simple_scan_group()
2402 BUG_ON(ac->ac_2order <= 0); in ext4_mb_simple_scan_group()
2403 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { in ext4_mb_simple_scan_group()
2404 if (grp->bb_counters[i] == 0) in ext4_mb_simple_scan_group()
2414 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, in ext4_mb_simple_scan_group()
2416 grp->bb_counters[i], i); in ext4_mb_simple_scan_group()
2417 ext4_mark_group_bitmap_corrupted(ac->ac_sb, in ext4_mb_simple_scan_group()
2418 e4b->bd_group, in ext4_mb_simple_scan_group()
2422 ac->ac_found++; in ext4_mb_simple_scan_group()
2423 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_simple_scan_group()
2425 ac->ac_b_ex.fe_len = 1 << i; in ext4_mb_simple_scan_group()
2426 ac->ac_b_ex.fe_start = k << i; in ext4_mb_simple_scan_group()
2427 ac->ac_b_ex.fe_group = e4b->bd_group; in ext4_mb_simple_scan_group()
2431 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); in ext4_mb_simple_scan_group()
2433 if (EXT4_SB(sb)->s_mb_stats) in ext4_mb_simple_scan_group()
2434 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); in ext4_mb_simple_scan_group()
2449 struct super_block *sb = ac->ac_sb; in ext4_mb_complex_scan_group()
2450 void *bitmap = e4b->bd_bitmap; in ext4_mb_complex_scan_group()
2455 free = e4b->bd_info->bb_free; in ext4_mb_complex_scan_group()
2459 i = e4b->bd_info->bb_first_free; in ext4_mb_complex_scan_group()
2461 while (free && ac->ac_status == AC_STATUS_CONTINUE) { in ext4_mb_complex_scan_group()
2470 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, in ext4_mb_complex_scan_group()
2474 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, in ext4_mb_complex_scan_group()
2479 if (!ext4_mb_cr_expensive(ac->ac_criteria)) { in ext4_mb_complex_scan_group()
2488 freelen = j - i; in ext4_mb_complex_scan_group()
2490 if (freelen < ac->ac_g_ex.fe_len) { in ext4_mb_complex_scan_group()
2492 free -= freelen; in ext4_mb_complex_scan_group()
2497 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); in ext4_mb_complex_scan_group()
2501 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, in ext4_mb_complex_scan_group()
2505 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, in ext4_mb_complex_scan_group()
2518 free -= ex.fe_len; in ext4_mb_complex_scan_group()
2526 * we try to find stripe-aligned chunks for stripe-size-multiple requests
2532 struct super_block *sb = ac->ac_sb; in ext4_mb_scan_aligned()
2534 void *bitmap = e4b->bd_bitmap; in ext4_mb_scan_aligned()
2541 BUG_ON(sbi->s_stripe == 0); in ext4_mb_scan_aligned()
2543 /* find first stripe-aligned block in group */ in ext4_mb_scan_aligned()
2544 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); in ext4_mb_scan_aligned()
2546 a = first_group_block + sbi->s_stripe - 1; in ext4_mb_scan_aligned()
2547 do_div(a, sbi->s_stripe); in ext4_mb_scan_aligned()
2548 i = (a * sbi->s_stripe) - first_group_block; in ext4_mb_scan_aligned()
2550 stripe = EXT4_B2C(sbi, sbi->s_stripe); in ext4_mb_scan_aligned()
2556 ac->ac_found++; in ext4_mb_scan_aligned()
2557 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_scan_aligned()
2559 ac->ac_b_ex = ex; in ext4_mb_scan_aligned()
2577 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); in ext4_mb_good_group()
2578 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_good_group()
2585 free = grp->bb_free; in ext4_mb_good_group()
2589 fragments = grp->bb_fragments; in ext4_mb_good_group()
2595 BUG_ON(ac->ac_2order == 0); in ext4_mb_good_group()
2598 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && in ext4_mb_good_group()
2603 if (free < ac->ac_g_ex.fe_len) in ext4_mb_good_group()
2606 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) in ext4_mb_good_group()
2609 if (grp->bb_largest_free_order < ac->ac_2order) in ext4_mb_good_group()
2615 if ((free / fragments) >= ac->ac_g_ex.fe_len) in ext4_mb_good_group()
2619 if (free >= ac->ac_g_ex.fe_len) in ext4_mb_good_group()
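/* [added summary] per-criterion gating in ext4_mb_good_group():
 *	CR_POWER2_ALIGNED: need a free buddy of order >= ac_2order
 *	 (the bb_largest_free_order check above);
 *	CR_GOAL_LEN_FAST / CR_BEST_AVAIL_LEN: average free extent size
 *	 (free / fragments) must reach the goal length;
 *	CR_GOAL_LEN_SLOW: total free space must reach the goal length;
 *	CR_ANY_FREE: any free space qualifies.
 */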
2645 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_good_group_nolock()
2646 struct super_block *sb = ac->ac_sb; in ext4_mb_good_group_nolock()
2648 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; in ext4_mb_good_group_nolock()
2653 return -EFSCORRUPTED; in ext4_mb_good_group_nolock()
2654 if (sbi->s_mb_stats) in ext4_mb_good_group_nolock()
2655 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); in ext4_mb_good_group_nolock()
2660 free = grp->bb_free; in ext4_mb_good_group_nolock()
2668 if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len) in ext4_mb_good_group_nolock()
2693 (!sbi->s_log_groups_per_flex || in ext4_mb_good_group_nolock()
2694 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && in ext4_mb_good_group_nolock()
2696 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) in ext4_mb_good_group_nolock()
2728 while (nr-- > 0) { in ext4_mb_prefetch()
2775 while (nr-- > 0) { in ext4_mb_prefetch_fini()
2778 group--; in ext4_mb_prefetch_fini()
2802 sb = ac->ac_sb; in ext4_mb_regular_allocator()
2805 /* non-extent files are limited to low blocks/groups */ in ext4_mb_regular_allocator()
2806 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) in ext4_mb_regular_allocator()
2807 ngroups = sbi->s_blockfile_groups; in ext4_mb_regular_allocator()
2809 BUG_ON(ac->ac_status == AC_STATUS_FOUND); in ext4_mb_regular_allocator()
2813 if (err || ac->ac_status == AC_STATUS_FOUND) in ext4_mb_regular_allocator()
2816 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) in ext4_mb_regular_allocator()
2820 * ac->ac_2order is set only if the fe_len is a power of 2 in ext4_mb_regular_allocator()
2821 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED in ext4_mb_regular_allocator()
2824 i = fls(ac->ac_g_ex.fe_len); in ext4_mb_regular_allocator()
2825 ac->ac_2order = 0; in ext4_mb_regular_allocator()
2830 * We also support searching for power-of-two requests only for in ext4_mb_regular_allocator()
2833 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { in ext4_mb_regular_allocator()
2834 if (is_power_of_2(ac->ac_g_ex.fe_len)) in ext4_mb_regular_allocator()
2835 ac->ac_2order = array_index_nospec(i - 1, in ext4_mb_regular_allocator()
2840 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { in ext4_mb_regular_allocator()
2842 spin_lock(&sbi->s_md_lock); in ext4_mb_regular_allocator()
2843 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; in ext4_mb_regular_allocator()
2844 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; in ext4_mb_regular_allocator()
2845 spin_unlock(&sbi->s_md_lock); in ext4_mb_regular_allocator()
2849 * Let's just scan groups to find more or less suitable blocks. We in ext4_mb_regular_allocator()
2853 if (ac->ac_2order) in ext4_mb_regular_allocator()
2856 for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) { in ext4_mb_regular_allocator()
2857 ac->ac_criteria = cr; in ext4_mb_regular_allocator()
2862 group = ac->ac_g_ex.fe_group; in ext4_mb_regular_allocator()
2863 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; in ext4_mb_regular_allocator()
2884 prefetch_ios < sbi->s_mb_prefetch_limit)) { in ext4_mb_regular_allocator()
2885 nr = sbi->s_mb_prefetch; in ext4_mb_regular_allocator()
2887 nr = 1 << sbi->s_log_groups_per_flex; in ext4_mb_regular_allocator()
2888 nr -= group & (nr - 1); in ext4_mb_regular_allocator()
2889 nr = min(nr, sbi->s_mb_prefetch); in ext4_mb_regular_allocator()
2920 ac->ac_groups_scanned++; in ext4_mb_regular_allocator()
2925 sbi->s_stripe && in ext4_mb_regular_allocator()
2926 !(ac->ac_g_ex.fe_len % in ext4_mb_regular_allocator()
2927 EXT4_B2C(sbi, sbi->s_stripe))) in ext4_mb_regular_allocator()
2935 if (ac->ac_status != AC_STATUS_CONTINUE) in ext4_mb_regular_allocator()
2939 if (sbi->s_mb_stats && i == ngroups) in ext4_mb_regular_allocator()
2940 atomic64_inc(&sbi->s_bal_cX_failed[cr]); in ext4_mb_regular_allocator()
2942 if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN) in ext4_mb_regular_allocator()
2945 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; in ext4_mb_regular_allocator()
2948 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && in ext4_mb_regular_allocator()
2949 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_regular_allocator()
2955 if (ac->ac_status != AC_STATUS_FOUND) { in ext4_mb_regular_allocator()
2961 lost = atomic_inc_return(&sbi->s_mb_lost_chunks); in ext4_mb_regular_allocator()
2963 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, in ext4_mb_regular_allocator()
2964 ac->ac_b_ex.fe_len, lost); in ext4_mb_regular_allocator()
2966 ac->ac_b_ex.fe_group = 0; in ext4_mb_regular_allocator()
2967 ac->ac_b_ex.fe_start = 0; in ext4_mb_regular_allocator()
2968 ac->ac_b_ex.fe_len = 0; in ext4_mb_regular_allocator()
2969 ac->ac_status = AC_STATUS_CONTINUE; in ext4_mb_regular_allocator()
2970 ac->ac_flags |= EXT4_MB_HINT_FIRST; in ext4_mb_regular_allocator()
2976 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) in ext4_mb_regular_allocator()
2977 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); in ext4_mb_regular_allocator()
2979 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) in ext4_mb_regular_allocator()
2983 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, in ext4_mb_regular_allocator()
2984 ac->ac_flags, cr, err); in ext4_mb_regular_allocator()
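/* [added summary] shape of ext4_mb_regular_allocator(): try the goal
 * group first (ext4_mb_find_by_goal()), then walk the criteria
 * CR_POWER2_ALIGNED (power-of-two requests only) -> CR_GOAL_LEN_FAST ->
 * CR_BEST_AVAIL_LEN -> CR_GOAL_LEN_SLOW -> CR_ANY_FREE, scanning up to
 * ngroups groups per criterion; as a last resort the best extent found so
 * far is taken by retrying with EXT4_MB_HINT_FIRST set.
 */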
2994 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_groups_start()
3005 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_groups_next()
3017 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_groups_show()
3024 sb->s_blocksize_bits, in ext4_mb_seq_groups_show()
3031 group--; in ext4_mb_seq_groups_show()
3047 seq_printf(seq, "#%-5u: I/O error\n", group); in ext4_mb_seq_groups_show()
3058 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, in ext4_mb_seq_groups_show()
3061 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? in ext4_mb_seq_groups_show()
3081 struct super_block *sb = seq->private; in ext4_seq_mb_stats_show()
3085 if (!sbi->s_mb_stats) { in ext4_seq_mb_stats_show()
3092 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); in ext4_seq_mb_stats_show()
3093 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); in ext4_seq_mb_stats_show()
3096 atomic_read(&sbi->s_bal_groups_scanned)); in ext4_seq_mb_stats_show()
3101 atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED])); in ext4_seq_mb_stats_show()
3105 &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED])); in ext4_seq_mb_stats_show()
3107 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED])); in ext4_seq_mb_stats_show()
3109 atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED])); in ext4_seq_mb_stats_show()
3111 atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions)); in ext4_seq_mb_stats_show()
3116 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST])); in ext4_seq_mb_stats_show()
3119 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST])); in ext4_seq_mb_stats_show()
3121 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST])); in ext4_seq_mb_stats_show()
3123 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST])); in ext4_seq_mb_stats_show()
3125 atomic_read(&sbi->s_bal_goal_fast_bad_suggestions)); in ext4_seq_mb_stats_show()
3130 atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN])); in ext4_seq_mb_stats_show()
3134 &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN])); in ext4_seq_mb_stats_show()
3136 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN])); in ext4_seq_mb_stats_show()
3138 atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN])); in ext4_seq_mb_stats_show()
3140 atomic_read(&sbi->s_bal_best_avail_bad_suggestions)); in ext4_seq_mb_stats_show()
3145 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW])); in ext4_seq_mb_stats_show()
3148 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW])); in ext4_seq_mb_stats_show()
3150 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW])); in ext4_seq_mb_stats_show()
3152 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW])); in ext4_seq_mb_stats_show()
3157 atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE])); in ext4_seq_mb_stats_show()
3160 atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE])); in ext4_seq_mb_stats_show()
3162 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE])); in ext4_seq_mb_stats_show()
3164 atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE])); in ext4_seq_mb_stats_show()
3168 atomic_read(&sbi->s_bal_ex_scanned)); in ext4_seq_mb_stats_show()
3169 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); in ext4_seq_mb_stats_show()
3171 atomic_read(&sbi->s_bal_len_goals)); in ext4_seq_mb_stats_show()
3172 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); in ext4_seq_mb_stats_show()
3173 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); in ext4_seq_mb_stats_show()
3174 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); in ext4_seq_mb_stats_show()
3176 atomic_read(&sbi->s_mb_buddies_generated), in ext4_seq_mb_stats_show()
3179 atomic64_read(&sbi->s_mb_generation_time)); in ext4_seq_mb_stats_show()
3181 atomic_read(&sbi->s_mb_preallocated)); in ext4_seq_mb_stats_show()
3182 seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded)); in ext4_seq_mb_stats_show()
3187 __acquires(&EXT4_SB(sb)->s_mb_rb_lock) in ext4_mb_seq_structs_summary_start()
3189 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_structs_summary_start()
3200 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_structs_summary_next()
3212 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_structs_summary_show()
3218 position--; in ext4_mb_seq_structs_summary_show()
3220 position -= MB_NUM_ORDERS(sb); in ext4_mb_seq_structs_summary_show()
3225 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); in ext4_mb_seq_structs_summary_show()
3226 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], in ext4_mb_seq_structs_summary_show()
3229 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); in ext4_mb_seq_structs_summary_show()
3241 read_lock(&sbi->s_mb_largest_free_orders_locks[position]); in ext4_mb_seq_structs_summary_show()
3242 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], in ext4_mb_seq_structs_summary_show()
3245 read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); in ext4_mb_seq_structs_summary_show()
3265 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; in get_groupinfo_cache()
3273 * Allocate the top-level s_group_info array for the specified number
3282 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> in ext4_mb_alloc_groupinfo()
3284 if (size <= sbi->s_group_info_size) in ext4_mb_alloc_groupinfo()
3287 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); in ext4_mb_alloc_groupinfo()
3291 return -ENOMEM; in ext4_mb_alloc_groupinfo()
3294 old_groupinfo = rcu_dereference(sbi->s_group_info); in ext4_mb_alloc_groupinfo()
3297 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); in ext4_mb_alloc_groupinfo()
3299 rcu_assign_pointer(sbi->s_group_info, new_groupinfo); in ext4_mb_alloc_groupinfo()
3300 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); in ext4_mb_alloc_groupinfo()
3304 sbi->s_group_info_size); in ext4_mb_alloc_groupinfo()
3317 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); in ext4_mb_add_groupinfo()
3331 return -ENOMEM; in ext4_mb_add_groupinfo()
3334 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; in ext4_mb_add_groupinfo()
3339 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); in ext4_mb_add_groupinfo()
3347 &(meta_group_info[i]->bb_state)); in ext4_mb_add_groupinfo()
3354 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { in ext4_mb_add_groupinfo()
3355 meta_group_info[i]->bb_free = in ext4_mb_add_groupinfo()
3358 meta_group_info[i]->bb_free = in ext4_mb_add_groupinfo()
3362 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); in ext4_mb_add_groupinfo()
3363 init_rwsem(&meta_group_info[i]->alloc_sem); in ext4_mb_add_groupinfo()
3364 meta_group_info[i]->bb_free_root = RB_ROOT; in ext4_mb_add_groupinfo()
3365 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); in ext4_mb_add_groupinfo()
3366 INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); in ext4_mb_add_groupinfo()
3367 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ in ext4_mb_add_groupinfo()
3368 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ in ext4_mb_add_groupinfo()
3369 meta_group_info[i]->bb_group = group; in ext4_mb_add_groupinfo()
3380 group_info = rcu_dereference(sbi->s_group_info); in ext4_mb_add_groupinfo()
3385 return -ENOMEM; in ext4_mb_add_groupinfo()
3402 sbi->s_buddy_cache = new_inode(sb); in ext4_mb_init_backend()
3403 if (sbi->s_buddy_cache == NULL) { in ext4_mb_init_backend()
3407 /* To avoid potentially colliding with a valid on-disk inode number, in ext4_mb_init_backend()
3411 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; in ext4_mb_init_backend()
3412 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; in ext4_mb_init_backend()
3429 if (sbi->s_es->s_log_groups_per_flex >= 32) { in ext4_mb_init_backend()
3433 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, in ext4_mb_init_backend()
3434 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); in ext4_mb_init_backend()
3435 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ in ext4_mb_init_backend()
3437 sbi->s_mb_prefetch = 32; in ext4_mb_init_backend()
3439 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) in ext4_mb_init_backend()
3440 sbi->s_mb_prefetch = ext4_get_groups_count(sb); in ext4_mb_init_backend()
3442 * given cr=0 is a CPU-related optimization we shouldn't try to in ext4_mb_init_backend()
3448 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; in ext4_mb_init_backend()
3449 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) in ext4_mb_init_backend()
3450 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); in ext4_mb_init_backend()
3455 cachep = get_groupinfo_cache(sb->s_blocksize_bits); in ext4_mb_init_backend()
3456 while (i-- > 0) { in ext4_mb_init_backend()
3462 i = sbi->s_group_info_size; in ext4_mb_init_backend()
3464 group_info = rcu_dereference(sbi->s_group_info); in ext4_mb_init_backend()
3465 while (i-- > 0) in ext4_mb_init_backend()
3468 iput(sbi->s_buddy_cache); in ext4_mb_init_backend()
3471 kvfree(rcu_dereference(sbi->s_group_info)); in ext4_mb_init_backend()
3473 return -ENOMEM; in ext4_mb_init_backend()
3491 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; in ext4_groupinfo_create_slab()
3495 return -EINVAL; in ext4_groupinfo_create_slab()
3518 "EXT4-fs: no memory for groupinfo slab cache\n"); in ext4_groupinfo_create_slab()
3519 return -ENOMEM; in ext4_groupinfo_create_slab()
3529 struct super_block *sb = sbi->s_sb; in ext4_discard_work()
3536 spin_lock(&sbi->s_md_lock); in ext4_discard_work()
3537 list_splice_init(&sbi->s_discard_list, &discard_list); in ext4_discard_work()
3538 spin_unlock(&sbi->s_md_lock); in ext4_discard_work()
3546 if ((sb->s_flags & SB_ACTIVE) && !err && in ext4_discard_work()
3547 !atomic_read(&sbi->s_retry_alloc_pending)) { in ext4_discard_work()
3548 grp = fd->efd_group; in ext4_discard_work()
3564 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, in ext4_discard_work()
3565 fd->efd_start_cluster + fd->efd_count - 1, 1); in ext4_discard_work()
3583 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); in ext4_mb_init()
3585 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); in ext4_mb_init()
3586 if (sbi->s_mb_offsets == NULL) { in ext4_mb_init()
3587 ret = -ENOMEM; in ext4_mb_init()
3591 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); in ext4_mb_init()
3592 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); in ext4_mb_init()
3593 if (sbi->s_mb_maxs == NULL) { in ext4_mb_init()
3594 ret = -ENOMEM; in ext4_mb_init()
3598 ret = ext4_groupinfo_create_slab(sb->s_blocksize); in ext4_mb_init()
3603 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; in ext4_mb_init()
3604 sbi->s_mb_offsets[0] = 0; in ext4_mb_init()
3608 offset_incr = 1 << (sb->s_blocksize_bits - 1); in ext4_mb_init()
3609 max = sb->s_blocksize << 2; in ext4_mb_init()
3611 sbi->s_mb_offsets[i] = offset; in ext4_mb_init()
3612 sbi->s_mb_maxs[i] = max; in ext4_mb_init()
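/* [added worked example] with 4 KiB blocks (s_blocksize_bits = 12) the
 * loop above produces:
 *	s_mb_offsets[] = { 0, 0, 2048, 3072, 3584, ... }	(bytes)
 *	s_mb_maxs[]    = { 32768, 16384, 8192, 4096, 2048, ... }	(bits)
 * order 0 being the 32768-bit block bitmap and each higher order's buddy
 * bitmap half the size of the previous one, packed into a single block.
 */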
3619 sbi->s_mb_avg_fragment_size = in ext4_mb_init()
3622 if (!sbi->s_mb_avg_fragment_size) { in ext4_mb_init()
3623 ret = -ENOMEM; in ext4_mb_init()
3626 sbi->s_mb_avg_fragment_size_locks = in ext4_mb_init()
3629 if (!sbi->s_mb_avg_fragment_size_locks) { in ext4_mb_init()
3630 ret = -ENOMEM; in ext4_mb_init()
3634 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]); in ext4_mb_init()
3635 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]); in ext4_mb_init()
3637 sbi->s_mb_largest_free_orders = in ext4_mb_init()
3640 if (!sbi->s_mb_largest_free_orders) { in ext4_mb_init()
3641 ret = -ENOMEM; in ext4_mb_init()
3644 sbi->s_mb_largest_free_orders_locks = in ext4_mb_init()
3647 if (!sbi->s_mb_largest_free_orders_locks) { in ext4_mb_init()
3648 ret = -ENOMEM; in ext4_mb_init()
3652 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]); in ext4_mb_init()
3653 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]); in ext4_mb_init()
3656 spin_lock_init(&sbi->s_md_lock); in ext4_mb_init()
3657 sbi->s_mb_free_pending = 0; in ext4_mb_init()
3658 INIT_LIST_HEAD(&sbi->s_freed_data_list); in ext4_mb_init()
3659 INIT_LIST_HEAD(&sbi->s_discard_list); in ext4_mb_init()
3660 INIT_WORK(&sbi->s_discard_work, ext4_discard_work); in ext4_mb_init()
3661 atomic_set(&sbi->s_retry_alloc_pending, 0); in ext4_mb_init()
3663 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; in ext4_mb_init()
3664 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; in ext4_mb_init()
3665 sbi->s_mb_stats = MB_DEFAULT_STATS; in ext4_mb_init()
3666 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; in ext4_mb_init()
3667 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; in ext4_mb_init()
3668 sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER; in ext4_mb_init()
3682 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> in ext4_mb_init()
3683 sbi->s_cluster_bits, 32); in ext4_mb_init()
3692 if (sbi->s_stripe > 1) { in ext4_mb_init()
3693 sbi->s_mb_group_prealloc = roundup( in ext4_mb_init()
3694 sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe)); in ext4_mb_init()
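	/*
	 * Illustrative example (numbers are assumptions, not from the code
	 * above): with -O stripe=16 on a 4k-block, non-bigalloc filesystem,
	 * EXT4_B2C() leaves the stripe at 16 clusters, so a default group
	 * prealloc of 512 is already aligned and roundup() is a no-op; a
	 * stripe of 24 would round it up to 528.
	 */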
3697 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); in ext4_mb_init()
3698 if (sbi->s_locality_groups == NULL) { in ext4_mb_init()
3699 ret = -ENOMEM; in ext4_mb_init()
3704 lg = per_cpu_ptr(sbi->s_locality_groups, i); in ext4_mb_init()
3705 mutex_init(&lg->lg_mutex); in ext4_mb_init()
3707 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); in ext4_mb_init()
3708 spin_lock_init(&lg->lg_prealloc_lock); in ext4_mb_init()
3711 if (bdev_nonrot(sb->s_bdev)) in ext4_mb_init()
3712 sbi->s_mb_max_linear_groups = 0; in ext4_mb_init()
3714 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT; in ext4_mb_init()
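	/*
	 * Linear (serialized) scanning of groups only pays off when seek
	 * locality matters, so it is disabled entirely on non-rotational
	 * devices and capped at MB_DEFAULT_LINEAR_LIMIT groups otherwise.
	 */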
3723 free_percpu(sbi->s_locality_groups); in ext4_mb_init()
3724 sbi->s_locality_groups = NULL; in ext4_mb_init()
3726 kfree(sbi->s_mb_avg_fragment_size); in ext4_mb_init()
3727 kfree(sbi->s_mb_avg_fragment_size_locks); in ext4_mb_init()
3728 kfree(sbi->s_mb_largest_free_orders); in ext4_mb_init()
3729 kfree(sbi->s_mb_largest_free_orders_locks); in ext4_mb_init()
3730 kfree(sbi->s_mb_offsets); in ext4_mb_init()
3731 sbi->s_mb_offsets = NULL; in ext4_mb_init()
3732 kfree(sbi->s_mb_maxs); in ext4_mb_init()
3733 sbi->s_mb_maxs = NULL; in ext4_mb_init()
	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
		list_del(&pa->pa_group_list);
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
		flush_work(&sbi->s_discard_work);
		WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
	if (sbi->s_group_info) {
				EXT4_DESC_PER_BLOCK(sb) - 1) >>
		group_info = rcu_dereference(sbi->s_group_info);
	kfree(sbi->s_mb_avg_fragment_size);
	kfree(sbi->s_mb_avg_fragment_size_locks);
	kfree(sbi->s_mb_largest_free_orders);
	kfree(sbi->s_mb_largest_free_orders_locks);
	kfree(sbi->s_mb_offsets);
	kfree(sbi->s_mb_maxs);
	iput(sbi->s_buddy_cache);
	if (sbi->s_mb_stats) {
			 atomic_read(&sbi->s_bal_allocated),
			 atomic_read(&sbi->s_bal_reqs),
			 atomic_read(&sbi->s_bal_success));
			 atomic_read(&sbi->s_bal_ex_scanned),
			 atomic_read(&sbi->s_bal_groups_scanned),
			 atomic_read(&sbi->s_bal_goals),
			 atomic_read(&sbi->s_bal_2orders),
			 atomic_read(&sbi->s_bal_breaks),
			 atomic_read(&sbi->s_mb_lost_chunks));
			 atomic_read(&sbi->s_mb_buddies_generated),
			 atomic64_read(&sbi->s_mb_generation_time));
			 atomic_read(&sbi->s_mb_preallocated),
			 atomic_read(&sbi->s_mb_discarded));
	free_percpu(sbi->s_locality_groups);
	return __blkdev_issue_discard(sb->s_bdev,
			(sector_t)discard_block << (sb->s_blocksize_bits - 9),
			(sector_t)count << (sb->s_blocksize_bits - 9),
		 entry->efd_count, entry->efd_group, entry);
	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
	spin_lock(&EXT4_SB(sb)->s_md_lock);
	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
	spin_unlock(&EXT4_SB(sb)->s_md_lock);
	count += entry->efd_count;
	ext4_lock_group(sb, entry->efd_group);
	rb_erase(&entry->efd_node, &(db->bb_free_root));
	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
	 * If the volume is mounted with -o discard, online discard
	if (!db->bb_free_root.rb_node) {
	ext4_unlock_group(sb, entry->efd_group);
	spin_lock(&sbi->s_md_lock);
	list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
		if (entry->efd_tid != commit_tid)
		cut_pos = &entry->efd_list;
	list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
	spin_unlock(&sbi->s_md_lock);
	spin_lock(&sbi->s_md_lock);
	wake = list_empty(&sbi->s_discard_list);
	list_splice_tail(&freed_data_list, &sbi->s_discard_list);
	spin_unlock(&sbi->s_md_lock);
		queue_work(system_unbound_wq, &sbi->s_discard_work);
	return -ENOMEM;
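	/*
	 * The discard work runs from system_unbound_wq: issuing discards to
	 * a slow device can take a long time, and an unbound worker keeps
	 * that latency off the journal-commit path that queues the work.
	 */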
 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);
	sb = ac->ac_sb;
	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
	err = -EIO;
	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
		mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
			    ac->ac_b_ex.fe_len);
		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
			err = -EFSCORRUPTED;
	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
	for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
		BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
				   bitmap_bh->b_data));
	mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
		    ac->ac_b_ex.fe_len);
	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
					     ac->ac_b_ex.fe_group, gdp));
	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
		/* release all the reserved blocks if non delalloc */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
	if (sbi->s_log_groups_per_flex) {
						  ac->ac_b_ex.fe_group);
		atomic64_sub(ac->ac_b_ex.fe_len,
					  flex_group)->free_clusters);
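	/*
	 * The loop above (the elided context suggests it is compiled in
	 * only when aggressive checking is enabled) verifies that none of
	 * the chosen clusters is already set in the on-disk bitmap before
	 * the allocation is committed.
	 */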
				 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
			ext4_error(sb, "Marking blocks in system zone - "
			err = -EIO;
			if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
		clen_changed = clen - already;
			mb_set_bits(bitmap_bh->b_data, blkoff, clen);
			mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
		clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
		if (sbi->s_log_groups_per_flex) {
				atomic64_sub(clen_changed, &fg->free_clusters);
				atomic64_add(clen_changed, &fg->free_clusters);
		len -= thisgrp_len;
 * here we normalize the request for a locality group
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;
	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
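/*
 * Note that the group goal length is taken verbatim from
 * s_mb_group_prealloc: the locality group deliberately over-allocates so
 * that subsequent small requests from the same CPU can be carved out of
 * the leftover preallocation.
 */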
 * (ei->i_prealloc_lock)
		return node->rb_left;
		return node->rb_right;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	read_lock(&ei->i_prealloc_lock);
	for (iter = ei->i_prealloc_node.rb_node; iter;
		tmp_pa_start = tmp_pa->pa_lstart;
		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted == 0)
		spin_unlock(&tmp_pa->pa_lock);
	read_unlock(&ei->i_prealloc_lock);
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
	loff_t new_end, tmp_pa_end, left_pa_end = -1;
	read_lock(&ei->i_prealloc_lock);
	for (iter = ei->i_prealloc_node.rb_node; iter;
	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
		tmp_pa_start = tmp_pa->pa_lstart;
		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted == 0)
			BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
				 ac->ac_o_ex.fe_logical < tmp_pa_start));
		spin_unlock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
			tmp = rb_next(&left_pa->pa_node.inode_node);
			tmp = rb_prev(&right_pa->pa_node.inode_node);
		for (iter = &left_pa->pa_node.inode_node;;
			spin_lock(&tmp_pa->pa_lock);
			if (tmp_pa->pa_deleted == 0) {
				spin_unlock(&tmp_pa->pa_lock);
			spin_unlock(&tmp_pa->pa_lock);
		for (iter = &right_pa->pa_node.inode_node;;
			spin_lock(&tmp_pa->pa_lock);
			if (tmp_pa->pa_deleted == 0) {
				spin_unlock(&tmp_pa->pa_lock);
			spin_unlock(&tmp_pa->pa_lock);
		BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
		right_pa_start = right_pa->pa_lstart;
		BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
	read_unlock(&ei->i_prealloc_lock);
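	/*
	 * In short: using the rbtree ordered by pa_lstart, find the nearest
	 * non-deleted PAs to the left and right of the original request and
	 * shrink the normalized window so it overlaps neither; the window
	 * must still contain ac->ac_o_ex.fe_logical itself.
	 */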
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_super_block *es = sbi->s_es;
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
	bsbits = ac->ac_sb->s_blocksize_bits;
	size = extent_logical_end(sbi, &ac->ac_o_ex);
	if (size < i_size_read(ac->ac_inode))
		size = i_size_read(ac->ac_inode);
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(21 - bsbits)) << 21;
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(22 - bsbits)) << 22;
	} else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(23 - bsbits)) << 23;
		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
		size = (loff_t) EXT4_C2B(sbi,
					 ac->ac_o_ex.fe_len) << bsbits;
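	/*
	 * Worked example for the branch above, assuming 4k blocks
	 * (bsbits == 12): a request whose normalized size falls in the
	 * 8 MB bucket gets start_off = (fe_logical >> 11) << 23, i.e. the
	 * window is aligned down to an 8 MB boundary in byte terms.
	 */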
	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
	size = EXT_MAX_BLOCKS - start;
	if (ar->pleft && start <= ar->lleft) {
		size -= ar->lleft + 1 - start;
		start = ar->lleft + 1;
	if (ar->pright && start + size - 1 >= ar->lright)
		size -= start + size - ar->lright;
	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
	size = end - start;
	 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
	 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
	if (start + size <= ac->ac_o_ex.fe_logical ||
	    start > ac->ac_o_ex.fe_logical) {
		ext4_msg(ac->ac_sb, KERN_ERR,
			 (unsigned long) ac->ac_o_ex.fe_logical);
	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	ac->ac_g_ex.fe_logical = start;
	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
	if (ar->pright && (ar->lright == (start + size)) &&
	    ar->pright >= size &&
	    ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
					     &ac->ac_g_ex.fe_group,
					     &ac->ac_g_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	if (ar->pleft && (ar->lleft + 1 == start) &&
	    ar->pleft + 1 < ext4_blocks_count(es)) {
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
					     &ac->ac_g_ex.fe_group,
					     &ac->ac_g_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
		atomic_inc(&sbi->s_bal_reqs);
		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
			atomic_inc(&sbi->s_bal_success);
		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
			atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
			atomic_inc(&sbi->s_bal_goals);
		if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len)
			atomic_inc(&sbi->s_bal_len_goals);
		if (ac->ac_found > sbi->s_mb_max_to_scan)
			atomic_inc(&sbi->s_bal_breaks);
	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
	struct ext4_prealloc_space *pa = ac->ac_pa;
	if (ac->ac_f_ex.fe_len == 0)
	err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
	ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
	mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
		       ac->ac_f_ex.fe_len);
	ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
	if (pa->pa_type == MB_INODE_PA) {
		spin_lock(&pa->pa_lock);
		pa->pa_free += ac->ac_b_ex.fe_len;
		spin_unlock(&pa->pa_lock);
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
	len = EXT4_NUM_B2C(sbi, end - start);
	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
				     &ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;
	BUG_ON(start < pa->pa_pstart);
	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
	BUG_ON(pa->pa_free < len);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);
	pa->pa_free -= len;
	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
 * use blocks preallocated to locality group
	unsigned int len = ac->ac_o_ex.fe_len;
	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
				     &ac->ac_b_ex.fe_group,
				     &ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;
	 * in on-disk bitmap -- see ext4_mb_release_context()
	mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
		 pa->pa_lstart, len, pa);
		atomic_inc(&pa->pa_count);
	cur_distance = abs(goal_block - cpa->pa_pstart);
	new_distance = abs(goal_block - pa->pa_pstart);
	atomic_dec(&cpa->pa_count);
	atomic_inc(&pa->pa_count);
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
	start = pa->pa_pstart +
		(ac->ac_g_ex.fe_logical - pa->pa_lstart);
	if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
	if (ac->ac_g_ex.fe_len > pa->pa_len -
	    EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
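/*
 * Unlike an inode PA, a group PA hands out blocks from its current start:
 * pa_pstart/pa_lstart advance and pa_len/pa_free shrink only later, in
 * ext4_mb_release_context(), by however much was actually consumed
 * (ac->ac_b_ex.fe_len is zeroed first on the failure path).
 */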
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
	 * first, try per-file preallocation by searching the inode pa rbtree.
	read_lock(&ei->i_prealloc_lock);
	if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
	 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
	for (iter = ei->i_prealloc_node.rb_node; iter;
	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
					    tmp_pa->pa_lstart, iter)) {
		if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
			tmp = rb_prev(&tmp_pa->pa_node.inode_node);
	BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
	for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted == 0) {
			spin_unlock(&tmp_pa->pa_lock);
	BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
	BUG_ON(tmp_pa->pa_deleted == 1);
	if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
		spin_unlock(&tmp_pa->pa_lock);
	/* non-extent files can't have physical blocks past 2^32 */
	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
	    (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
		spin_unlock(&tmp_pa->pa_lock);
	if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
		atomic_inc(&tmp_pa->pa_count);
		spin_unlock(&tmp_pa->pa_lock);
		read_unlock(&ei->i_prealloc_lock);
	WARN_ON_ONCE(tmp_pa->pa_free == 0);
	spin_unlock(&tmp_pa->pa_lock);
	read_unlock(&ei->i_prealloc_lock);
	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
	/* inode may have no locality group for some reason */
	lg = ac->ac_lg;
	order = fls(ac->ac_o_ex.fe_len) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		order = PREALLOC_TB_SIZE - 1;
	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
		list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
			spin_lock(&tmp_pa->pa_lock);
			if (tmp_pa->pa_deleted == 0 &&
					tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
			spin_unlock(&tmp_pa->pa_lock);
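	/*
	 * lg_prealloc_list is bucketed by allocation order:
	 * order = min(fls(len) - 1, PREALLOC_TB_SIZE - 1), so e.g. a
	 * request for 24 clusters (fls = 5) starts the scan at bucket 4.
	 */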
 * used in in-core bitmap. buddy must be generated from this bitmap
	list_for_each(cur, &grp->bb_prealloc_list) {
		spin_lock(&pa->pa_lock);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
		len = pa->pa_len;
		spin_unlock(&pa->pa_lock);
	if (pa->pa_deleted) {
			     pa->pa_type, pa->pa_pstart, pa->pa_lstart,
			     pa->pa_len);
	pa->pa_deleted = 1;
	if (pa->pa_type == MB_INODE_PA) {
		ei = EXT4_I(pa->pa_inode);
		atomic_dec(&ei->i_prealloc_active);
	BUG_ON(atomic_read(&pa->pa_count));
	BUG_ON(pa->pa_deleted == 0);
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	spin_lock(&pa->pa_lock);
	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
		spin_unlock(&pa->pa_lock);
	if (pa->pa_deleted == 1) {
		spin_unlock(&pa->pa_lock);
	spin_unlock(&pa->pa_lock);
	grp_blk = pa->pa_pstart;
	 * If doing group-based preallocation, pa_pstart may be in the
	if (pa->pa_type == MB_GROUP_PA)
		grp_blk--;
	 * copy on-disk bitmap to buddy
	 * mark B in on-disk bitmap
	list_del(&pa->pa_group_list);
	if (pa->pa_type == MB_INODE_PA) {
		write_lock(pa->pa_node_lock.inode_lock);
		rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
		write_unlock(pa->pa_node_lock.inode_lock);
		spin_lock(pa->pa_node_lock.lg_lock);
		list_del_rcu(&pa->pa_node.lg_list);
		spin_unlock(pa->pa_node_lock.lg_lock);
	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	struct rb_node **iter = &root->rb_node, *parent = NULL;
		iter_start = iter_pa->pa_lstart;
		new_start = new_pa->pa_lstart;
			iter = &((*iter)->rb_left);
			iter = &((*iter)->rb_right);
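	/*
	 * The tree is ordered by pa_lstart; equal starts go to the right,
	 * so the insert walk above always bottoms out at a NULL child where
	 * the new node can be linked.
	 */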
	struct super_block *sb = ac->ac_sb;
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
	BUG_ON(ac->ac_pa == NULL);
	pa = ac->ac_pa;
	if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
			.fe_logical = ac->ac_g_ex.fe_logical,
			.fe_len = ac->ac_orig_goal_len,
		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
		ex.fe_len = ac->ac_b_ex.fe_len;
		ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
		if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
		ex.fe_logical = ac->ac_g_ex.fe_logical;
		if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
		ex.fe_logical = ac->ac_o_ex.fe_logical;
		ac->ac_b_ex.fe_logical = ex.fe_logical;
		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
	pa->pa_lstart = ac->ac_b_ex.fe_logical;
	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_INODE_PA;
	mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
		 pa->pa_len, pa->pa_lstart);
	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
	ei = EXT4_I(ac->ac_inode);
	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
	pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
	pa->pa_inode = ac->ac_inode;
	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
	write_lock(pa->pa_node_lock.inode_lock);
	ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
	write_unlock(pa->pa_node_lock.inode_lock);
	atomic_inc(&ei->i_prealloc_active);
 * creates new preallocated space for the locality group the inode belongs to
	struct super_block *sb = ac->ac_sb;
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
	BUG_ON(ac->ac_pa == NULL);
	pa = ac->ac_pa;
	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	pa->pa_lstart = pa->pa_pstart;
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_node.lg_list);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_GROUP_PA;
	mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
		 pa->pa_len, pa->pa_lstart);
	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
	lg = ac->ac_lg;
	pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
	pa->pa_inode = NULL;
	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
 * finds all unused blocks in on-disk bitmap, frees them in
 * in-core bitmap and buddy.
 * TODO: optimize the case when there are no in-core structures yet
	struct super_block *sb = e4b->bd_sb;
	BUG_ON(pa->pa_deleted == 0);
	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
	end = bit + pa->pa_len;
		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
			 (unsigned) next - bit, (unsigned) group);
		free += next - bit;
		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
					       next - bit);
		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
	if (free != pa->pa_free) {
		ext4_msg(e4b->bd_sb, KERN_CRIT,
			 pa, (unsigned long) pa->pa_lstart,
			 (unsigned long) pa->pa_pstart,
			 pa->pa_len);
				      free, pa->pa_free);
	atomic_add(free, &sbi->s_mb_discarded);
	struct super_block *sb = e4b->bd_sb;
	BUG_ON(pa->pa_deleted == 0);
	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
	if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
			     e4b->bd_group, group, pa->pa_pstart);
	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
 * - when do we discard
 * - how many do we discard
	if (list_empty(&grp->bb_prealloc_list))
		ext4_error_err(sb, -err,
				 &grp->bb_prealloc_list, pa_group_list) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			spin_unlock(&pa->pa_lock);
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
		free += pa->pa_free;
		spin_unlock(&pa->pa_lock);
		list_del(&pa->pa_group_list);
		list_add(&pa->u.pa_tmp_list, &list);
		/* remove from object (inode or locality group) */
		if (pa->pa_type == MB_GROUP_PA) {
			spin_lock(pa->pa_node_lock.lg_lock);
			list_del_rcu(&pa->pa_node.lg_list);
			spin_unlock(pa->pa_node_lock.lg_lock);
			write_lock(pa->pa_node_lock.inode_lock);
			ei = EXT4_I(pa->pa_inode);
			rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
			write_unlock(pa->pa_node_lock.inode_lock);
		list_del(&pa->u.pa_tmp_list);
		if (pa->pa_type == MB_GROUP_PA) {
			call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
			 free, group, grp->bb_free);
 * releases all unused preallocated blocks for the given inode
	struct super_block *sb = inode->i_sb;
	if (!S_ISREG(inode->i_mode)) {
	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
		 inode->i_ino);
			 atomic_read(&ei->i_prealloc_active), needed);
	write_lock(&ei->i_prealloc_lock);
	for (iter = rb_first(&ei->i_prealloc_node); iter && needed;
		BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/* this shouldn't happen often - nobody should
			spin_unlock(&pa->pa_lock);
			write_unlock(&ei->i_prealloc_lock);
				 "uh-oh! used pa while discarding");
		if (pa->pa_deleted == 0) {
			spin_unlock(&pa->pa_lock);
			rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
			list_add(&pa->u.pa_tmp_list, &list);
			needed--;
		spin_unlock(&pa->pa_lock);
		write_unlock(&ei->i_prealloc_lock);
		 * ->clear_inode() the inode will get freed
		 * freed memory, bad-bad-bad */
		 * of ->clear_inode(), but not in case of
	write_unlock(&ei->i_prealloc_lock);
		BUG_ON(pa->pa_type != MB_INODE_PA);
		group = ext4_get_group_number(sb, pa->pa_pstart);
			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
		list_del(&pa->pa_group_list);
		list_del(&pa->u.pa_tmp_list);
		return -ENOMEM;
	atomic_set(&pa->pa_count, 1);
	ac->ac_pa = pa;
	struct ext4_prealloc_space *pa = ac->ac_pa;
	ac->ac_pa = NULL;
	WARN_ON(!atomic_dec_and_test(&pa->pa_count));
	 * been added to grp->bb_prealloc_list. So we don't need to lock it
	pa->pa_deleted = 1;
	list_for_each(cur, &grp->bb_prealloc_list) {
		spin_lock(&pa->pa_lock);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
		spin_unlock(&pa->pa_lock);
			 pa->pa_len);
	mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
		 grp->bb_fragments);
	struct super_block *sb = ac->ac_sb;
			ac->ac_status, ac->ac_flags);
			(unsigned long)ac->ac_o_ex.fe_group,
			(unsigned long)ac->ac_o_ex.fe_start,
			(unsigned long)ac->ac_o_ex.fe_len,
			(unsigned long)ac->ac_o_ex.fe_logical,
			(unsigned long)ac->ac_g_ex.fe_group,
			(unsigned long)ac->ac_g_ex.fe_start,
			(unsigned long)ac->ac_g_ex.fe_len,
			(unsigned long)ac->ac_g_ex.fe_logical,
			(unsigned long)ac->ac_b_ex.fe_group,
			(unsigned long)ac->ac_b_ex.fe_start,
			(unsigned long)ac->ac_b_ex.fe_len,
			(unsigned long)ac->ac_b_ex.fe_logical,
			(int)ac->ac_criteria);
	mb_debug(sb, "%u found", ac->ac_found);
	mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no");
	if (ac->ac_pa)
		mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
	ext4_mb_show_pa(ac->ac_sb);
 * We use locality group preallocation for small files. The size of the
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int bsbits = ac->ac_sb->s_blocksize_bits;
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
	group_pa_eligible = sbi->s_mb_group_prealloc > 0;
	size = extent_logical_end(sbi, &ac->ac_o_ex);
	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
		    !inode_is_open_for_write(ac->ac_inode))
	if (size > sbi->s_mb_stream_request)
		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
	BUG_ON(ac->ac_lg != NULL);
	 * locality group prealloc space is per cpu. The reason for having
	 * per-cpu locality groups is to reduce the contention between block
	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
	mutex_lock(&ac->ac_lg->lg_mutex);
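	/*
	 * lg_mutex is held until ext4_mb_release_context(), so all group
	 * allocations routed to this CPU's locality group are serialized:
	 * the group PA is consumed, refilled and re-sorted without racing
	 * against another allocation using the same lg.
	 */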
	struct super_block *sb = ar->inode->i_sb;
	struct ext4_super_block *es = sbi->s_es;
	len = ar->len;
	goal = ar->goal;
	if (goal < le32_to_cpu(es->s_first_data_block) ||
		goal = le32_to_cpu(es->s_first_data_block);
	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
	ac->ac_status = AC_STATUS_CONTINUE;
	ac->ac_sb = sb;
	ac->ac_inode = ar->inode;
	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
	ac->ac_o_ex.fe_group = group;
	ac->ac_o_ex.fe_start = block;
	ac->ac_o_ex.fe_len = len;
	ac->ac_g_ex = ac->ac_o_ex;
	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
	ac->ac_flags = ar->flags;
	 * locality group. this is a policy, actually */
		 (unsigned) ar->len, (unsigned) ar->logical,
		 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
		 (unsigned) ar->lleft, (unsigned) ar->pleft,
		 (unsigned) ar->lright, (unsigned) ar->pright,
		 inode_is_open_for_write(ar->inode) ? "" : "non-");
	mb_debug(sb, "discard locality group preallocation\n");
	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
				lockdep_is_held(&lg->lg_prealloc_lock)) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			spin_unlock(&pa->pa_lock);
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
		BUG_ON(pa->pa_type != MB_GROUP_PA);
		spin_unlock(&pa->pa_lock);
		list_del_rcu(&pa->pa_node.lg_list);
		list_add(&pa->u.pa_tmp_list, &discard_list);
		total_entries--;
	spin_unlock(&lg->lg_prealloc_lock);
		group = ext4_get_group_number(sb, pa->pa_pstart);
			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
		list_del(&pa->pa_group_list);
		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;
	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
	order = fls(pa->pa_free) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		order = PREALLOC_TB_SIZE - 1;
	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
				lockdep_is_held(&lg->lg_prealloc_lock)) {
		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted) {
			spin_unlock(&tmp_pa->pa_lock);
		if (!added && pa->pa_free < tmp_pa->pa_free) {
			list_add_tail_rcu(&pa->pa_node.lg_list,
					  &tmp_pa->pa_node.lg_list);
		spin_unlock(&tmp_pa->pa_lock);
		list_add_tail_rcu(&pa->pa_node.lg_list,
				  &lg->lg_prealloc_list[order]);
	spin_unlock(&lg->lg_prealloc_lock);
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_prealloc_space *pa = ac->ac_pa;
		if (pa->pa_type == MB_GROUP_PA) {
			spin_lock(&pa->pa_lock);
			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
			pa->pa_free -= ac->ac_b_ex.fe_len;
			pa->pa_len -= ac->ac_b_ex.fe_len;
			spin_unlock(&pa->pa_lock);
			if (likely(pa->pa_free)) {
				spin_lock(pa->pa_node_lock.lg_lock);
				list_del_rcu(&pa->pa_node.lg_list);
				spin_unlock(pa->pa_node_lock.lg_lock);
		ext4_mb_put_pa(ac, ac->ac_sb, pa);
	if (ac->ac_bitmap_page)
		put_page(ac->ac_bitmap_page);
	if (ac->ac_buddy_page)
		put_page(ac->ac_buddy_page);
	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
		mutex_unlock(&ac->ac_lg->lg_mutex);
		needed -= ret;
	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
	struct super_block *sb = ar->inode->i_sb;
	struct ext4_super_block *es = sbi->s_es;
	goal = ar->goal;
	if (goal < le32_to_cpu(es->s_first_data_block) ||
		goal = le32_to_cpu(es->s_first_data_block);
	ar->len = 0;
	for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
		i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
	*errp = -ENOSPC;
	ar->len = 1;
	sb = ar->inode->i_sb;
	if (sbi->s_mount_state & EXT4_FC_REPLAY)
	if (ext4_is_quota_file(ar->inode))
		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
		while (ar->len &&
			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
			ar->len = ar->len >> 1;
		if (!ar->len) {
			*errp = -ENOSPC;
		reserv_clstrs = ar->len;
		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
			dquot_alloc_block_nofail(ar->inode,
						 EXT4_C2B(sbi, ar->len));
			while (ar->len &&
				dquot_alloc_block(ar->inode,
						  EXT4_C2B(sbi, ar->len))) {
				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
				ar->len--;
		inquota = ar->len;
		if (ar->len == 0) {
			*errp = -EDQUOT;
		ar->len = 0;
		*errp = -ENOMEM;
		ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
		 * pa allocated above is added to grp->bb_prealloc_list only
		 * ac->ac_status == AC_STATUS_FOUND.
		 * And error from above mean ac->ac_status != AC_STATUS_FOUND
	if (ac->ac_status == AC_STATUS_FOUND &&
	    ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
		block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
		ar->len = ac->ac_b_ex.fe_len;
		*errp = -ENOSPC;
		ac->ac_b_ex.fe_len = 0;
		ar->len = 0;
	if (inquota && ar->len < inquota)
		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
	if (!ar->len) {
		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
6299 if ((entry->efd_tid != new_entry->efd_tid) || in ext4_try_merge_freed_extent()
6300 (entry->efd_group != new_entry->efd_group)) in ext4_try_merge_freed_extent()
6302 if (entry->efd_start_cluster + entry->efd_count == in ext4_try_merge_freed_extent()
6303 new_entry->efd_start_cluster) { in ext4_try_merge_freed_extent()
6304 new_entry->efd_start_cluster = entry->efd_start_cluster; in ext4_try_merge_freed_extent()
6305 new_entry->efd_count += entry->efd_count; in ext4_try_merge_freed_extent()
6306 } else if (new_entry->efd_start_cluster + new_entry->efd_count == in ext4_try_merge_freed_extent()
6307 entry->efd_start_cluster) { in ext4_try_merge_freed_extent()
6308 new_entry->efd_count += entry->efd_count; in ext4_try_merge_freed_extent()
6311 spin_lock(&sbi->s_md_lock); in ext4_try_merge_freed_extent()
6312 list_del(&entry->efd_list); in ext4_try_merge_freed_extent()
6313 spin_unlock(&sbi->s_md_lock); in ext4_try_merge_freed_extent()
6314 rb_erase(&entry->efd_node, entry_rb_root); in ext4_try_merge_freed_extent()
/* fragment of ext4_mb_free_metadata() */
	ext4_group_t group = e4b->bd_group;
	ext4_grpblk_t clusters = new_entry->efd_count;
	struct ext4_group_info *db = e4b->bd_info;
	struct super_block *sb = e4b->bd_sb;
	struct rb_node **n = &db->bb_free_root.rb_node, *node;
	...
	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	new_node = &new_entry->efd_node;
	cluster = new_entry->efd_start_cluster;

	if (!*n) {
		/*
		 * First freed extent in this group: pin the buddy and bitmap
		 * pages so the buddy cache is not freed and then rebuilt from
		 * the on-disk bitmap, which would lose the not-yet-available
		 * (freed but uncommitted) blocks.
		 */
		get_page(e4b->bd_buddy_page);
		get_page(e4b->bd_bitmap_page);
	}
	/* walk the rbtree of pending freed extents to find the insert slot */
	while (*n) {
		...
		if (cluster < entry->efd_start_cluster)
			n = &(*n)->rb_left;
		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
			n = &(*n)->rb_right;
		else {
			/* overlap: this range was already queued for freeing */
			ext4_grp_locked_error(sb, group, 0, ...,
				"Block already on to-be-freed list");
			...
			return;
		}
	}
	...
	rb_insert_color(new_node, &db->bb_free_root);

	/* try to coalesce with the logical left and right neighbours */
	...
	ext4_try_merge_freed_extent(sbi, entry, new_entry,
				    &(db->bb_free_root));
	...
	ext4_try_merge_freed_extent(sbi, entry, new_entry,
				    &(db->bb_free_root));

	spin_lock(&sbi->s_md_lock);
	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
	sbi->s_mb_free_pending += clusters;
	spin_unlock(&sbi->s_md_lock);
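/*
 * The descent and insertion above follow the canonical <linux/rbtree.h>
 * pattern.  A minimal generic sketch of that pattern (illustrative
 * only -- "my_entry" and "key" are hypothetical names, not mballoc
 * code):
 *
 *	struct rb_node **n = &root->rb_node, *parent = NULL;
 *
 *	while (*n) {
 *		parent = *n;
 *		if (key < rb_entry(*n, struct my_entry, node)->key)
 *			n = &(*n)->rb_left;
 *		else
 *			n = &(*n)->rb_right;
 *	}
 *	rb_link_node(new_node, parent, n);	(link at the found leaf slot)
 *	rb_insert_color(new_node, root);	(recolor/rebalance the tree)
 *
 * rb_entry() is container_of() for rbtree nodes; the walk remembers the
 * eventual parent and the link slot so the new node can be attached
 * without a second traversal.
 */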
/* fragment of ext4_free_blocks_simple() */
	struct super_block *sb = inode->i_sb;
	...
	if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
		...
	mb_clear_bits(bitmap_bh->b_data, blkoff, count);
	...
			count - already_freed);
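/*
 * ext4_free_blocks_simple() is a minimal path that updates the on-disk
 * bitmap and group counters directly, bypassing the buddy cache and
 * the deferred-free machinery.  It first counts bits that are already
 * clear ("already_freed") so the group's free-cluster count is bumped
 * only by blocks that were actually still in use.  This simplified
 * path is the one taken during fast-commit replay -- see the
 * EXT4_FC_REPLAY check in ext4_free_blocks() below.
 */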
/*
 * ext4_mb_clear_bb() -- helper function for freeing blocks.
 * ...
 */
	...
	struct super_block *sb = inode->i_sb;
	...
	ext4_error(sb, "Freeing blocks in system zone - "
		   ...);
	...
	overflow = EXT4_C2B(sbi, bit) + count -
		   ...;
	count -= overflow;
	...
	err = -EIO;
	...
	ext4_error(sb, "Freeing blocks in system zone - "
		   ...);
	...
	/*
	 * ...
	 * to unshare ->b_data if a currently-committing transaction is
	 * ...
	 */
	...
	BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
	...
	new_entry->efd_start_cluster = bit;
	new_entry->efd_group = block_group;
	new_entry->efd_count = count_clusters;
	new_entry->efd_tid = handle->h_transaction->t_tid;
	...
	mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
	...
	/*
	 * need to update group_info->bb_free and bitmap
	 * ...
	 */
	...
	if (err && err != -EOPNOTSUPP)
		...
	mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
	...
	if (sbi->s_log_groups_per_flex) {
		...
			flex_group)->free_clusters);
	}
	...
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   ...);
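/*
 * Summary of the two paths above: when the blocks are metadata (or
 * data whose reuse before commit would be unsafe), an ext4_free_data
 * entry is filled in (efd_start_cluster/group/count/tid) and queued
 * via ext4_mb_free_metadata(), so the clusters become allocatable only
 * after the freeing transaction commits.  Otherwise the bits are
 * cleared and the clusters are returned to the buddy immediately, with
 * a discard attempted first on filesystems mounted with -o discard (an
 * -EOPNOTSUPP result is ignored, as the device may simply not support
 * discard).  Either way, the free-cluster counters and the flex-group
 * accounting are updated at the end.
 */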
/*
 * ext4_free_blocks() -- Free given blocks and update quota
 * ...
 */
	...
	struct super_block *sb = inode->i_sb;
	...
	if (bh) {
		if (block)
			BUG_ON(block != bh->b_blocknr);
		else
			block = bh->b_blocknr;
	}

	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
		ext4_free_blocks_simple(inode, block, count);
		return;
	}
	...
	ext4_error(sb, "Freeing blocks not in datazone - "
		   ...);
	...
	/* bigalloc: handle a partial cluster at the start of the range */
	overflow = sbi->s_cluster_ratio - overflow;
	...
	count -= overflow;
	...
	block -= overflow;
	...
	/* bigalloc: handle a partial cluster at the end of the range */
	count -= overflow;
	...
	count += sbi->s_cluster_ratio - overflow;
	...
	bh = sb_find_get_block(inode->i_sb, block + i);
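/*
 * A typical caller frees extent-tree metadata roughly like this
 * (illustrative sketch; the flags are real, the surrounding context is
 * hypothetical):
 *
 *	ext4_free_blocks(handle, inode, bh, 0, 1,
 *			 EXT4_FREE_BLOCKS_METADATA |
 *			 EXT4_FREE_BLOCKS_FORGET);
 *
 * Passing a buffer_head lets the function take the block number from
 * bh->b_blocknr (see above); METADATA routes the blocks through the
 * deferred-free path so they are not reused before the transaction
 * commits, and FORGET drops the buffer from the journal.
 */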
/*
 * ext4_group_add_blocks() -- Add given blocks to an existing group
 * ...
 */
	...
	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
	unsigned long cluster_count = last_cluster - first_cluster + 1;

	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
	...
	err = -EINVAL;
	...
	err = -EIO;
	...
	ext4_error(sb, "Adding blocks in system zones - "
		   ...);
	err = -EINVAL;
	...
	/*
	 * ...
	 * to unshare ->b_data if a currently-committing transaction is
	 * ...
	 */
	...
	if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
		...
	}
	...
	/*
	 * need to update group_info->bb_free and bitmap
	 * ...
	 */
	...
	mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
	...
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   ...);
	...
	if (sbi->s_log_groups_per_flex) {
		...
			flex_group)->free_clusters);
	}
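/*
 * Unlike the freeing paths above, ext4_group_add_blocks() expects the
 * bits it is about to clear to be currently set -- hence the
 * !mb_test_bit() check, which flags blocks that are already marked
 * free.  It is used when new blocks are handed to an existing group,
 * e.g. by the online-resize code, and it performs the same
 * free-cluster and flex-group accounting as a normal free.
 */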
/*
 * ext4_trim_extent -- function to TRIM one single free extent in the group
 * ...
 */
	...
	ext4_group_t group = e4b->bd_group;
	...
/* fragment of ext4_last_grp_cluster(): offset of the last cluster in a group */
	if (grp < (ext4_get_groups_count(sb) - 1))
		nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
	else
		/* the last group may be only partially populated */
		nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
					...);
	return nr_clusters_in_group - 1;
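/*
 * Worked example with illustrative numbers: with 32768 clusters per
 * group, a 100000-cluster filesystem has groups 0-2 fully populated
 * (each answering 32768 - 1 = 32767 here), while the last group spans
 * clusters 98304-99999, so ext4_last_grp_cluster() returns
 * 100000 - 98304 - 1 = 1695 for it.
 */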
/* fragment of ext4_try_to_trim_range() */
static int ext4_try_to_trim_range(...)
__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
	...
	last = ext4_last_grp_cluster(sb, e4b->bd_group);
	bitmap = e4b->bd_bitmap;
	...
	start = max(e4b->bd_info->bb_first_free, start);
	...
	/* found a free extent [start, next); trim it if long enough */
	if ((next - start) >= minblocks) {
		int ret = ext4_trim_extent(sb, start, next - start, e4b);

		if (ret && ret != -EOPNOTSUPP)
			...
		count += next - start;
	}
	free_count += next - start;
	...
	/* drop the group lock around rescheduling */
	ext4_unlock_group(sb, e4b->bd_group);
	cond_resched();
	ext4_lock_group(sb, e4b->bd_group);
	...
	if ((e4b->bd_info->bb_free - free_count) < minblocks)
		break;
	...
	EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
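/*
 * Two details worth noting in the loop above: trimming a large group
 * can take a while, so the group lock is dropped around cond_resched()
 * whenever rescheduling is due; and once the free clusters not yet
 * examined (bb_free - free_count) can no longer contain an extent of
 * at least minblocks, the walk stops early.  Only a trim that covered
 * the whole group may set the GRP_TRIMMED flag at the end.
 */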
/*
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * ...
 */
	...
	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
		...
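/*
 * The check above implements a simple FITRIM cache: if the group was
 * already trimmed (GRP_TRIMMED set) with a minimum length no larger
 * than the current request (s_last_trim_minblks, recorded at the end
 * of a successful ext4_trim_fs() below), there is nothing new to
 * discard and the group can be skipped.
 */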
/*
 * ext4_trim_fs() -- trim ioctl handle function
 * ...
 */
	...
	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
	...
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	...
	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);

	if (... ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	/* no point in trimming less than the device discard granularity */
	if (range->minlen < discard_granularity) {
		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
				      discard_granularity >> sb->s_blocksize_bits);
		...
	}
	if (end >= max_blks - 1)
		end = max_blks - 1;
	...
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
	...
	/*
	 * For all groups except the last one, the last cluster will
	 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
	 * ...
	 */
	...
	if (grp->bb_free >= minlen) {
		...
	}
	...
	EXT4_SB(sb)->s_last_trim_minblks = minlen;
	...
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
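/*
 * ext4_trim_fs() is reached through the FITRIM ioctl.  A minimal
 * illustrative userspace caller (fd is assumed to be an open
 * descriptor on the mounted filesystem):
 *
 *	#include <linux/fs.h>
 *	#include <limits.h>
 *	#include <sys/ioctl.h>
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= ULLONG_MAX,	(whole filesystem)
 *		.minlen	= 0,		(raised to the discard granularity above)
 *	};
 *	ioctl(fd, FITRIM, &range);
 *
 * On success range.len is rewritten with the number of bytes actually
 * trimmed, matching the range->len assignment at the end above.
 */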
/* fragment of ext4_mballoc_query_range() */
	start = max(e4b.bd_info->bb_first_free, start);
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
	...
	/* report each free extent [start, next) through the callback */
	error = formatter(sb, group, start, next - start, priv);
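/*
 * ext4_mballoc_query_range() walks the bitmap of one group between
 * start and end (clamped above to the last cluster of the group) and
 * reports each free extent to the formatter callback as
 * (group, start, length).  The callback appears to run with the group
 * lock dropped, so it may sleep; the GETFSMAP code is one such
 * consumer.
 */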