Lines Matching +full:1 +full:ac

101  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
134 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
149 * average fragment size >= 2^i and < 2^(i+1). The average fragment size
160 * in the data structure (1) above where largest_free_order = order of the
163 * lookup in O(1) time.
168 * size group lists (data structure 2) in O(1) time.
177 * suitable block group in O(1) time and results in faster allocation at the
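
Note: lines 134-177 above describe two arrays of lists, indexed by largest-free-order and by average-fragment-size order, that let the allocator pick a suitable block group in O(1). A minimal userspace sketch of the first structure follows; NUM_ORDERS, group_info, and order_lists are hypothetical names, not the kernel's actual types.

/* One bucket per "largest free order"; a group lives in the bucket
 * matching the order of its largest free chunk. */
#include <stddef.h>

#define NUM_ORDERS 14                 /* assumption: MB_NUM_ORDERS-like bound */

struct group_info {
	struct group_info *next;      /* simplified singly linked bucket */
	int largest_free_order;
};

static struct group_info *order_lists[NUM_ORDERS];

/* Start at the order the request needs and walk upward: at most
 * NUM_ORDERS bucket probes, i.e. a constant-bounded lookup. */
static struct group_info *choose_group(int wanted_order)
{
	for (int o = wanted_order; o < NUM_ORDERS; o++)
		if (order_lists[o])
			return order_lists[o];
	return NULL;                  /* no group has a large-enough chunk */
}
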
274 * 1) if buddy is referenced, it's already initialized
420 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
422 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
431 * 1. We sample the percpu discard_pa_seq counter before trying for block
533 if (order > e4b->bd_blkbits + 1) { in mb_find_buddy()
540 *max = 1 << (e4b->bd_blkbits + 3); in mb_find_buddy()
683 int order = e4b->bd_blkbits + 1; in __mb_check_buddy()
700 while (order > 1) { in __mb_check_buddy()
703 buddy2 = mb_find_buddy(e4b, order - 1, &max2); in __mb_check_buddy()
713 if (!mb_test_bit(i << 1, buddy2)) { in __mb_check_buddy()
715 mb_test_bit((i<<1)+1, buddy2)); in __mb_check_buddy()
720 /* both bits in buddy2 must be 1 */ in __mb_check_buddy()
721 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); in __mb_check_buddy()
722 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); in __mb_check_buddy()
724 for (j = 0; j < (1 << order); j++) { in __mb_check_buddy()
725 k = (i * (1 << order)) + j; in __mb_check_buddy()
735 fstart = -1; in __mb_check_buddy()
740 if (fstart == -1) { in __mb_check_buddy()
746 fstart = -1; in __mb_check_buddy()
748 for (j = 0; j < e4b->bd_blkbits + 1; j++) { in __mb_check_buddy()
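
Note: the assertions excerpted above (lines 683-748) check the core buddy invariant: a free chunk is recorded only at its own order, so a 0 (free) bit at order o forces both child bits at order o-1 to read 1, and a 1 bit allows at most one free child. A self-contained sketch of that pairwise check; test_bit and buddy_pair_ok are illustrative helpers, not kernel API.

#include <limits.h>

static int test_bit(const unsigned char *bm, unsigned nr)
{
	return (bm[nr / CHAR_BIT] >> (nr % CHAR_BIT)) & 1;
}

static int buddy_pair_ok(const unsigned char *parent,
			 const unsigned char *child, unsigned i)
{
	if (!test_bit(parent, i))     /* parent free: children must read 1 */
		return test_bit(child, 2 * i) && test_bit(child, 2 * i + 1);
	/* parent used or split: at most a single child bit may be 0 */
	return !(!test_bit(child, 2 * i) && !test_bit(child, 2 * i + 1));
}
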
801 max = ffs(first | border) - 1; in ext4_mb_mark_free_simple()
804 min = fls(len) - 1; in ext4_mb_mark_free_simple()
808 chunk = 1 << min; in ext4_mb_mark_free_simple()
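
Note: the ffs()/fls() arithmetic in ext4_mb_mark_free_simple (lines 801-808 above) decomposes a free run into power-of-two chunks: the chunk order may not exceed the alignment of `first` (ffs of first|border, where border caps the order) nor the remaining length (fls of len). A hedged standalone illustration using compiler builtins instead of the kernel helpers:

#include <stdio.h>

/* userspace stand-ins for the kernel's ffs()/fls() */
static int my_ffs(unsigned v) { return __builtin_ffs((int)v); }
static int my_fls(unsigned v) { return v ? 32 - __builtin_clz(v) : 0; }

int main(void)
{
	unsigned first = 6, len = 7;  /* free clusters [6..12] */
	unsigned border = 16;         /* caps the chunk order, as in the kernel */

	while (len > 0) {
		int max = my_ffs(first | border) - 1; /* alignment-limited order */
		int min = my_fls(len) - 1;            /* length-limited order */
		if (min > max)
			min = max;
		unsigned chunk = 1u << min;
		printf("chunk of %u at %u\n", chunk, first);
		len -= chunk;
		first += chunk;
	}
	return 0;
}

For first=6, len=7 this prints chunks 2@6, 4@8, 1@12, exactly covering the run.
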
826 * We don't bother with special lists for groups with only 1 block free in mb_avg_fragment_size_order()
835 order = MB_NUM_ORDERS(sb) - 1; in mb_avg_fragment_size_order()
854 if (grp->bb_avg_fragment_size_order != -1) { in mb_update_avg_fragment_size()
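
Note: per line 149 above, list i of the second structure holds groups whose average fragment size lies in [2^i, 2^(i+1)), and fls(avg) - 1 yields that i. A hedged sketch of the bucket computation (the kernel's exact clamping and offsets may differ; -1 mirrors the bb_avg_fragment_size_order "uninit" sentinel seen at line 3375):

static int fls32(unsigned v) { return v ? 32 - __builtin_clz(v) : 0; }

static int avg_fragment_size_order(unsigned free_clusters,
				   unsigned fragments, int num_orders)
{
	if (!fragments || free_clusters <= 1)
		return -1;                 /* untracked: empty or 1-block-free */
	unsigned avg = free_clusters / fragments;
	int order = fls32(avg) - 1;        /* i with 2^i <= avg < 2^(i+1) */
	if (order >= num_orders)
		order = num_orders - 1;    /* clamp into the available lists */
	return order;
}
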
874 static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac, in ext4_mb_choose_next_group_p2_aligned() argument
877 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_choose_next_group_p2_aligned()
881 if (ac->ac_status == AC_STATUS_FOUND) in ext4_mb_choose_next_group_p2_aligned()
884 if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED)) in ext4_mb_choose_next_group_p2_aligned()
887 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) { in ext4_mb_choose_next_group_p2_aligned()
899 if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) { in ext4_mb_choose_next_group_p2_aligned()
901 ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED; in ext4_mb_choose_next_group_p2_aligned()
917 ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order) in ext4_mb_find_good_group_avg_frag_lists() argument
919 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_find_good_group_avg_frag_lists()
923 enum criteria cr = ac->ac_criteria; in ext4_mb_find_good_group_avg_frag_lists()
935 if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) { in ext4_mb_find_good_group_avg_frag_lists()
948 static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac, in ext4_mb_choose_next_group_goal_fast() argument
951 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_choose_next_group_goal_fast()
955 if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) { in ext4_mb_choose_next_group_goal_fast()
960 for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); in ext4_mb_choose_next_group_goal_fast()
961 i < MB_NUM_ORDERS(ac->ac_sb); i++) { in ext4_mb_choose_next_group_goal_fast()
962 grp = ext4_mb_find_good_group_avg_frag_lists(ac, i); in ext4_mb_choose_next_group_goal_fast()
965 ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED; in ext4_mb_choose_next_group_goal_fast()
978 if (ac->ac_flags & EXT4_MB_HINT_DATA) in ext4_mb_choose_next_group_goal_fast()
993 static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac, in ext4_mb_choose_next_group_best_avail() argument
996 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_choose_next_group_best_avail()
1001 if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) { in ext4_mb_choose_next_group_best_avail()
1008 * retrieving back the length using (1 << order) inaccurate. Hence, use in ext4_mb_choose_next_group_best_avail()
1012 order = fls(ac->ac_g_ex.fe_len) - 1; in ext4_mb_choose_next_group_best_avail()
1013 if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb))) in ext4_mb_choose_next_group_best_avail()
1014 order = MB_NUM_ORDERS(ac->ac_sb); in ext4_mb_choose_next_group_best_avail()
1025 if (1 << min_order < num_stripe_clusters) in ext4_mb_choose_next_group_best_avail()
1027 * We consider 1 order less because later we round in ext4_mb_choose_next_group_best_avail()
1030 min_order = fls(num_stripe_clusters) - 1; in ext4_mb_choose_next_group_best_avail()
1033 if (1 << min_order < ac->ac_o_ex.fe_len) in ext4_mb_choose_next_group_best_avail()
1034 min_order = fls(ac->ac_o_ex.fe_len); in ext4_mb_choose_next_group_best_avail()
1043 ac->ac_g_ex.fe_len = 1 << i; in ext4_mb_choose_next_group_best_avail()
1051 ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len, in ext4_mb_choose_next_group_best_avail()
1055 frag_order = mb_avg_fragment_size_order(ac->ac_sb, in ext4_mb_choose_next_group_best_avail()
1056 ac->ac_g_ex.fe_len); in ext4_mb_choose_next_group_best_avail()
1058 grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order); in ext4_mb_choose_next_group_best_avail()
1061 ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED; in ext4_mb_choose_next_group_best_avail()
1067 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; in ext4_mb_choose_next_group_best_avail()
1071 static inline int should_optimize_scan(struct ext4_allocation_context *ac) in should_optimize_scan() argument
1073 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN))) in should_optimize_scan()
1075 if (ac->ac_criteria >= CR_GOAL_LEN_SLOW) in should_optimize_scan()
1077 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) in should_optimize_scan()
1079 return 1; in should_optimize_scan()
1087 next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group, in next_linear_group() argument
1090 if (!should_optimize_scan(ac)) in next_linear_group()
1093 if (ac->ac_groups_linear_remaining) { in next_linear_group()
1094 ac->ac_groups_linear_remaining--; in next_linear_group()
1104 return group + 1 >= ngroups ? 0 : group + 1; in next_linear_group()
1110 * @ac Allocation Context
1120 static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac, in ext4_mb_choose_next_group() argument
1123 *new_cr = ac->ac_criteria; in ext4_mb_choose_next_group()
1125 if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) { in ext4_mb_choose_next_group()
1126 *group = next_linear_group(ac, *group, ngroups); in ext4_mb_choose_next_group()
1131 ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group, ngroups); in ext4_mb_choose_next_group()
1133 ext4_mb_choose_next_group_goal_fast(ac, new_cr, group, ngroups); in ext4_mb_choose_next_group()
1135 ext4_mb_choose_next_group_best_avail(ac, new_cr, group, ngroups); in ext4_mb_choose_next_group()
1141 WARN_ON(1); in ext4_mb_choose_next_group()
1155 for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) in mb_set_largest_free_order()
1207 if (len > 1) in ext4_mb_generate_buddy()
1242 int order = 1; in mb_regenerate_buddy()
1264 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1303 groups_per_page = blocks_per_page >> 1; in ext4_mb_init_cache()
1305 groups_per_page = 1; in ext4_mb_init_cache()
1308 if (groups_per_page > 1) { in ext4_mb_init_cache()
1358 group = (first_block + i) >> 1; in ext4_mb_init_cache()
1389 if ((first_block + i) & 1) { in ext4_mb_init_cache()
1744 int order = 1, max; in mb_find_order_for_block()
1748 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); in mb_find_order_for_block()
1750 while (order <= e4b->bd_blkbits + 1) { in mb_find_order_for_block()
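
Note: mb_find_order_for_block (lines 1744-1750 above) walks the per-order buddy bitmaps upward; the chunk containing the block is free at the first order showing a 0 bit over it, since free chunks are recorded only at their own order. A simplified stand-alone version; buddy[] is a hypothetical array of per-order bitmaps:

#include <limits.h>

static int test_bit(const unsigned char *bm, unsigned nr)
{
	return (bm[nr / CHAR_BIT] >> (nr % CHAR_BIT)) & 1;
}

static int find_order_for_block(unsigned char *const *buddy, int max_order,
				unsigned block)
{
	for (int order = 1; order <= max_order; order++)
		if (!test_bit(buddy[order], block >> order))
			return order;
	return 0;       /* not part of any higher-order free chunk */
}
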
1780 * will return first found zero bit if any, -1 otherwise
1785 int zero_bit = -1; in mb_test_and_clear_bits()
1792 if (*addr != (__u32)(-1) && zero_bit == -1) in mb_test_and_clear_bits()
1798 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) in mb_test_and_clear_bits()
1829 return 1; in mb_buddy_adjust_border()
1834 return -1; in mb_buddy_adjust_border()
1841 int order = 1; in mb_buddy_mark_free()
1858 * | 1 | 1 | 1 | 1 | in mb_buddy_mark_free()
1860 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | in mb_buddy_mark_free()
1862 * 0 1 2 3 4 5 6 7 in mb_buddy_mark_free()
1865 * Neither [1] nor [6] is aligned to above layer. in mb_buddy_mark_free()
1876 if (first & 1) in mb_buddy_mark_free()
1877 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); in mb_buddy_mark_free()
1878 if (!(last & 1)) in mb_buddy_mark_free()
1879 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); in mb_buddy_mark_free()
1886 mb_clear_bits(buddy, first, last - first + 1); in mb_buddy_mark_free()
1887 e4b->bd_info->bb_counters[order - 1] += last - first + 1; in mb_buddy_mark_free()
1890 first >>= 1; in mb_buddy_mark_free()
1891 last >>= 1; in mb_buddy_mark_free()
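
Note: the `first & 1` / `!(last & 1)` tests and the final halving above are the classic buddy-free climb: an odd left edge (or even right edge) has no buddy inside the freed range, so it is settled at the current order, and the pair-aligned remainder moves one order up. A simplified sketch of the shape, not the kernel's exact bitmap bookkeeping; free_at is a hypothetical callback:

static void buddy_mark_free(void (*free_at)(int order, unsigned idx),
			    int max_order, unsigned first, unsigned last)
{
	int order = 0;

	while (first <= last && order <= max_order) {
		if (first & 1)                     /* left edge lacks a buddy */
			free_at(order, first++);
		if (first <= last && !(last & 1))  /* right edge likewise */
			free_at(order, last--);
		if (first > last)
			break;
		first >>= 1;    /* remaining range is pair-aligned: */
		last >>= 1;     /* continue one order up            */
		order++;
	}
}

Freeing [6..12] this way settles block 12 at order 0, blocks 6-7 at order 1, and blocks 8-11 at order 2.
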
1902 int last = first + count - 1; in mb_free_blocks()
1920 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); in mb_free_blocks()
1922 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) in mb_free_blocks()
1923 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); in mb_free_blocks()
1925 if (unlikely(block != -1)) { in mb_free_blocks()
1966 if (first & 1) { in mb_free_blocks()
1968 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; in mb_free_blocks()
1970 if (!(last & 1)) { in mb_free_blocks()
1972 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1; in mb_free_blocks()
1976 mb_buddy_mark_free(e4b, first >> 1, last >> 1); in mb_free_blocks()
2008 ex->fe_len = 1 << order; in mb_find_extent()
2020 if (block + 1 >= max) in mb_find_extent()
2023 next = (block + 1) * (1 << order); in mb_find_extent()
2030 ex->fe_len += 1 << order; in mb_find_extent()
2035 WARN_ON(1); in mb_find_extent()
2074 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); in mb_mark_used()
2087 if (((start >> ord) << ord) == start && len >= (1 << ord)) { in mb_mark_used()
2089 mlen = 1 << ord; in mb_mark_used()
2114 cur = (start >> ord) & ~1U; in mb_mark_used()
2117 mb_clear_bit(cur + 1, buddy); in mb_mark_used()
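
Note: conversely, when mb_mark_used takes only part of a larger free chunk, the chunk is split: the parent bit is set (in mballoc a set buddy bit means "not free at this order") and the two halves are exposed one order down, which is what the `cur = (start >> ord) & ~1U` line above prepares. A self-contained sketch with simplified helpers:

#include <limits.h>

static void set_bit(unsigned char *bm, unsigned nr)
{
	bm[nr / CHAR_BIT] |= 1u << (nr % CHAR_BIT);
}

static void clear_bit(unsigned char *bm, unsigned nr)
{
	bm[nr / CHAR_BIT] &= ~(1u << (nr % CHAR_BIT));
}

static void split_chunk(unsigned char **buddy, int ord, unsigned start)
{
	set_bit(buddy[ord], start >> ord);    /* 2^ord chunk leaves this order */
	ord--;
	unsigned cur = (start >> ord) & ~1U;  /* pair-aligned left half */
	clear_bit(buddy[ord], cur);           /* both halves become free */
	clear_bit(buddy[ord], cur + 1);       /* at order ord            */
}
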
2134 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, in ext4_mb_use_best_found() argument
2137 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_use_best_found()
2140 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); in ext4_mb_use_best_found()
2141 BUG_ON(ac->ac_status == AC_STATUS_FOUND); in ext4_mb_use_best_found()
2143 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); in ext4_mb_use_best_found()
2144 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; in ext4_mb_use_best_found()
2145 ret = mb_mark_used(e4b, &ac->ac_b_ex); in ext4_mb_use_best_found()
2149 ac->ac_f_ex = ac->ac_b_ex; in ext4_mb_use_best_found()
2151 ac->ac_status = AC_STATUS_FOUND; in ext4_mb_use_best_found()
2152 ac->ac_tail = ret & 0xffff; in ext4_mb_use_best_found()
2153 ac->ac_buddy = ret >> 16; in ext4_mb_use_best_found()
2162 ac->ac_bitmap_page = e4b->bd_bitmap_page; in ext4_mb_use_best_found()
2163 get_page(ac->ac_bitmap_page); in ext4_mb_use_best_found()
2164 ac->ac_buddy_page = e4b->bd_buddy_page; in ext4_mb_use_best_found()
2165 get_page(ac->ac_buddy_page); in ext4_mb_use_best_found()
2167 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { in ext4_mb_use_best_found()
2169 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; in ext4_mb_use_best_found()
2170 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; in ext4_mb_use_best_found()
2178 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) in ext4_mb_use_best_found()
2179 ext4_mb_new_preallocation(ac); in ext4_mb_use_best_found()
2183 static void ext4_mb_check_limits(struct ext4_allocation_context *ac, in ext4_mb_check_limits() argument
2187 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_check_limits()
2188 struct ext4_free_extent *bex = &ac->ac_b_ex; in ext4_mb_check_limits()
2189 struct ext4_free_extent *gex = &ac->ac_g_ex; in ext4_mb_check_limits()
2191 if (ac->ac_status == AC_STATUS_FOUND) in ext4_mb_check_limits()
2196 if (ac->ac_found > sbi->s_mb_max_to_scan && in ext4_mb_check_limits()
2197 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_check_limits()
2198 ac->ac_status = AC_STATUS_BREAK; in ext4_mb_check_limits()
2208 if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) in ext4_mb_check_limits()
2209 ext4_mb_use_best_found(ac, e4b); in ext4_mb_check_limits()
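
Note: ext4_mb_check_limits (lines 2183-2209 above) is a scanning budget: stop outright after s_mb_max_to_scan candidate extents, and settle for the best extent seen once s_mb_min_to_scan candidates have been examined, or immediately on an exact goal match. A hedged distillation of that policy with hypothetical names; best_covers_goal mirrors the bex/gex length comparison:

enum verdict { KEEP_SCANNING, USE_BEST, STOP_SCAN };

static enum verdict check_limits(int found, int min_to_scan, int max_to_scan,
				 int best_covers_goal, int finish)
{
	if (found > max_to_scan)
		return STOP_SCAN;     /* cost cap: break off the search */
	if (best_covers_goal && (finish || found > min_to_scan))
		return USE_BEST;      /* good enough: stop being greedy */
	return KEEP_SCANNING;
}
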
2236 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, in ext4_mb_measure_extent() argument
2240 struct ext4_free_extent *bex = &ac->ac_b_ex; in ext4_mb_measure_extent()
2241 struct ext4_free_extent *gex = &ac->ac_g_ex; in ext4_mb_measure_extent()
2244 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); in ext4_mb_measure_extent()
2245 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); in ext4_mb_measure_extent()
2246 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); in ext4_mb_measure_extent()
2248 ac->ac_found++; in ext4_mb_measure_extent()
2249 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_measure_extent()
2254 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_measure_extent()
2256 ext4_mb_use_best_found(ac, e4b); in ext4_mb_measure_extent()
2265 ext4_mb_use_best_found(ac, e4b); in ext4_mb_measure_extent()
2293 ext4_mb_check_limits(ac, e4b, 0); in ext4_mb_measure_extent()
2297 void ext4_mb_try_best_found(struct ext4_allocation_context *ac, in ext4_mb_try_best_found() argument
2300 struct ext4_free_extent ex = ac->ac_b_ex; in ext4_mb_try_best_found()
2306 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); in ext4_mb_try_best_found()
2310 ext4_lock_group(ac->ac_sb, group); in ext4_mb_try_best_found()
2317 ac->ac_b_ex = ex; in ext4_mb_try_best_found()
2318 ext4_mb_use_best_found(ac, e4b); in ext4_mb_try_best_found()
2322 ext4_unlock_group(ac->ac_sb, group); in ext4_mb_try_best_found()
2327 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, in ext4_mb_find_by_goal() argument
2330 ext4_group_t group = ac->ac_g_ex.fe_group; in ext4_mb_find_by_goal()
2333 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_find_by_goal()
2334 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_find_by_goal()
2339 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) in ext4_mb_find_by_goal()
2344 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); in ext4_mb_find_by_goal()
2348 ext4_lock_group(ac->ac_sb, group); in ext4_mb_find_by_goal()
2352 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, in ext4_mb_find_by_goal()
2353 ac->ac_g_ex.fe_len, &ex); in ext4_mb_find_by_goal()
2356 if (max >= ac->ac_g_ex.fe_len && in ext4_mb_find_by_goal()
2357 ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) { in ext4_mb_find_by_goal()
2360 start = ext4_grp_offs_to_block(ac->ac_sb, &ex); in ext4_mb_find_by_goal()
2363 ac->ac_found++; in ext4_mb_find_by_goal()
2364 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2365 ext4_mb_use_best_found(ac, e4b); in ext4_mb_find_by_goal()
2367 } else if (max >= ac->ac_g_ex.fe_len) { in ext4_mb_find_by_goal()
2369 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); in ext4_mb_find_by_goal()
2370 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); in ext4_mb_find_by_goal()
2371 ac->ac_found++; in ext4_mb_find_by_goal()
2372 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2373 ext4_mb_use_best_found(ac, e4b); in ext4_mb_find_by_goal()
2374 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { in ext4_mb_find_by_goal()
2378 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); in ext4_mb_find_by_goal()
2379 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); in ext4_mb_find_by_goal()
2380 ac->ac_found++; in ext4_mb_find_by_goal()
2381 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2382 ext4_mb_use_best_found(ac, e4b); in ext4_mb_find_by_goal()
2385 ext4_unlock_group(ac->ac_sb, group); in ext4_mb_find_by_goal()
2396 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, in ext4_mb_simple_scan_group() argument
2399 struct super_block *sb = ac->ac_sb; in ext4_mb_simple_scan_group()
2406 BUG_ON(ac->ac_2order <= 0); in ext4_mb_simple_scan_group()
2407 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { in ext4_mb_simple_scan_group()
2418 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, in ext4_mb_simple_scan_group()
2421 ext4_mark_group_bitmap_corrupted(ac->ac_sb, in ext4_mb_simple_scan_group()
2426 ac->ac_found++; in ext4_mb_simple_scan_group()
2427 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_simple_scan_group()
2429 ac->ac_b_ex.fe_len = 1 << i; in ext4_mb_simple_scan_group()
2430 ac->ac_b_ex.fe_start = k << i; in ext4_mb_simple_scan_group()
2431 ac->ac_b_ex.fe_group = e4b->bd_group; in ext4_mb_simple_scan_group()
2433 ext4_mb_use_best_found(ac, e4b); in ext4_mb_simple_scan_group()
2435 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); in ext4_mb_simple_scan_group()
2450 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, in ext4_mb_complex_scan_group() argument
2453 struct super_block *sb = ac->ac_sb; in ext4_mb_complex_scan_group()
2465 while (free && ac->ac_status == AC_STATUS_CONTINUE) { in ext4_mb_complex_scan_group()
2483 if (!ext4_mb_cr_expensive(ac->ac_criteria)) { in ext4_mb_complex_scan_group()
2494 if (freelen < ac->ac_g_ex.fe_len) { in ext4_mb_complex_scan_group()
2501 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); in ext4_mb_complex_scan_group()
2519 ext4_mb_measure_extent(ac, &ex, e4b); in ext4_mb_complex_scan_group()
2525 ext4_mb_check_limits(ac, e4b, 1); in ext4_mb_complex_scan_group()
2533 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, in ext4_mb_scan_aligned() argument
2536 struct super_block *sb = ac->ac_sb; in ext4_mb_scan_aligned()
2550 a = first_group_block + sbi->s_stripe - 1; in ext4_mb_scan_aligned()
2560 ac->ac_found++; in ext4_mb_scan_aligned()
2561 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_scan_aligned()
2563 ac->ac_b_ex = ex; in ext4_mb_scan_aligned()
2564 ext4_mb_use_best_found(ac, e4b); in ext4_mb_scan_aligned()
2574 * Returns either 1 or 0, indicating whether the group is suitable
2577 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, in ext4_mb_good_group() argument
2581 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); in ext4_mb_good_group()
2582 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_good_group()
2599 BUG_ON(ac->ac_2order == 0); in ext4_mb_good_group()
2602 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && in ext4_mb_good_group()
2607 if (free < ac->ac_g_ex.fe_len) in ext4_mb_good_group()
2610 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) in ext4_mb_good_group()
2613 if (grp->bb_largest_free_order < ac->ac_2order) in ext4_mb_good_group()
2619 if ((free / fragments) >= ac->ac_g_ex.fe_len) in ext4_mb_good_group()
2623 if (free >= ac->ac_g_ex.fe_len) in ext4_mb_good_group()
2646 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, in ext4_mb_good_group_nolock() argument
2649 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_good_group_nolock()
2650 struct super_block *sb = ac->ac_sb; in ext4_mb_good_group_nolock()
2652 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; in ext4_mb_good_group_nolock()
2659 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); in ext4_mb_good_group_nolock()
2672 if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len) in ext4_mb_good_group_nolock()
2698 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && in ext4_mb_good_group_nolock()
2711 ret = ext4_mb_good_group(ac, group, cr); in ext4_mb_good_group_nolock()
2795 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) in ext4_mb_regular_allocator() argument
2806 sb = ac->ac_sb; in ext4_mb_regular_allocator()
2810 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) in ext4_mb_regular_allocator()
2813 BUG_ON(ac->ac_status == AC_STATUS_FOUND); in ext4_mb_regular_allocator()
2816 err = ext4_mb_find_by_goal(ac, &e4b); in ext4_mb_regular_allocator()
2817 if (err || ac->ac_status == AC_STATUS_FOUND) in ext4_mb_regular_allocator()
2820 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) in ext4_mb_regular_allocator()
2824 * ac->ac_2order is set only if the fe_len is a power of 2 in ext4_mb_regular_allocator()
2825 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED in ext4_mb_regular_allocator()
2828 i = fls(ac->ac_g_ex.fe_len); in ext4_mb_regular_allocator()
2829 ac->ac_2order = 0; in ext4_mb_regular_allocator()
2838 if (is_power_of_2(ac->ac_g_ex.fe_len)) in ext4_mb_regular_allocator()
2839 ac->ac_2order = array_index_nospec(i - 1, in ext4_mb_regular_allocator()
2844 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { in ext4_mb_regular_allocator()
2847 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; in ext4_mb_regular_allocator()
2848 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; in ext4_mb_regular_allocator()
2857 if (ac->ac_2order) in ext4_mb_regular_allocator()
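
Note: per lines 2824-2857 above, ac_2order is set only when the goal length is a power of two, in which case the scan can start at the cheap CR_POWER2_ALIGNED criterion. A standalone illustration of that predicate using builtins (array_index_nospec is elided here; it only sanitizes the index against speculative out-of-bounds use):

#include <stdio.h>

static int is_pow2(unsigned v) { return v && !(v & (v - 1)); }
static int fls32(unsigned v)   { return v ? 32 - __builtin_clz(v) : 0; }

int main(void)
{
	unsigned lens[] = { 7, 8, 24, 64 };

	for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		/* ac_2order stays 0 ("unset") unless the goal is a power of two */
		int order2 = is_pow2(lens[i]) ? fls32(lens[i]) - 1 : 0;
		printf("goal=%-3u ac_2order=%d\n", lens[i], order2);
	}
	return 0;
}

A 64-cluster goal yields ac_2order = 6, while 24 leaves it unset.
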
2860 for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) { in ext4_mb_regular_allocator()
2861 ac->ac_criteria = cr; in ext4_mb_regular_allocator()
2866 group = ac->ac_g_ex.fe_group; in ext4_mb_regular_allocator()
2867 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; in ext4_mb_regular_allocator()
2871 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { in ext4_mb_regular_allocator()
2891 nr = 1 << sbi->s_log_groups_per_flex; in ext4_mb_regular_allocator()
2892 nr -= group & (nr - 1); in ext4_mb_regular_allocator()
2900 ret = ext4_mb_good_group_nolock(ac, group, cr); in ext4_mb_regular_allocator()
2917 ret = ext4_mb_good_group(ac, group, cr); in ext4_mb_regular_allocator()
2924 ac->ac_groups_scanned++; in ext4_mb_regular_allocator()
2926 ext4_mb_simple_scan_group(ac, &e4b); in ext4_mb_regular_allocator()
2930 !(ac->ac_g_ex.fe_len % in ext4_mb_regular_allocator()
2932 ext4_mb_scan_aligned(ac, &e4b); in ext4_mb_regular_allocator()
2934 ext4_mb_complex_scan_group(ac, &e4b); in ext4_mb_regular_allocator()
2939 if (ac->ac_status != AC_STATUS_CONTINUE) in ext4_mb_regular_allocator()
2946 if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN) in ext4_mb_regular_allocator()
2949 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; in ext4_mb_regular_allocator()
2952 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && in ext4_mb_regular_allocator()
2953 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_regular_allocator()
2958 ext4_mb_try_best_found(ac, &e4b); in ext4_mb_regular_allocator()
2959 if (ac->ac_status != AC_STATUS_FOUND) { in ext4_mb_regular_allocator()
2967 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, in ext4_mb_regular_allocator()
2968 ac->ac_b_ex.fe_len, lost); in ext4_mb_regular_allocator()
2970 ac->ac_b_ex.fe_group = 0; in ext4_mb_regular_allocator()
2971 ac->ac_b_ex.fe_start = 0; in ext4_mb_regular_allocator()
2972 ac->ac_b_ex.fe_len = 0; in ext4_mb_regular_allocator()
2973 ac->ac_status = AC_STATUS_CONTINUE; in ext4_mb_regular_allocator()
2974 ac->ac_flags |= EXT4_MB_HINT_FIRST; in ext4_mb_regular_allocator()
2980 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) in ext4_mb_regular_allocator()
2981 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); in ext4_mb_regular_allocator()
2983 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) in ext4_mb_regular_allocator()
2987 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, in ext4_mb_regular_allocator()
2988 ac->ac_flags, cr, err); in ext4_mb_regular_allocator()
3003 group = *pos + 1; in ext4_mb_seq_groups_start()
3015 group = *pos + 1; in ext4_mb_seq_groups_next()
3038 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " in ext4_mb_seq_groups_show()
3054 buddy_loaded = 1; in ext4_mb_seq_groups_show()
3065 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? in ext4_mb_seq_groups_show()
3096 "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); in ext4_seq_mb_stats_show()
3201 position = *pos + 1; in ext4_mb_seq_structs_summary_start()
3213 position = *pos + 1; in ext4_mb_seq_structs_summary_next()
3244 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0); in ext4_mb_seq_structs_summary_show()
3289 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> in ext4_mb_alloc_groupinfo()
3346 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); in ext4_mb_add_groupinfo()
3374 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ in ext4_mb_add_groupinfo()
3375 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ in ext4_mb_add_groupinfo()
3440 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, in ext4_mb_init_backend()
3572 fd->efd_start_cluster + fd->efd_count - 1, 1); in ext4_discard_work()
3613 i = 1; in ext4_mb_init()
3615 offset_incr = 1 << (sb->s_blocksize_bits - 1); in ext4_mb_init()
3621 offset_incr = offset_incr >> 1; in ext4_mb_init()
3622 max = max >> 1; in ext4_mb_init()
3681 * is 1 megabyte, then group preallocation size becomes half a in ext4_mb_init()
3686 * size is 256k, and 32 megs when the cluster size is 1 meg, in ext4_mb_init()
3692 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc in ext4_mb_init()
3699 if (sbi->s_stripe > 1) { in ext4_mb_init()
3794 EXT4_DESC_PER_BLOCK(sb) - 1) >> in ext4_mb_release()
3902 mb_debug(sb, "freed %d blocks in 1 structure\n", count); in ext4_free_data_in_buddy()
3986 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3990 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, in ext4_mb_mark_diskspace_used() argument
4001 BUG_ON(ac->ac_status != AC_STATUS_FOUND); in ext4_mb_mark_diskspace_used()
4002 BUG_ON(ac->ac_b_ex.fe_len <= 0); in ext4_mb_mark_diskspace_used()
4004 sb = ac->ac_sb; in ext4_mb_mark_diskspace_used()
4007 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); in ext4_mb_mark_diskspace_used()
4019 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); in ext4_mb_mark_diskspace_used()
4023 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, in ext4_mb_mark_diskspace_used()
4031 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_mark_diskspace_used()
4033 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_mark_diskspace_used()
4034 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { in ext4_mb_mark_diskspace_used()
4041 ext4_lock_group(sb, ac->ac_b_ex.fe_group); in ext4_mb_mark_diskspace_used()
4042 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, in ext4_mb_mark_diskspace_used()
4043 ac->ac_b_ex.fe_len); in ext4_mb_mark_diskspace_used()
4044 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); in ext4_mb_mark_diskspace_used()
4051 ext4_lock_group(sb, ac->ac_b_ex.fe_group); in ext4_mb_mark_diskspace_used()
4055 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { in ext4_mb_mark_diskspace_used()
4056 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, in ext4_mb_mark_diskspace_used()
4061 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, in ext4_mb_mark_diskspace_used()
4062 ac->ac_b_ex.fe_len); in ext4_mb_mark_diskspace_used()
4068 ac->ac_b_ex.fe_group, gdp)); in ext4_mb_mark_diskspace_used()
4070 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; in ext4_mb_mark_diskspace_used()
4073 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); in ext4_mb_mark_diskspace_used()
4075 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); in ext4_mb_mark_diskspace_used()
4076 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); in ext4_mb_mark_diskspace_used()
4080 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) in ext4_mb_mark_diskspace_used()
4087 ac->ac_b_ex.fe_group); in ext4_mb_mark_diskspace_used()
4088 atomic64_sub(ac->ac_b_ex.fe_len, in ext4_mb_mark_diskspace_used()
4224 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) in ext4_mb_normalize_group_request() argument
4226 struct super_block *sb = ac->ac_sb; in ext4_mb_normalize_group_request()
4227 struct ext4_locality_group *lg = ac->ac_lg; in ext4_mb_normalize_group_request()
4230 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; in ext4_mb_normalize_group_request()
4231 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); in ext4_mb_normalize_group_request()
4253 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac, in ext4_mb_pa_assert_overlap() argument
4256 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_pa_assert_overlap()
4257 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); in ext4_mb_pa_assert_overlap()
4280 * Given an allocation context "ac" and a range "start", "end", check
4285 * ac allocation context
4290 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac, in ext4_mb_pa_adjust_overlap() argument
4293 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); in ext4_mb_pa_adjust_overlap()
4294 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_pa_adjust_overlap()
4297 ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1; in ext4_mb_pa_adjust_overlap()
4298 loff_t new_end, tmp_pa_end, left_pa_end = -1; in ext4_mb_pa_adjust_overlap()
4310 /* Step 1: find any one immediate neighboring PA of the normalized range */ in ext4_mb_pa_adjust_overlap()
4312 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, in ext4_mb_pa_adjust_overlap()
4322 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end || in ext4_mb_pa_adjust_overlap()
4323 ac->ac_o_ex.fe_logical < tmp_pa_start)); in ext4_mb_pa_adjust_overlap()
4332 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) { in ext4_mb_pa_adjust_overlap()
4398 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical); in ext4_mb_pa_adjust_overlap()
4403 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical); in ext4_mb_pa_adjust_overlap()
4419 ext4_mb_pa_assert_overlap(ac, new_start, new_end); in ext4_mb_pa_adjust_overlap()
4430 ext4_mb_normalize_request(struct ext4_allocation_context *ac, in ext4_mb_normalize_request() argument
4433 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_normalize_request()
4442 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) in ext4_mb_normalize_request()
4446 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) in ext4_mb_normalize_request()
4451 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) in ext4_mb_normalize_request()
4454 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { in ext4_mb_normalize_request()
4455 ext4_mb_normalize_group_request(ac); in ext4_mb_normalize_request()
4459 bsbits = ac->ac_sb->s_blocksize_bits; in ext4_mb_normalize_request()
4463 size = extent_logical_end(sbi, &ac->ac_o_ex); in ext4_mb_normalize_request()
4465 if (size < i_size_read(ac->ac_inode)) in ext4_mb_normalize_request()
4466 size = i_size_read(ac->ac_inode); in ext4_mb_normalize_request()
4493 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> in ext4_mb_normalize_request()
4497 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> in ext4_mb_normalize_request()
4500 } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len), in ext4_mb_normalize_request()
4502 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> in ext4_mb_normalize_request()
4506 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; in ext4_mb_normalize_request()
4508 ac->ac_o_ex.fe_len) << bsbits; in ext4_mb_normalize_request()
4519 start = max(start, rounddown(ac->ac_o_ex.fe_logical, in ext4_mb_normalize_request()
4520 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); in ext4_mb_normalize_request()
4528 size -= ar->lleft + 1 - start; in ext4_mb_normalize_request()
4529 start = ar->lleft + 1; in ext4_mb_normalize_request()
4531 if (ar->pright && start + size - 1 >= ar->lright) in ext4_mb_normalize_request()
4538 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) in ext4_mb_normalize_request()
4539 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); in ext4_mb_normalize_request()
4543 ext4_mb_pa_adjust_overlap(ac, &start, &end); in ext4_mb_normalize_request()
4551 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and in ext4_mb_normalize_request()
4562 if (start + size <= ac->ac_o_ex.fe_logical || in ext4_mb_normalize_request()
4563 start > ac->ac_o_ex.fe_logical) { in ext4_mb_normalize_request()
4564 ext4_msg(ac->ac_sb, KERN_ERR, in ext4_mb_normalize_request()
4567 (unsigned long) ac->ac_o_ex.fe_logical); in ext4_mb_normalize_request()
4570 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); in ext4_mb_normalize_request()
4576 ac->ac_g_ex.fe_logical = start; in ext4_mb_normalize_request()
4577 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); in ext4_mb_normalize_request()
4578 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; in ext4_mb_normalize_request()
4585 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, in ext4_mb_normalize_request()
4586 &ac->ac_g_ex.fe_group, in ext4_mb_normalize_request()
4587 &ac->ac_g_ex.fe_start); in ext4_mb_normalize_request()
4588 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; in ext4_mb_normalize_request()
4590 if (ar->pleft && (ar->lleft + 1 == start) && in ext4_mb_normalize_request()
4591 ar->pleft + 1 < ext4_blocks_count(es)) { in ext4_mb_normalize_request()
4593 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, in ext4_mb_normalize_request()
4594 &ac->ac_g_ex.fe_group, in ext4_mb_normalize_request()
4595 &ac->ac_g_ex.fe_start); in ext4_mb_normalize_request()
4596 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; in ext4_mb_normalize_request()
4599 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, in ext4_mb_normalize_request()
4603 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) in ext4_mb_collect_stats() argument
4605 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_collect_stats()
4607 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { in ext4_mb_collect_stats()
4609 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); in ext4_mb_collect_stats()
4610 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) in ext4_mb_collect_stats()
4613 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); in ext4_mb_collect_stats()
4615 atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]); in ext4_mb_collect_stats()
4618 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); in ext4_mb_collect_stats()
4619 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && in ext4_mb_collect_stats()
4620 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) in ext4_mb_collect_stats()
4623 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len) in ext4_mb_collect_stats()
4626 if (ac->ac_found > sbi->s_mb_max_to_scan) in ext4_mb_collect_stats()
4630 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) in ext4_mb_collect_stats()
4631 trace_ext4_mballoc_alloc(ac); in ext4_mb_collect_stats()
4633 trace_ext4_mballoc_prealloc(ac); in ext4_mb_collect_stats()
4640 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4642 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) in ext4_discard_allocated_blocks() argument
4644 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_discard_allocated_blocks()
4649 if (ac->ac_f_ex.fe_len == 0) in ext4_discard_allocated_blocks()
4651 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); in ext4_discard_allocated_blocks()
4660 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); in ext4_discard_allocated_blocks()
4661 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, in ext4_discard_allocated_blocks()
4662 ac->ac_f_ex.fe_len); in ext4_discard_allocated_blocks()
4663 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); in ext4_discard_allocated_blocks()
4669 pa->pa_free += ac->ac_b_ex.fe_len; in ext4_discard_allocated_blocks()
4677 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, in ext4_mb_use_inode_pa() argument
4680 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_use_inode_pa()
4686 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); in ext4_mb_use_inode_pa()
4688 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); in ext4_mb_use_inode_pa()
4690 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, in ext4_mb_use_inode_pa()
4691 &ac->ac_b_ex.fe_start); in ext4_mb_use_inode_pa()
4692 ac->ac_b_ex.fe_len = len; in ext4_mb_use_inode_pa()
4693 ac->ac_status = AC_STATUS_FOUND; in ext4_mb_use_inode_pa()
4694 ac->ac_pa = pa; in ext4_mb_use_inode_pa()
4699 BUG_ON(ac->ac_b_ex.fe_len <= 0); in ext4_mb_use_inode_pa()
4702 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); in ext4_mb_use_inode_pa()
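
Note: ext4_mb_use_inode_pa (lines 4677-4702 above) maps the file-logical request into the preallocation's physical window: start = pa_pstart + (logical - pa_lstart), with the end clipped to the PA's extent. A hedged arithmetic sketch in cluster units; the type and names are hypothetical:

struct pa_window { unsigned long long pstart; unsigned lstart, len; };

static unsigned long long use_inode_pa(const struct pa_window *pa,
				       unsigned logical, unsigned want,
				       unsigned *got)
{
	unsigned long long start = pa->pstart + (logical - pa->lstart);
	unsigned long long pa_end = pa->pstart + pa->len;
	unsigned long long end = start + want;

	if (end > pa_end)        /* never hand out blocks past the PA window */
		end = pa_end;
	*got = (unsigned)(end - start);
	return start;            /* physical start of the satisfied extent */
}
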
4708 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, in ext4_mb_use_group_pa() argument
4711 unsigned int len = ac->ac_o_ex.fe_len; in ext4_mb_use_group_pa()
4713 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, in ext4_mb_use_group_pa()
4714 &ac->ac_b_ex.fe_group, in ext4_mb_use_group_pa()
4715 &ac->ac_b_ex.fe_start); in ext4_mb_use_group_pa()
4716 ac->ac_b_ex.fe_len = len; in ext4_mb_use_group_pa()
4717 ac->ac_status = AC_STATUS_FOUND; in ext4_mb_use_group_pa()
4718 ac->ac_pa = pa; in ext4_mb_use_group_pa()
4726 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", in ext4_mb_use_group_pa()
4763 ext4_mb_pa_goal_check(struct ext4_allocation_context *ac, in ext4_mb_pa_goal_check() argument
4766 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_pa_goal_check()
4769 if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))) in ext4_mb_pa_goal_check()
4779 (ac->ac_g_ex.fe_logical - pa->pa_lstart); in ext4_mb_pa_goal_check()
4780 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start) in ext4_mb_pa_goal_check()
4783 if (ac->ac_g_ex.fe_len > pa->pa_len - in ext4_mb_pa_goal_check()
4784 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart)) in ext4_mb_pa_goal_check()
4794 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) in ext4_mb_use_preallocated() argument
4796 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_use_preallocated()
4798 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); in ext4_mb_use_preallocated()
4805 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) in ext4_mb_use_preallocated()
4822 * Step 1: Find a pa with logical start immediately adjacent to the in ext4_mb_use_preallocated()
4828 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, in ext4_mb_use_preallocated()
4839 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) { in ext4_mb_use_preallocated()
4856 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); in ext4_mb_use_preallocated()
4888 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); in ext4_mb_use_preallocated()
4889 BUG_ON(tmp_pa->pa_deleted == 1); in ext4_mb_use_preallocated()
4896 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) { in ext4_mb_use_preallocated()
4902 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && in ext4_mb_use_preallocated()
4913 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) { in ext4_mb_use_preallocated()
4915 ext4_mb_use_inode_pa(ac, tmp_pa); in ext4_mb_use_preallocated()
4925 * 1. When a new inode pa is added to rbtree it must have in ext4_mb_use_preallocated()
4954 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) in ext4_mb_use_preallocated()
4958 lg = ac->ac_lg; in ext4_mb_use_preallocated()
4961 order = fls(ac->ac_o_ex.fe_len) - 1; in ext4_mb_use_preallocated()
4962 if (order > PREALLOC_TB_SIZE - 1) in ext4_mb_use_preallocated()
4964 order = PREALLOC_TB_SIZE - 1; in ext4_mb_use_preallocated()
4966 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); in ext4_mb_use_preallocated()
4977 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) { in ext4_mb_use_preallocated()
4987 ext4_mb_use_group_pa(ac, cpa); in ext4_mb_use_preallocated()
5049 pa->pa_deleted = 1; in ext4_mb_mark_pa_deleted()
5077 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, in ext4_mb_put_pa() argument
5082 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); in ext4_mb_put_pa()
5091 if (pa->pa_deleted == 1) { in ext4_mb_put_pa()
5169 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) in ext4_mb_new_inode_pa() argument
5171 struct super_block *sb = ac->ac_sb; in ext4_mb_new_inode_pa()
5178 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); in ext4_mb_new_inode_pa()
5179 BUG_ON(ac->ac_status != AC_STATUS_FOUND); in ext4_mb_new_inode_pa()
5180 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); in ext4_mb_new_inode_pa()
5181 BUG_ON(ac->ac_pa == NULL); in ext4_mb_new_inode_pa()
5183 pa = ac->ac_pa; in ext4_mb_new_inode_pa()
5185 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) { in ext4_mb_new_inode_pa()
5187 .fe_logical = ac->ac_g_ex.fe_logical, in ext4_mb_new_inode_pa()
5188 .fe_len = ac->ac_orig_goal_len, in ext4_mb_new_inode_pa()
5191 loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex); in ext4_mb_new_inode_pa()
5201 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); in ext4_mb_new_inode_pa()
5202 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); in ext4_mb_new_inode_pa()
5209 * 1. Check if best ex can be kept at end of goal (before in ext4_mb_new_inode_pa()
5215 ex.fe_len = ac->ac_b_ex.fe_len; in ext4_mb_new_inode_pa()
5218 if (ac->ac_o_ex.fe_logical >= ex.fe_logical) in ext4_mb_new_inode_pa()
5221 ex.fe_logical = ac->ac_g_ex.fe_logical; in ext4_mb_new_inode_pa()
5225 ex.fe_logical = ac->ac_o_ex.fe_logical; in ext4_mb_new_inode_pa()
5227 ac->ac_b_ex.fe_logical = ex.fe_logical; in ext4_mb_new_inode_pa()
5229 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); in ext4_mb_new_inode_pa()
5233 pa->pa_lstart = ac->ac_b_ex.fe_logical; in ext4_mb_new_inode_pa()
5234 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_inode_pa()
5235 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_inode_pa()
5244 trace_ext4_mb_new_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
5247 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
5249 ei = EXT4_I(ac->ac_inode); in ext4_mb_new_inode_pa()
5250 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); in ext4_mb_new_inode_pa()
5255 pa->pa_inode = ac->ac_inode; in ext4_mb_new_inode_pa()
5269 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) in ext4_mb_new_group_pa() argument
5271 struct super_block *sb = ac->ac_sb; in ext4_mb_new_group_pa()
5277 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); in ext4_mb_new_group_pa()
5278 BUG_ON(ac->ac_status != AC_STATUS_FOUND); in ext4_mb_new_group_pa()
5279 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); in ext4_mb_new_group_pa()
5280 BUG_ON(ac->ac_pa == NULL); in ext4_mb_new_group_pa()
5282 pa = ac->ac_pa; in ext4_mb_new_group_pa()
5284 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_group_pa()
5286 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_group_pa()
5296 trace_ext4_mb_new_group_pa(ac, pa); in ext4_mb_new_group_pa()
5298 ext4_mb_use_group_pa(ac, pa); in ext4_mb_new_group_pa()
5301 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); in ext4_mb_new_group_pa()
5304 lg = ac->ac_lg; in ext4_mb_new_group_pa()
5318 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) in ext4_mb_new_preallocation() argument
5320 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) in ext4_mb_new_preallocation()
5321 ext4_mb_new_group_pa(ac); in ext4_mb_new_preallocation()
5323 ext4_mb_new_inode_pa(ac); in ext4_mb_new_preallocation()
5368 bit = next + 1; in ext4_mb_release_inode_pa()
5416 * 1) ENOSPC
5418 * 1) how many requested
5462 *busy = 1; in ext4_mb_discard_group_preallocations()
5573 WARN_ON(1); in ext4_discard_preallocations()
5642 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) in ext4_mb_pa_alloc() argument
5650 atomic_set(&pa->pa_count, 1); in ext4_mb_pa_alloc()
5651 ac->ac_pa = pa; in ext4_mb_pa_alloc()
5655 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac) in ext4_mb_pa_put_free() argument
5657 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_pa_put_free()
5660 ac->ac_pa = NULL; in ext4_mb_pa_put_free()
5667 pa->pa_deleted = 1; in ext4_mb_pa_put_free()
5706 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) in ext4_mb_show_ac() argument
5708 struct super_block *sb = ac->ac_sb; in ext4_mb_show_ac()
5716 ac->ac_status, ac->ac_flags); in ext4_mb_show_ac()
5720 (unsigned long)ac->ac_o_ex.fe_group, in ext4_mb_show_ac()
5721 (unsigned long)ac->ac_o_ex.fe_start, in ext4_mb_show_ac()
5722 (unsigned long)ac->ac_o_ex.fe_len, in ext4_mb_show_ac()
5723 (unsigned long)ac->ac_o_ex.fe_logical, in ext4_mb_show_ac()
5724 (unsigned long)ac->ac_g_ex.fe_group, in ext4_mb_show_ac()
5725 (unsigned long)ac->ac_g_ex.fe_start, in ext4_mb_show_ac()
5726 (unsigned long)ac->ac_g_ex.fe_len, in ext4_mb_show_ac()
5727 (unsigned long)ac->ac_g_ex.fe_logical, in ext4_mb_show_ac()
5728 (unsigned long)ac->ac_b_ex.fe_group, in ext4_mb_show_ac()
5729 (unsigned long)ac->ac_b_ex.fe_start, in ext4_mb_show_ac()
5730 (unsigned long)ac->ac_b_ex.fe_len, in ext4_mb_show_ac()
5731 (unsigned long)ac->ac_b_ex.fe_logical, in ext4_mb_show_ac()
5732 (int)ac->ac_criteria); in ext4_mb_show_ac()
5733 mb_debug(sb, "%u found", ac->ac_found); in ext4_mb_show_ac()
5734 mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no"); in ext4_mb_show_ac()
5735 if (ac->ac_pa) in ext4_mb_show_ac()
5736 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ? in ext4_mb_show_ac()
5744 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) in ext4_mb_show_ac() argument
5746 ext4_mb_show_pa(ac->ac_sb); in ext4_mb_show_ac()
5757 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) in ext4_mb_group_or_file() argument
5759 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_group_or_file()
5760 int bsbits = ac->ac_sb->s_blocksize_bits; in ext4_mb_group_or_file()
5764 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) in ext4_mb_group_or_file()
5767 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) in ext4_mb_group_or_file()
5772 size = extent_logical_end(sbi, &ac->ac_o_ex); in ext4_mb_group_or_file()
5773 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) in ext4_mb_group_or_file()
5778 !inode_is_open_for_write(ac->ac_inode)) in ext4_mb_group_or_file()
5788 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; in ext4_mb_group_or_file()
5790 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; in ext4_mb_group_or_file()
5794 BUG_ON(ac->ac_lg != NULL); in ext4_mb_group_or_file()
5800 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); in ext4_mb_group_or_file()
5803 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; in ext4_mb_group_or_file()
5806 mutex_lock(&ac->ac_lg->lg_mutex); in ext4_mb_group_or_file()
5810 ext4_mb_initialize_context(struct ext4_allocation_context *ac, in ext4_mb_initialize_context() argument
5836 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); in ext4_mb_initialize_context()
5837 ac->ac_status = AC_STATUS_CONTINUE; in ext4_mb_initialize_context()
5838 ac->ac_sb = sb; in ext4_mb_initialize_context()
5839 ac->ac_inode = ar->inode; in ext4_mb_initialize_context()
5840 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; in ext4_mb_initialize_context()
5841 ac->ac_o_ex.fe_group = group; in ext4_mb_initialize_context()
5842 ac->ac_o_ex.fe_start = block; in ext4_mb_initialize_context()
5843 ac->ac_o_ex.fe_len = len; in ext4_mb_initialize_context()
5844 ac->ac_g_ex = ac->ac_o_ex; in ext4_mb_initialize_context()
5845 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; in ext4_mb_initialize_context()
5846 ac->ac_flags = ar->flags; in ext4_mb_initialize_context()
5850 ext4_mb_group_or_file(ac); in ext4_mb_initialize_context()
5852 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " in ext4_mb_initialize_context()
5855 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, in ext4_mb_initialize_context()
5945 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) in ext4_mb_add_n_trim() argument
5947 int order, added = 0, lg_prealloc_count = 1; in ext4_mb_add_n_trim()
5948 struct super_block *sb = ac->ac_sb; in ext4_mb_add_n_trim()
5949 struct ext4_locality_group *lg = ac->ac_lg; in ext4_mb_add_n_trim()
5950 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; in ext4_mb_add_n_trim()
5952 order = fls(pa->pa_free) - 1; in ext4_mb_add_n_trim()
5953 if (order > PREALLOC_TB_SIZE - 1) in ext4_mb_add_n_trim()
5955 order = PREALLOC_TB_SIZE - 1; in ext4_mb_add_n_trim()
5970 added = 1; in ext4_mb_add_n_trim()
5993 static int ext4_mb_release_context(struct ext4_allocation_context *ac) in ext4_mb_release_context() argument
5995 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_release_context()
5996 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_release_context()
6001 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
6002 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
6003 pa->pa_free -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
6004 pa->pa_len -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
6017 ext4_mb_add_n_trim(ac); in ext4_mb_release_context()
6021 ext4_mb_put_pa(ac, ac->ac_sb, pa); in ext4_mb_release_context()
6023 if (ac->ac_bitmap_page) in ext4_mb_release_context()
6024 put_page(ac->ac_bitmap_page); in ext4_mb_release_context()
6025 if (ac->ac_buddy_page) in ext4_mb_release_context()
6026 put_page(ac->ac_buddy_page); in ext4_mb_release_context()
6027 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) in ext4_mb_release_context()
6028 mutex_unlock(&ac->ac_lg->lg_mutex); in ext4_mb_release_context()
6029 ext4_mb_collect_stats(ac); in ext4_mb_release_context()
6043 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; in ext4_mb_discard_preallocations()
6061 struct ext4_allocation_context *ac, u64 *seq) in ext4_mb_discard_preallocations_should_retry() argument
6067 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); in ext4_mb_discard_preallocations_should_retry()
6073 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { in ext4_mb_discard_preallocations_should_retry()
6074 ac->ac_flags |= EXT4_MB_STRICT_CHECK; in ext4_mb_discard_preallocations_should_retry()
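
Note: the retry helper above, together with the discard_pa_seq sampling mentioned at line 431, implements a sequence-counter retry: after a failed allocation, retry only if preallocations were actually discarded since the counter was sampled (or escalate once to strict checking). A hedged userspace analogue with C11 atomics; all names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong discard_seq;        /* bumped by every discard */

/* Called by the discard path whenever preallocated space is released. */
static void note_discard(void)
{
	atomic_fetch_add(&discard_seq, 1);
}

/* Allocation slow path: worth retrying only if discards happened since
 * we sampled *seq; otherwise we would just fail the same way again. */
static bool should_retry(unsigned long *seq)
{
	unsigned long now = atomic_load(&discard_seq);

	if (now != *seq) {
		*seq = now;     /* re-sample so the next failure re-evaluates */
		return true;
	}
	return false;
}
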
6117 while (1) { in ext4_mb_new_blocks_simple()
6125 blkoff = i + 1; in ext4_mb_new_blocks_simple()
6145 ext4_mb_mark_bb(sb, block, 1, 1); in ext4_mb_new_blocks_simple()
6146 ar->len = 1; in ext4_mb_new_blocks_simple()
6160 struct ext4_allocation_context *ac = NULL; in ext4_mb_new_blocks() local
6191 ar->len = ar->len >> 1; in ext4_mb_new_blocks()
6218 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); in ext4_mb_new_blocks()
6219 if (!ac) { in ext4_mb_new_blocks()
6225 ext4_mb_initialize_context(ac, ar); in ext4_mb_new_blocks()
6227 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; in ext4_mb_new_blocks()
6229 if (!ext4_mb_use_preallocated(ac)) { in ext4_mb_new_blocks()
6230 ac->ac_op = EXT4_MB_HISTORY_ALLOC; in ext4_mb_new_blocks()
6231 ext4_mb_normalize_request(ac, ar); in ext4_mb_new_blocks()
6233 *errp = ext4_mb_pa_alloc(ac); in ext4_mb_new_blocks()
6238 *errp = ext4_mb_regular_allocator(ac); in ext4_mb_new_blocks()
6242 * ac->ac_status == AC_STATUS_FOUND. in ext4_mb_new_blocks()
6243 * And error from above mean ac->ac_status != AC_STATUS_FOUND in ext4_mb_new_blocks()
6247 ext4_mb_pa_put_free(ac); in ext4_mb_new_blocks()
6248 ext4_discard_allocated_blocks(ac); in ext4_mb_new_blocks()
6251 if (ac->ac_status == AC_STATUS_FOUND && in ext4_mb_new_blocks()
6252 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) in ext4_mb_new_blocks()
6253 ext4_mb_pa_put_free(ac); in ext4_mb_new_blocks()
6255 if (likely(ac->ac_status == AC_STATUS_FOUND)) { in ext4_mb_new_blocks()
6256 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); in ext4_mb_new_blocks()
6258 ext4_discard_allocated_blocks(ac); in ext4_mb_new_blocks()
6261 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_blocks()
6262 ar->len = ac->ac_b_ex.fe_len; in ext4_mb_new_blocks()
6266 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) in ext4_mb_new_blocks()
6272 ext4_mb_pa_put_free(ac); in ext4_mb_new_blocks()
6278 ac->ac_b_ex.fe_len = 0; in ext4_mb_new_blocks()
6280 ext4_mb_show_ac(ac); in ext4_mb_new_blocks()
6282 ext4_mb_release_context(ac); in ext4_mb_new_blocks()
6283 kmem_cache_free(ext4_ac_cachep, ac); in ext4_mb_new_blocks()
6690 BUG_ON(count > 1); in ext4_free_blocks()
6770 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); in ext4_group_add_blocks()
6771 unsigned long cluster_count = last_cluster - first_cluster + 1; in ext4_group_add_blocks()
6773 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); in ext4_group_add_blocks()
6929 if (grp < (ext4_get_groups_count(sb) - 1)) in ext4_last_grp_cluster()
6936 return nr_clusters_in_group - 1; in ext4_last_grp_cluster()
6967 start = mb_find_next_zero_bit(bitmap, max + 1, start); in ext4_try_to_trim_range()
6971 next = mb_find_next_bit(bitmap, last + 1, start); in ext4_try_to_trim_range()
6983 start = next + 1; in ext4_try_to_trim_range()
7076 end = start + (range->len >> sb->s_blocksize_bits) - 1; in ext4_trim_fs()
7091 if (end >= max_blks - 1) in ext4_trim_fs()
7092 end = max_blks - 1; in ext4_trim_fs()
7105 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; in ext4_trim_fs()
7122 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to in ext4_trim_fs()
7178 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; in ext4_mballoc_query_range()
7190 start = mb_find_next_zero_bit(bitmap, end + 1, start); in ext4_mballoc_query_range()
7193 next = mb_find_next_bit(bitmap, end + 1, start); in ext4_mballoc_query_range()
7201 start = next + 1; in ext4_mballoc_query_range()