Lines matching +full:rcu +full:-endian +full:-offset (hits from fs/ext4/balloc.c; each entry shows the source line number, the matching line, and its enclosing function)
1 // SPDX-License-Identifier: GPL-2.0
7 * Laboratoire MASI - Institut Blaise Pascal
11 * Big-endian to little-endian byte-swapping/bitmaps by
41 group = (block - in ext4_get_group_number()
42 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >> in ext4_get_group_number()
50 * Calculate the block group number and offset into the block/cluster
56 struct ext4_super_block *es = EXT4_SB(sb)->s_es; in ext4_get_group_no_and_offset()
57 ext4_grpblk_t offset; in ext4_get_group_no_and_offset() local
59 blocknr = blocknr - le32_to_cpu(es->s_first_data_block); in ext4_get_group_no_and_offset()
60 offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >> in ext4_get_group_no_and_offset()
61 EXT4_SB(sb)->s_cluster_bits; in ext4_get_group_no_and_offset()
63 *offsetp = offset; in ext4_get_group_no_and_offset()
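
The two functions above reduce a physical block number to a (group, offset) pair. Note that do_div() divides its 64-bit argument in place and returns the remainder, so line 60 leaves the group number in blocknr and yields the in-group offset, shifted down to cluster units. A minimal userspace sketch of the same arithmetic, assuming a typical 4 KiB-block, non-bigalloc layout (the constants are assumptions, not values read from a superblock):

#include <stdio.h>
#include <stdint.h>

#define FIRST_DATA_BLOCK   0        /* s_first_data_block for 4 KiB blocks */
#define BLOCKS_PER_GROUP   32768    /* 8 bits per byte * 4096-byte bitmap block */
#define CLUSTER_BITS       0        /* no bigalloc: 1 cluster == 1 block */

int main(void)
{
        uint64_t blocknr = 1234567;             /* arbitrary physical block */
        uint64_t rel = blocknr - FIRST_DATA_BLOCK;

        /* do_div() in the kernel: quotient stays in blocknr, remainder is returned */
        uint64_t group  = rel / BLOCKS_PER_GROUP;
        uint32_t offset = (uint32_t)(rel % BLOCKS_PER_GROUP) >> CLUSTER_BITS;

        printf("block %llu -> group %llu, cluster offset %u\n",
               (unsigned long long)blocknr, (unsigned long long)group, offset);
        return 0;
}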
91 int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c; in ext4_num_overhead_clusters()
115 ext4_block_bitmap(sb, gdp) - start); in ext4_num_overhead_clusters()
117 block_cluster = -1; in ext4_num_overhead_clusters()
120 block_cluster = -1; in ext4_num_overhead_clusters()
126 ext4_inode_bitmap(sb, gdp) - start); in ext4_num_overhead_clusters()
128 inode_cluster = -1; in ext4_num_overhead_clusters()
131 inode_cluster = -1; in ext4_num_overhead_clusters()
136 for (i = 0; i < sbi->s_itb_per_group; i++) { in ext4_num_overhead_clusters()
138 c = EXT4_B2C(sbi, itbl_blk + i - start); in ext4_num_overhead_clusters()
151 if (block_cluster != -1) in ext4_num_overhead_clusters()
153 if (inode_cluster != -1) in ext4_num_overhead_clusters()
164 if (block_group == ext4_get_groups_count(sb) - 1) { in num_clusters_in_group()
171 blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) - in num_clusters_in_group()
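
num_clusters_in_group() only has to special-case the last block group, which may be smaller than EXT4_BLOCKS_PER_GROUP. A hedged sketch of that calculation with assumed geometry (4 KiB blocks, no bigalloc):

#include <stdio.h>
#include <stdint.h>

#define FIRST_DATA_BLOCK 0
#define BLOCKS_PER_GROUP 32768

/* Clusters (== blocks here) available in @group of a filesystem with
 * @total_blocks blocks; only the last group can be short. */
static uint32_t clusters_in_group(uint64_t total_blocks, uint64_t group)
{
        uint64_t ngroups = (total_blocks - FIRST_DATA_BLOCK +
                            BLOCKS_PER_GROUP - 1) / BLOCKS_PER_GROUP;

        if (group == ngroups - 1) {
                uint64_t first = FIRST_DATA_BLOCK + group * BLOCKS_PER_GROUP;
                return (uint32_t)(total_blocks - first);
        }
        return BLOCKS_PER_GROUP;
}

int main(void)
{
        uint64_t total = 100000;        /* not a multiple of 32768 */
        uint64_t last  = (total + BLOCKS_PER_GROUP - 1) / BLOCKS_PER_GROUP - 1;

        printf("group 0: %u clusters, last group %llu: %u clusters\n",
               clusters_in_group(total, 0), (unsigned long long)last,
               clusters_in_group(total, last));
        return 0;
}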
191 * essentially implementing a per-group read-only flag. */ in ext4_init_block_bitmap()
196 return -EFSBADCRC; in ext4_init_block_bitmap()
198 memset(bh->b_data, 0, sb->s_blocksize); in ext4_init_block_bitmap()
201 if ((bit_max >> 3) >= bh->b_size) in ext4_init_block_bitmap()
202 return -EFSCORRUPTED; in ext4_init_block_bitmap()
205 ext4_set_bit(bit, bh->b_data); in ext4_init_block_bitmap()
212 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); in ext4_init_block_bitmap()
216 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); in ext4_init_block_bitmap()
220 sbi->s_itb_per_group; tmp++) { in ext4_init_block_bitmap()
222 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); in ext4_init_block_bitmap()
231 sb->s_blocksize * 8, bh->b_data); in ext4_init_block_bitmap()
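
For a group flagged EXT4_BG_BLOCK_UNINIT the bitmap is built from scratch: clear the buffer, set the bits for the block bitmap, the inode bitmap and the inode table (all converted to in-group cluster offsets with EXT4_B2C), then pad the unused tail of the bitmap block with 1s (the final hit at line 231). A rough userspace sketch of the bit-setting part; the offsets and inode-table size below are hypothetical:

#include <stdio.h>
#include <string.h>

#define BITMAP_BYTES 4096               /* one 4 KiB bitmap block */

/* Set bit @nr in a little-endian bitmap, as ext4_set_bit() does. */
static void set_bit_le(unsigned int nr, unsigned char *map)
{
        map[nr >> 3] |= 1u << (nr & 7);
}

int main(void)
{
        unsigned char bitmap[BITMAP_BYTES];
        unsigned int i, used = 0;

        /* Hypothetical in-group cluster offsets for this group's metadata. */
        unsigned int block_bitmap = 1, inode_bitmap = 2;
        unsigned int itbl_start = 3, itb_per_group = 512;

        memset(bitmap, 0, sizeof(bitmap));      /* fresh bitmap: everything free */
        set_bit_le(block_bitmap, bitmap);
        set_bit_le(inode_bitmap, bitmap);
        for (i = 0; i < itb_per_group; i++)
                set_bit_le(itbl_start + i, bitmap);

        for (i = 0; i < 8 * BITMAP_BYTES; i++)
                used += !!(bitmap[i >> 3] & (1u << (i & 7)));
        printf("metadata clusters marked in use: %u\n", used);  /* 514 */
        return 0;
}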
242 return num_clusters_in_group(sb, block_group) - in ext4_free_clusters_after_init()
258 * ext4_get_group_desc() -- load group descriptor from disk
269 unsigned int offset; in ext4_get_group_desc() local
276 ext4_error(sb, "block_group >= groups_count - block_group = %u," in ext4_get_group_desc()
283 offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); in ext4_get_group_desc()
286 * sbi_array_rcu_deref returns with rcu unlocked, this is ok since in ext4_get_group_desc()
292 ext4_error(sb, "Group descriptor not loaded - " in ext4_get_group_desc()
294 block_group, group_desc, offset); in ext4_get_group_desc()
299 (__u8 *)bh_p->b_data + in ext4_get_group_desc()
300 offset * EXT4_DESC_SIZE(sb)); in ext4_get_group_desc()
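
A group descriptor is located by splitting the group number into a descriptor-block index (block_group >> EXT4_DESC_PER_BLOCK_BITS) and a slot within that block (line 283); the buffer heads for the descriptor blocks live in the RCU-protected s_group_desc array, which is why sbi_array_rcu_deref() appears at line 286. A small sketch of the index/offset split, assuming 4 KiB blocks and 64-byte descriptors:

#include <stdio.h>
#include <stdint.h>

#define DESC_SIZE       64                              /* 64-bit feature: 64-byte descriptors */
#define BLOCK_SIZE      4096
#define DESC_PER_BLOCK  (BLOCK_SIZE / DESC_SIZE)        /* 64 */

int main(void)
{
        uint32_t block_group = 1000;

        /* Which descriptor block, and which slot inside it. */
        uint32_t group_desc = block_group / DESC_PER_BLOCK;
        uint32_t offset     = block_group & (DESC_PER_BLOCK - 1);

        printf("group %u -> descriptor block %u, slot %u (byte %u)\n",
               block_group, group_desc, offset, offset * DESC_SIZE);
        return 0;
}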
311 unsigned long bitmap_size = sb->s_blocksize * 8; in ext4_valid_block_bitmap_padding()
312 unsigned int offset = num_clusters_in_group(sb, block_group); in ext4_valid_block_bitmap_padding() local
314 if (bitmap_size <= offset) in ext4_valid_block_bitmap_padding()
317 next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset); in ext4_valid_block_bitmap_padding()
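
Everything between the last real cluster of the group and the end of the bitmap block must be marked in use, so the first zero bit found at or beyond num_clusters_in_group() has to be the end of the bitmap itself. A self-contained sketch of that padding check; find_next_zero_bit() here is a simplified stand-in for ext4_find_next_zero_bit():

#include <stdio.h>

#define BITMAP_BITS 32768               /* sb->s_blocksize * 8 for 4 KiB blocks */

/* Return the index of the first zero bit in [start, size), or size if none. */
static unsigned int find_next_zero_bit(const unsigned char *map,
                                       unsigned int size, unsigned int start)
{
        unsigned int i;

        for (i = start; i < size; i++)
                if (!(map[i >> 3] & (1u << (i & 7))))
                        return i;
        return size;
}

int main(void)
{
        static unsigned char bitmap[BITMAP_BITS / 8];
        unsigned int clusters_in_group = 1696;  /* short last group, as above */
        unsigned int i;

        /* mkfs/kernel pad the tail of the bitmap with 1s ... */
        for (i = clusters_in_group; i < BITMAP_BITS; i++)
                bitmap[i >> 3] |= 1u << (i & 7);

        /* ... so a zero bit past the end of the group means corruption. */
        i = find_next_zero_bit(bitmap, BITMAP_BITS, clusters_in_group);
        printf("padding %s\n", i == BITMAP_BITS ? "valid" : "corrupted");
        return 0;
}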
328 if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) in ext4_get_group_info()
331 indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); in ext4_get_group_info()
346 ext4_grpblk_t offset; in ext4_valid_block_bitmap() local
365 offset = blk - group_first_block; in ext4_valid_block_bitmap()
366 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || in ext4_valid_block_bitmap()
367 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) in ext4_valid_block_bitmap()
373 offset = blk - group_first_block; in ext4_valid_block_bitmap()
374 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || in ext4_valid_block_bitmap()
375 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) in ext4_valid_block_bitmap()
381 offset = blk - group_first_block; in ext4_valid_block_bitmap()
382 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || in ext4_valid_block_bitmap()
383 EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit) in ext4_valid_block_bitmap()
385 next_zero_bit = ext4_find_next_zero_bit(bh->b_data, in ext4_valid_block_bitmap()
386 EXT4_B2C(sbi, offset + sbi->s_itb_per_group), in ext4_valid_block_bitmap()
387 EXT4_B2C(sbi, offset)); in ext4_valid_block_bitmap()
389 EXT4_B2C(sbi, offset + sbi->s_itb_per_group)) in ext4_valid_block_bitmap()
403 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) in ext4_validate_block_bitmap()
411 return -EFSCORRUPTED; in ext4_validate_block_bitmap()
423 return -EFSBADCRC; in ext4_validate_block_bitmap()
432 return -EFSCORRUPTED; in ext4_validate_block_bitmap()
441 return -EFSCORRUPTED; in ext4_validate_block_bitmap()
471 return ERR_PTR(-EFSCORRUPTED); in ext4_read_block_bitmap_nowait()
473 if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || in ext4_read_block_bitmap_nowait()
474 (bitmap_blk >= ext4_blocks_count(sbi->s_es))) { in ext4_read_block_bitmap_nowait()
479 return ERR_PTR(-EFSCORRUPTED); in ext4_read_block_bitmap_nowait()
483 ext4_warning(sb, "Cannot get buffer for block bitmap - " in ext4_read_block_bitmap_nowait()
486 return ERR_PTR(-ENOMEM); in ext4_read_block_bitmap_nowait()
505 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { in ext4_read_block_bitmap_nowait()
511 err = -EFSCORRUPTED; in ext4_read_block_bitmap_nowait()
556 /* Returns 0 on success, -errno on error */
566 return -EFSCORRUPTED; in ext4_wait_block_bitmap()
570 ext4_error_err(sb, EIO, "Cannot read block bitmap - " in ext4_wait_block_bitmap()
572 block_group, (unsigned long long) bh->b_blocknr); in ext4_wait_block_bitmap()
575 return -EIO; in ext4_wait_block_bitmap()
578 /* Panic or remount fs read-only if block bitmap is invalid */ in ext4_wait_block_bitmap()
601 * @sbi: in-core super block structure.
612 struct percpu_counter *fcc = &sbi->s_freeclusters_counter; in ext4_has_free_clusters()
613 struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter; in ext4_has_free_clusters()
617 resv_clusters = atomic64_read(&sbi->s_resv_clusters); in ext4_has_free_clusters()
623 rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) + in ext4_has_free_clusters()
626 if (free_clusters - (nclusters + rsv + dirty_clusters) < in ext4_has_free_clusters()
638 if (uid_eq(sbi->s_resuid, current_fsuid()) || in ext4_has_free_clusters()
639 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) || in ext4_has_free_clusters()
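
ext4_has_free_clusters() compares the free-cluster count against the requested clusters plus the already-dirtied (delalloc) clusters and the reserved pool; ordinary users must also leave the root reserve (derived from s_r_blocks_count) untouched, while resuid/resgid/CAP_SYS_RESOURCE callers may dip into it. A deliberately simplified model of that decision, ignoring the percpu-counter watermark handling in the real code:

#include <stdio.h>
#include <stdint.h>

static int has_free_clusters(int64_t free, int64_t dirty, int64_t root_resv,
                             int64_t resv, int64_t nclusters, int privileged)
{
        int64_t rsv = root_resv + resv;

        if (free - (nclusters + rsv + dirty) >= 0)
                return 1;               /* plenty of space for anyone */

        /* root/resuid/resgid may eat into the root reserve, but never
         * into s_resv_clusters. */
        if (privileged && free - (nclusters + resv + dirty) >= 0)
                return 1;

        return 0;
}

int main(void)
{
        printf("%d %d\n",
               has_free_clusters(1000, 100, 50, 20, 850, 0),   /* 0: would hit the reserve */
               has_free_clusters(1000, 100, 50, 20, 850, 1));  /* 1: privileged caller may use it */
        return 0;
}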
660 percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters); in ext4_claim_free_clusters()
663 return -ENOSPC; in ext4_claim_free_clusters()
667 * ext4_should_retry_alloc() - check if a block allocation should be retried
681 if (!sbi->s_journal) in ext4_should_retry_alloc()
685 percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit); in ext4_should_retry_alloc()
694 if (sbi->s_mb_free_pending == 0) in ext4_should_retry_alloc()
701 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); in ext4_should_retry_alloc()
702 (void) jbd2_journal_force_commit_nested(sbi->s_journal); in ext4_should_retry_alloc()
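
Callers use ext4_should_retry_alloc() in a loop after an ENOSPC: the function gives up once the retry limit is hit or no freed blocks are pending (s_mb_free_pending == 0), otherwise it forces a journal commit (line 702) so blocks freed by committed transactions become allocatable again and returns 1. A mock of that calling pattern; fake_alloc() and fake_retry_alloc() are stand-ins, not ext4 APIs:

#include <stdio.h>
#include <errno.h>

#define MAX_ALLOC_RETRIES 3             /* assumed cap, mirrors the retry limit idea */

static int fake_retry_alloc(int *retries)       /* stand-in for ext4_should_retry_alloc() */
{
        return (*retries)++ < MAX_ALLOC_RETRIES;
}

static int fake_alloc(void)                     /* stand-in for the real block allocation */
{
        static int failures = 2;
        return failures-- > 0 ? -ENOSPC : 0;
}

int main(void)
{
        int retries = 0, err;

        do {
                err = fake_alloc();
        } while (err == -ENOSPC && fake_retry_alloc(&retries));

        printf("err=%d after %d retries\n", err, retries);
        return 0;
}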
707 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
741 EXT4_C2B(EXT4_SB(inode->i_sb), ar.len)); in ext4_new_meta_blocks()
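
The hit at line 741 shows quota being charged in filesystem blocks, so the allocated length, which mballoc reports in clusters, is scaled with EXT4_C2B(). A small illustration of the cluster/block conversions used throughout this file, assuming a bigalloc cluster of 16 blocks:

#include <stdio.h>
#include <stdint.h>

#define CLUSTER_BITS 4          /* assumed bigalloc geometry: 16 blocks per cluster */

/* Counterparts of EXT4_C2B()/EXT4_B2C(). */
#define C2B(c) ((uint64_t)(c) << CLUSTER_BITS)
#define B2C(b) ((uint64_t)(b) >> CLUSTER_BITS)

int main(void)
{
        uint32_t ar_len = 3;    /* clusters actually allocated */

        printf("%u clusters = %llu blocks; block 100 is in cluster %llu\n",
               ar_len, (unsigned long long)C2B(ar_len),
               (unsigned long long)B2C(100));
        return 0;
}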
747 * ext4_count_free_clusters() -- count filesystem free clusters
765 es = EXT4_SB(sb)->s_es; in ext4_count_free_clusters()
775 if (EXT4_SB(sb)->s_group_info) in ext4_count_free_clusters()
786 x = ext4_count_free(bitmap_bh->b_data, in ext4_count_free_clusters()
805 if (EXT4_SB(sb)->s_group_info) in ext4_count_free_clusters()
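
In its debug path ext4_count_free_clusters() reads every group's bitmap and counts the zero (free) bits, cross-checking them against the per-group free counts in the descriptors. A sketch of that bit counting; the real ext4_count_free() works on byte counts and uses memweight():

#include <stdio.h>
#include <string.h>

/* Count zero (free) bits in a bitmap of @nbits bits. */
static unsigned long count_free(const unsigned char *map, unsigned int nbits)
{
        unsigned long free = 0;
        unsigned int i;

        for (i = 0; i < nbits; i++)
                if (!(map[i >> 3] & (1u << (i & 7))))
                        free++;
        return free;
}

int main(void)
{
        unsigned char bitmap[16];

        memset(bitmap, 0xff, sizeof(bitmap));   /* all in use ... */
        bitmap[0] = 0xf0;                       /* ... except bits 0-3 */
        printf("free clusters: %lu\n", count_free(bitmap, 128));
        return 0;
}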
829 * ext4_bg_has_super - number of blocks used by the superblock in group
838 struct ext4_super_block *es = EXT4_SB(sb)->s_es; in ext4_bg_has_super()
843 if (group == le32_to_cpu(es->s_backup_bgs[0]) || in ext4_bg_has_super()
844 group == le32_to_cpu(es->s_backup_bgs[1])) in ext4_bg_has_super()
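
Lines 843-844 are the sparse_super2 case, where only the two groups recorded in s_backup_bgs carry a backup superblock. With classic sparse_super the backups sit in groups 0 and 1 and in groups that are powers of 3, 5 or 7, which the sketch below enumerates:

#include <stdio.h>

static int is_power_of(unsigned long g, unsigned long base)
{
        while (g % base == 0)
                g /= base;
        return g == 1;
}

static int bg_has_super(unsigned long group)
{
        if (group == 0 || group == 1)
                return 1;
        return is_power_of(group, 3) || is_power_of(group, 5) ||
               is_power_of(group, 7);
}

int main(void)
{
        unsigned long g;

        for (g = 0; g < 50; g++)
                if (bg_has_super(g))
                        printf("%lu ", g);      /* 0 1 3 5 7 9 25 27 49 */
        printf("\n");
        return 0;
}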
864 ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1; in ext4_bg_num_gdb_meta()
878 return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); in ext4_bg_num_gdb_nometa()
880 return EXT4_SB(sb)->s_gdb_count; in ext4_bg_num_gdb_nometa()
884 * ext4_bg_num_gdb - number of blocks used by the group table in group
895 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); in ext4_bg_num_gdb()
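
Without meta_bg, every group that carries a superblock backup also carries the complete group-descriptor table, i.e. s_gdb_count blocks, one descriptor block per EXT4_DESC_PER_BLOCK groups (line 880); with meta_bg, groups beyond the first s_first_meta_bg descriptor blocks keep at most one descriptor block per meta-group. A sketch of the descriptor-block count for an assumed non-meta_bg geometry:

#include <stdio.h>
#include <stdint.h>

#define BLOCKS_PER_GROUP 32768
#define DESC_PER_BLOCK   64             /* 4 KiB blocks, 64-byte descriptors */

int main(void)
{
        uint64_t total_blocks = 1ULL << 30;     /* a 4 TiB filesystem in 4 KiB blocks */
        uint64_t ngroups = (total_blocks + BLOCKS_PER_GROUP - 1) / BLOCKS_PER_GROUP;

        /* One descriptor block covers DESC_PER_BLOCK groups. */
        uint64_t gdb_count = (ngroups + DESC_PER_BLOCK - 1) / DESC_PER_BLOCK;

        printf("%llu groups need %llu group descriptor blocks\n",
               (unsigned long long)ngroups, (unsigned long long)gdb_count);
        return 0;
}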
919 block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) * in ext4_num_base_meta_blocks()
920 sbi->s_desc_per_block) { in ext4_num_base_meta_blocks()
923 num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks); in ext4_num_base_meta_blocks()
938 * ext4_inode_to_goal_block - return a hint for block allocation
949 int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); in ext4_inode_to_goal_block()
953 block_group = ei->i_block_group; in ext4_inode_to_goal_block()
963 block_group &= ~(flex_size-1); in ext4_inode_to_goal_block()
964 if (S_ISREG(inode->i_mode)) in ext4_inode_to_goal_block()
967 bg_start = ext4_group_first_block_no(inode->i_sb, block_group); in ext4_inode_to_goal_block()
968 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; in ext4_inode_to_goal_block()
974 if (test_opt(inode->i_sb, DELALLOC)) in ext4_inode_to_goal_block()
977 if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) in ext4_inode_to_goal_block()
979 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); in ext4_inode_to_goal_block()
982 ((last_block - bg_start) / 16); in ext4_inode_to_goal_block()
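
The goal hint rounds the inode's group down to the start of its flex group (line 963), moves regular files one group further in so directories and metadata keep the first group, and, when delayed allocation is disabled, adds a PID-based "colour" so concurrent writers start in different sixteenths of the group. A sketch with an assumed flex_bg factor of 16:

#include <stdio.h>
#include <stdint.h>

#define BLOCKS_PER_GROUP 32768
#define FLEX_BG_SIZE     16             /* assumed 2^s_log_groups_per_flex */

int main(void)
{
        uint32_t inode_group = 37;      /* group the inode lives in */
        uint32_t pid = 4242;            /* current->pid in the kernel */

        /* With flex_bg, metadata is packed at the start of each flex group,
         * so the data goal starts from the flex group, not the inode's group. */
        uint32_t goal_group = inode_group & ~(FLEX_BG_SIZE - 1);        /* -> 32 */
        goal_group++;   /* regular file: leave the flex group's first group to metadata */

        uint64_t bg_start = (uint64_t)goal_group * BLOCKS_PER_GROUP;
        uint64_t colour = (pid % 16) * (BLOCKS_PER_GROUP / 16);

        printf("goal block: %llu\n", (unsigned long long)(bg_start + colour));
        return 0;
}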