Searched refs:blk (Results 1 – 25 of 53) sorted by relevance

/fs/yaffs2/
yaffs_bitmap.c
20 static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk) in yaffs_block_bits() argument
22 if (blk < dev->internal_start_block || blk > dev->internal_end_block) { in yaffs_block_bits()
25 blk); in yaffs_block_bits()
29 (dev->chunk_bit_stride * (blk - dev->internal_start_block)); in yaffs_block_bits()
32 void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk) in yaffs_verify_chunk_bit_id() argument
34 if (blk < dev->internal_start_block || blk > dev->internal_end_block || in yaffs_verify_chunk_bit_id()
38 blk, chunk); in yaffs_verify_chunk_bit_id()
43 void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk) in yaffs_clear_chunk_bits() argument
45 u8 *blk_bits = yaffs_block_bits(dev, blk); in yaffs_clear_chunk_bits()
50 void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk) in yaffs_clear_chunk_bit() argument
[all …]
yaffs_yaffs1.c
25 int blk; in yaffs1_scan() local
51 for (blk = dev->internal_start_block; blk <= dev->internal_end_block; in yaffs1_scan()
52 blk++) { in yaffs1_scan()
53 yaffs_clear_chunk_bits(dev, blk); in yaffs1_scan()
57 yaffs_query_init_block_state(dev, blk, &state, &seq_number); in yaffs1_scan()
67 blk, state, seq_number); in yaffs1_scan()
71 "block %d is bad", blk); in yaffs1_scan()
81 for (blk = dev->internal_start_block; in yaffs1_scan()
82 !alloc_failed && blk <= dev->internal_end_block; blk++) { in yaffs1_scan()
86 bi = yaffs_get_block_info(dev, blk); in yaffs1_scan()
[all …]
yaffs_bitmap.h
25 void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
26 void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
27 void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
28 void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
29 int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
30 int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
31 int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
yaffs_summary.c
118 static int yaffs_summary_write(struct yaffs_dev *dev, int blk) in yaffs_summary_write() argument
130 struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk); in yaffs_summary_write()
142 hdr.block = blk; in yaffs_summary_write()
158 yaffs_set_chunk_bit(dev, blk, chunk_in_block); in yaffs_summary_write()
180 int blk) in yaffs_summary_read() argument
192 struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk); in yaffs_summary_read()
201 chunk_in_nand = blk * dev->param.chunks_per_block + in yaffs_summary_read()
222 yaffs_set_chunk_bit(dev, blk, chunk_in_block); in yaffs_summary_read()
238 hdr.block != blk || in yaffs_summary_read()
296 void yaffs_summary_gc(struct yaffs_dev *dev, int blk) in yaffs_summary_gc() argument
[all …]
yaffs_getblockinfo.h
24 *dev, int blk) in yaffs_get_block_info() argument
26 if (blk < dev->internal_start_block || blk > dev->internal_end_block) { in yaffs_get_block_info()
29 blk); in yaffs_get_block_info()
32 return &dev->block_info[blk - dev->internal_start_block]; in yaffs_get_block_info()
yaffs_yaffs2.c
935 int blk, int chunk_in_block, in yaffs2_scan_chunk() argument
951 int chunk = blk * dev->param.chunks_per_block + chunk_in_block; in yaffs2_scan_chunk()
997 blk, chunk_in_block); in yaffs2_scan_chunk()
1001 dev->alloc_block = blk; in yaffs2_scan_chunk()
1003 dev->alloc_block_finder = blk; in yaffs2_scan_chunk()
1011 blk); in yaffs2_scan_chunk()
1022 blk, chunk_in_block); in yaffs2_scan_chunk()
1032 blk, chunk_in_block, tags.obj_id, in yaffs2_scan_chunk()
1043 yaffs_set_chunk_bit(dev, blk, chunk_in_block); in yaffs2_scan_chunk()
1090 yaffs_set_chunk_bit(dev, blk, chunk_in_block); in yaffs2_scan_chunk()
[all …]
yaffs_summary.h
33 int blk);
34 void yaffs_summary_gc(struct yaffs_dev *dev, int blk);
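The yaffs_getblockinfo.h hit above shows the complete lookup helper: a range check on the block number, then indexing the per-block info array relative to internal_start_block. Below is a minimal standalone sketch of that pattern; the structs are a hypothetical reduction of struct yaffs_dev (not the real definitions), and the function is renamed get_block_info to keep it clearly separate from the kernel helper.

#include <stdio.h>

struct block_info { int pages_in_use; };

struct dev {
        int internal_start_block;      /* first usable block number  */
        int internal_end_block;        /* last usable block number   */
        struct block_info *block_info; /* one entry per usable block */
};

static struct block_info *get_block_info(struct dev *dev, int blk)
{
        /* Reject block numbers outside the device's usable range. */
        if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
                fprintf(stderr, "get_block_info: block %d out of range\n", blk);
                return NULL;
        }
        /* The info array is indexed relative to the first usable block. */
        return &dev->block_info[blk - dev->internal_start_block];
}

The same bounds-check-then-offset shape appears again in yaffs_block_bits() in the yaffs_bitmap.c hits.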
/fs/quota/
quota_tree.c
51 static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) in read_blk() argument
57 info->dqi_usable_bs, blk << info->dqi_blocksize_bits); in read_blk()
60 static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) in write_blk() argument
66 info->dqi_usable_bs, blk << info->dqi_blocksize_bits); in write_blk()
80 int ret, blk; in get_free_dqblk() local
85 blk = info->dqi_free_blk; in get_free_dqblk()
86 ret = read_blk(info, blk, buf); in get_free_dqblk()
97 blk = info->dqi_blocks++; in get_free_dqblk()
100 ret = blk; in get_free_dqblk()
107 static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk) in put_free_dqblk() argument
[all …]
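The quota_tree.c hits show read_blk()/write_blk() turning a quota-tree block number into a byte offset by shifting it by dqi_blocksize_bits. A self-contained sketch of that addressing follows, with a plain pread() file backend and a hypothetical cut-down struct standing in for qtree_mem_dqinfo.

#define _XOPEN_SOURCE 700
#include <stdio.h>
#include <unistd.h>

struct qtree_info {
        int      fd;                 /* backing file descriptor     */
        unsigned dqi_blocksize_bits; /* log2 of the tree block size */
        size_t   dqi_usable_bs;      /* usable bytes per tree block */
};

static ssize_t read_tree_blk(struct qtree_info *info, unsigned blk, char *buf)
{
        /* Byte offset of tree block 'blk' is blk << blocksize_bits. */
        off_t off = (off_t)blk << info->dqi_blocksize_bits;
        return pread(info->fd, buf, info->dqi_usable_bs, off);
}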
/fs/affs/
bitmap.c
67 u32 blk, bmap, bit, mask, tmp; in affs_free_block() local
75 blk = block - sbi->s_reserved; in affs_free_block()
76 bmap = blk / sbi->s_bmap_bits; in affs_free_block()
77 bit = blk % sbi->s_bmap_bits; in affs_free_block()
146 u32 blk, bmap, bit, mask, mask2, tmp; in affs_alloc_block() local
168 blk = goal - sbi->s_reserved; in affs_alloc_block()
169 bmap = blk / sbi->s_bmap_bits; in affs_alloc_block()
191 blk = bmap * sbi->s_bmap_bits; in affs_alloc_block()
206 bit = blk % sbi->s_bmap_bits; in affs_alloc_block()
210 blk &= ~31UL; in affs_alloc_block()
[all …]
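The affs bitmap.c hits split a block number into a bitmap-block index and a bit index: the reserved area is subtracted first, then the remainder is divided by s_bmap_bits. A small sketch of that arithmetic, with a hypothetical two-field struct in place of the affs superblock info:

#include <stdint.h>

struct affs_layout {
        uint32_t s_reserved;  /* blocks reserved at the start of the fs */
        uint32_t s_bmap_bits; /* blocks covered by one bitmap block     */
};

static void locate_bitmap_bit(const struct affs_layout *sbi, uint32_t block,
                              uint32_t *bmap, uint32_t *bit)
{
        uint32_t blk = block - sbi->s_reserved; /* skip reserved blocks */
        *bmap = blk / sbi->s_bmap_bits;         /* which bitmap block   */
        *bit  = blk % sbi->s_bmap_bits;         /* which bit inside it  */
}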
/fs/xfs/
xfs_da_btree.c
155 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC || in xfs_da_split()
156 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC); in xfs_da_split()
158 addblk = &state->path.blk[max]; /* initial dummy value */ in xfs_da_split()
160 oldblk = &state->path.blk[i]; in xfs_da_split()
161 newblk = &state->altpath.blk[i]; in xfs_da_split()
240 oldblk = &state->path.blk[0]; in xfs_da_split()
639 drop_blk = &state->path.blk[ state->path.active-1 ]; in xfs_da_join()
640 save_blk = &state->altpath.blk[ state->path.active-1 ]; in xfs_da_join()
641 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC); in xfs_da_join()
707 error = xfs_da_root_join(state, &state->path.blk[0]); in xfs_da_join()
[all …]
xfs_attr.c
1230 xfs_da_state_blk_t *blk; in xfs_attr_node_addname() local
1256 blk = &state->path.blk[ state->path.active-1 ]; in xfs_attr_node_addname()
1257 ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); in xfs_attr_node_addname()
1275 retval = xfs_attr_leaf_add(blk->bp, state->args); in xfs_attr_node_addname()
1424 blk = &state->path.blk[ state->path.active-1 ]; in xfs_attr_node_addname()
1425 ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); in xfs_attr_node_addname()
1426 error = xfs_attr_leaf_remove(blk->bp, args); in xfs_attr_node_addname()
1492 xfs_da_state_blk_t *blk; in xfs_attr_node_removename() local
1524 blk = &state->path.blk[ state->path.active-1 ]; in xfs_attr_node_removename()
1525 ASSERT(blk->bp != NULL); in xfs_attr_node_removename()
[all …]
xfs_dir2_node.c
1116 xfs_da_state_blk_t *blk; /* leaf block */ in xfs_dir2_leafn_toosmall() local
1133 blk = &state->path.blk[state->path.active - 1]; in xfs_dir2_leafn_toosmall()
1134 info = blk->bp->data; in xfs_dir2_leafn_toosmall()
1221 if (blkno < blk->blkno) in xfs_dir2_leafn_toosmall()
1284 xfs_da_state_blk_t *blk; /* leaf block for insert */ in xfs_dir2_node_addname() local
1318 blk = &state->path.blk[state->path.active - 1]; in xfs_dir2_node_addname()
1319 ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); in xfs_dir2_node_addname()
1323 rval = xfs_dir2_leafn_add(blk->bp, args, blk->index); in xfs_dir2_node_addname()
1773 xfs_da_brelse(args->trans, state->path.blk[i].bp); in xfs_dir2_node_lookup()
1774 state->path.blk[i].bp = NULL; in xfs_dir2_node_lookup()
[all …]
/fs/gfs2/
recovery.c
32 int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk, in gfs2_replay_read_block() argument
42 error = gfs2_extent_map(&ip->i_inode, blk, &new, &dblock, &extlen); in gfs2_replay_read_block()
150 static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, in get_log_header() argument
159 error = gfs2_replay_read_block(jd, blk, &bh); in get_log_header()
170 if (error || lh.lh_blkno != blk || lh.lh_hash != hash) in get_log_header()
191 static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, in find_good_lh() argument
194 unsigned int orig_blk = *blk; in find_good_lh()
198 error = get_log_header(jd, *blk, head); in find_good_lh()
202 if (++*blk == jd->jd_blocks) in find_good_lh()
203 *blk = 0; in find_good_lh()
[all …]
recovery.h
17 static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk) in gfs2_replay_incr_blk() argument
19 if (++*blk == sdp->sd_jdesc->jd_blocks) in gfs2_replay_incr_blk()
20 *blk = 0; in gfs2_replay_incr_blk()
23 extern int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
rgrp.c
330 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact) in gfs2_blk2rgrpd() argument
340 if (blk < cur->rd_addr) in gfs2_blk2rgrpd()
342 else if (blk >= cur->rd_data0 + cur->rd_data) in gfs2_blk2rgrpd()
347 if (blk < cur->rd_addr) in gfs2_blk2rgrpd()
349 if (blk >= cur->rd_data0 + cur->rd_data) in gfs2_blk2rgrpd()
829 u64 blk; in gfs2_rgrp_send_discards() local
850 blk = offset + ((bi->bi_start + x) * GFS2_NBBY); in gfs2_rgrp_send_discards()
851 blk *= sects_per_blk; /* convert to sectors */ in gfs2_rgrp_send_discards()
856 if ((start + nr_sects) != blk) { in gfs2_rgrp_send_discards()
867 start = blk; in gfs2_rgrp_send_discards()
[all …]
dir.c
1841 u64 blk, nblk; in leaf_dealloc() local
1869 for (blk = leaf_no; blk; blk = nblk) { in leaf_dealloc()
1870 if (blk != leaf_no) { in leaf_dealloc()
1871 error = get_leaf(dip, blk, &bh); in leaf_dealloc()
1877 if (blk != leaf_no) in leaf_dealloc()
1880 gfs2_rlist_add(dip, &rlist, blk); in leaf_dealloc()
1904 for (blk = leaf_no; blk; blk = nblk) { in leaf_dealloc()
1905 if (blk != leaf_no) { in leaf_dealloc()
1906 error = get_leaf(dip, blk, &bh); in leaf_dealloc()
1912 if (blk != leaf_no) in leaf_dealloc()
[all …]
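The recovery.h hit above is the whole helper: gfs2 treats the journal as a ring of jd_blocks blocks, so incrementing a block cursor past the end wraps it back to block 0. A standalone sketch of that wrap-around, taking the journal length directly as a parameter instead of reading it from the superblock descriptor:

/* Advance a journal block cursor, wrapping at the journal length. */
static inline void replay_incr_blk(unsigned int jd_blocks, unsigned int *blk)
{
        if (++*blk == jd_blocks)
                *blk = 0;
}

Calling it jd_blocks times in a row brings the cursor back to its starting block, which is what lets find_good_lh() in recovery.c scan the whole journal from any starting point.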
/fs/adfs/
dir_f.c
147 int blk = 0; in adfs_dir_read() local
161 for (blk = 0; blk < size; blk++) { in adfs_dir_read()
164 phys = __adfs_block_map(sb, object_id, blk); in adfs_dir_read()
167 object_id, blk); in adfs_dir_read()
171 dir->bh[blk] = sb_bread(sb, phys); in adfs_dir_read()
172 if (!dir->bh[blk]) in adfs_dir_read()
190 dir->nr_buffers = blk; in adfs_dir_read()
198 for (blk -= 1; blk >= 0; blk -= 1) in adfs_dir_read()
199 brelse(dir->bh[blk]); in adfs_dir_read()
dir_fplus.c
21 unsigned int blk, size; in adfs_fplus_read() local
73 for (blk = 1; blk < size; blk++) { in adfs_fplus_read()
74 block = __adfs_block_map(sb, id, blk); in adfs_fplus_read()
76 adfs_error(sb, "dir object %X has a hole at offset %d", id, blk); in adfs_fplus_read()
80 dir->bh_fplus[blk] = sb_bread(sb, block); in adfs_fplus_read()
81 if (!dir->bh_fplus[blk]) { in adfs_fplus_read()
84 id, blk, block); in adfs_fplus_read()
/fs/ext4/
block_validity.c
253 unsigned int blk; in ext4_check_blockref() local
256 blk = le32_to_cpu(*bref++); in ext4_check_blockref()
257 if (blk && in ext4_check_blockref()
259 blk, 1))) { in ext4_check_blockref()
260 es->s_last_error_block = cpu_to_le64(blk); in ext4_check_blockref()
261 ext4_error_inode(inode, function, line, blk, in ext4_check_blockref()
resize.c
311 ext4_fsblk_t blk) in bclean() argument
316 bh = sb_getblk(sb, blk); in bclean()
653 const ext4_fsblk_t blk = primary->b_blocknr; in verify_reserved_gdb() local
663 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ in verify_reserved_gdb()
666 blk, grp, in verify_reserved_gdb()
669 blk); in verify_reserved_gdb()
854 ext4_fsblk_t blk; in reserve_backup_gdb() local
871 blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count; in reserve_backup_gdb()
877 for (res = 0; res < reserved_gdb; res++, blk++) { in reserve_backup_gdb()
878 if (le32_to_cpu(*data) != blk) { in reserve_backup_gdb()
[all …]
ext4.h
282 #define EXT4_B2C(sbi, blk) ((blk) >> (sbi)->s_cluster_bits) argument
2015 struct ext4_group_desc *bg, ext4_fsblk_t blk);
2017 struct ext4_group_desc *bg, ext4_fsblk_t blk);
2019 struct ext4_group_desc *bg, ext4_fsblk_t blk);
2053 ext4_fsblk_t blk) in ext4_blocks_count_set() argument
2055 es->s_blocks_count_lo = cpu_to_le32((u32)blk); in ext4_blocks_count_set()
2056 es->s_blocks_count_hi = cpu_to_le32(blk >> 32); in ext4_blocks_count_set()
2060 ext4_fsblk_t blk) in ext4_free_blocks_count_set() argument
2062 es->s_free_blocks_count_lo = cpu_to_le32((u32)blk); in ext4_free_blocks_count_set()
2063 es->s_free_blocks_count_hi = cpu_to_le32(blk >> 32); in ext4_free_blocks_count_set()
[all …]
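Two patterns recur in the ext4.h hits: EXT4_B2C() converts a block number to a cluster number with a right shift by s_cluster_bits, and the *_count_set() helpers store a 64-bit block count as two little-endian 32-bit on-disk fields. A sketch of both, using a hypothetical two-field struct in place of the ext4 superblock and glibc's htole32() in place of the kernel's cpu_to_le32():

#include <stdint.h>
#include <endian.h>

struct sb_counts {
        uint32_t blocks_count_lo; /* low 32 bits, little-endian  */
        uint32_t blocks_count_hi; /* high 32 bits, little-endian */
};

static void set_blocks_count(struct sb_counts *es, uint64_t blk)
{
        /* Split the 64-bit count across the two on-disk fields. */
        es->blocks_count_lo = htole32((uint32_t)blk);
        es->blocks_count_hi = htole32((uint32_t)(blk >> 32));
}

/* Block-to-cluster: a cluster spans 2^cluster_bits blocks. */
static inline uint64_t blk_to_cluster(uint64_t blk, unsigned cluster_bits)
{
        return blk >> cluster_bits;
}

The ext3 resize.c hits that follow use the same bclean()/reserve_backup_gdb() shape as ext4, minus the 64-bit hi field.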
/fs/ext3/
resize.c
113 ext3_fsblk_t blk) in bclean() argument
118 bh = sb_getblk(sb, blk); in bclean()
389 const ext3_fsblk_t blk = primary->b_blocknr; in verify_reserved_gdb() local
399 if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk){ in verify_reserved_gdb()
403 blk, grp, in verify_reserved_gdb()
404 grp * EXT3_BLOCKS_PER_GROUP(sb) + blk); in verify_reserved_gdb()
587 ext3_fsblk_t blk; in reserve_backup_gdb() local
604 blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count; in reserve_backup_gdb()
610 for (res = 0; res < reserved_gdb; res++, blk++) { in reserve_backup_gdb()
611 if (le32_to_cpu(*data) != blk) { in reserve_backup_gdb()
[all …]
/fs/squashfs/
namei.c
209 unsigned int blk, off, ino_num; in squashfs_lookup() local
211 blk = le32_to_cpu(dirh.start_block); in squashfs_lookup()
215 ino = SQUASHFS_MKINODE(blk, off); in squashfs_lookup()
219 blk, off, ino_num); in squashfs_lookup()
export.c
55 int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1); in squashfs_inode_lookup() local
57 u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]); in squashfs_inode_lookup()
/fs/ufs/
util.h
342 u64 blk) in ubh_get_data_ptr() argument
345 return ubh_get_addr64(ubh, blk); in ubh_get_data_ptr()
347 return ubh_get_addr32(ubh, blk); in ubh_get_data_ptr()
553 unsigned blk) in ufs_get_direct_data_ptr() argument
555 BUG_ON(blk > UFS_TIND_BLOCK); in ufs_get_direct_data_ptr()
557 (void *)&ufsi->i_u1.u2_i_data[blk] : in ufs_get_direct_data_ptr()
558 (void *)&ufsi->i_u1.i_data[blk]; in ufs_get_direct_data_ptr()
