
Searched refs:blk (Results 1 – 25 of 65) sorted by relevance


/fs/xfs/scrub/
dabtree.c
54 ds->state->path.blk[level].blkno), in xchk_da_process_error()
76 ds->state->path.blk[level].blkno), in xchk_da_set_corrupt()
85 struct xfs_da_state_blk *blk = &ds->state->path.blk[level]; in xchk_da_btree_node_entry() local
88 ASSERT(blk->magic == XFS_DA_NODE_MAGIC); in xchk_da_btree_node_entry()
90 xfs_da3_node_hdr_from_disk(ds->sc->mp, &hdr, blk->bp->b_addr); in xchk_da_btree_node_entry()
91 return hdr.btree + blk->index; in xchk_da_btree_node_entry()
252 if (altpath->blk[level].bp) in xchk_da_btree_block_check_sibling()
253 xchk_buffer_recheck(ds->sc, altpath->blk[level].bp); in xchk_da_btree_block_check_sibling()
256 if (altpath->blk[level].blkno != sibling) in xchk_da_btree_block_check_sibling()
262 if (altpath->blk[plevel].bp == NULL || in xchk_da_btree_block_check_sibling()
[all …]
attr.c
308 struct xfs_da_state_blk *blk = &ds->state->path.blk[level]; in xchk_xattr_block() local
309 struct xfs_buf *bp = blk->bp; in xchk_xattr_block()
323 if (*last_checked == blk->blkno) in xchk_xattr_block()
334 *last_checked = blk->blkno; in xchk_xattr_block()
404 struct xfs_da_state_blk *blk = &ds->state->path.blk[level]; in xchk_xattr_rec() local
416 ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); in xchk_xattr_rec()
418 ent = xfs_attr3_leaf_entryp(blk->bp->b_addr) + blk->index; in xchk_xattr_rec()
433 bp = blk->bp; in xchk_xattr_rec()
dir.c
190 struct xfs_da_state_blk *blk = &ds->state->path.blk[level]; in xchk_dir_rec() local
210 ASSERT(blk->magic == XFS_DIR2_LEAF1_MAGIC || in xchk_dir_rec()
211 blk->magic == XFS_DIR2_LEAFN_MAGIC); in xchk_dir_rec()
213 xfs_dir2_leaf_hdr_from_disk(mp, &hdr, blk->bp->b_addr); in xchk_dir_rec()
214 ent = hdr.ents + blk->index; in xchk_dir_rec()
/fs/quota/
quota_tree.c
59 static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) in read_blk() argument
65 info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); in read_blk()
68 static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) in write_blk() argument
74 info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); in write_blk()
117 int ret, blk; in get_free_dqblk() local
122 blk = info->dqi_free_blk; in get_free_dqblk()
123 ret = read_blk(info, blk, buf); in get_free_dqblk()
137 blk = info->dqi_blocks++; in get_free_dqblk()
140 ret = blk; in get_free_dqblk()
147 static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk) in put_free_dqblk() argument
[all …]
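
The read_blk()/write_blk() excerpts above show how the quota tree addresses its blocks: a block index blk becomes a byte offset by shifting it left by dqi_blocksize_bits, and dqi_usable_bs bytes are transferred at that offset. A minimal, runnable user-space sketch of that arithmetic follows; the qtree_info struct and the pread() backend are illustrative stand-ins, not the kernel's qtree_mem_dqinfo or its filesystem I/O path.

    /*
     * Hedged sketch of the qtree block-offset arithmetic from the
     * read_blk()/write_blk() excerpts; qtree_info and the pread()
     * backend are stand-ins, not the kernel's qtree_mem_dqinfo.
     */
    #include <stdio.h>
    #include <unistd.h>

    struct qtree_info {
        unsigned int blocksize_bits;  /* log2 of the quota block size */
        unsigned int usable_bs;       /* usable bytes per quota block */
        int fd;                       /* backing file descriptor */
    };

    ssize_t read_blk_sketch(struct qtree_info *info, unsigned int blk, char *buf)
    {
        /* Same idea as read_blk(): byte offset = blk << blocksize_bits. */
        return pread(info->fd, buf, info->usable_bs,
                     (off_t)blk << info->blocksize_bits);
    }

    int main(void)
    {
        struct qtree_info info = { .blocksize_bits = 10, .usable_bs = 1024 };

        /* With 1 KiB quota blocks, block 3 starts at byte 3072. */
        printf("block 3 starts at byte %lld\n",
               (long long)((off_t)3 << info.blocksize_bits));
        return 0;
    }

The left shift is the whole addressing scheme: quota blocks are fixed-size, so block N always lives at byte offset N << dqi_blocksize_bits of the quota file.
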
/fs/xfs/libxfs/
xfs_attr.c
913 struct xfs_da_state_blk *blk; in xfs_attr_node_addname() local
933 blk = &state->path.blk[ state->path.active-1 ]; in xfs_attr_node_addname()
934 ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); in xfs_attr_node_addname()
957 retval = xfs_attr3_leaf_add(blk->bp, state->args); in xfs_attr_node_addname()
1090 blk = &state->path.blk[state->path.active-1]; in xfs_attr_node_addname()
1091 ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); in xfs_attr_node_addname()
1092 error = xfs_attr3_leaf_remove(blk->bp, args); in xfs_attr_node_addname()
1129 ASSERT(state->path.blk[0].bp); in xfs_attr_node_shrink()
1130 state->path.blk[0].bp = NULL; in xfs_attr_node_shrink()
1188 ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL); in xfs_attr_node_removename_setup()
[all …]
xfs_da_btree.c
102 state->altpath.blk[i].bp = NULL; in xfs_da_state_kill_altpath()
493 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC || in xfs_da3_split()
494 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC); in xfs_da3_split()
496 addblk = &state->path.blk[max]; /* initial dummy value */ in xfs_da3_split()
498 oldblk = &state->path.blk[i]; in xfs_da3_split()
499 newblk = &state->altpath.blk[i]; in xfs_da3_split()
574 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC); in xfs_da3_split()
580 oldblk = &state->path.blk[0]; in xfs_da3_split()
1072 drop_blk = &state->path.blk[ state->path.active-1 ]; in xfs_da3_join()
1073 save_blk = &state->altpath.blk[ state->path.active-1 ]; in xfs_da3_join()
[all …]
xfs_dir2_node.c
1488 xfs_da_state_blk_t *blk; /* leaf block */ in xfs_dir2_leafn_toosmall() local
1507 blk = &state->path.blk[state->path.active - 1]; in xfs_dir2_leafn_toosmall()
1508 leaf = blk->bp->b_addr; in xfs_dir2_leafn_toosmall()
1511 xfs_dir3_leaf_check(dp, blk->bp); in xfs_dir2_leafn_toosmall()
1596 if (blkno < blk->blkno) in xfs_dir2_leafn_toosmall()
2008 xfs_da_state_blk_t *blk; /* leaf block for insert */ in xfs_dir2_node_addname() local
2038 blk = &state->path.blk[state->path.active - 1]; in xfs_dir2_node_addname()
2039 ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); in xfs_dir2_node_addname()
2043 rval = xfs_dir2_leafn_add(blk->bp, args, blk->index); in xfs_dir2_node_addname()
2108 xfs_trans_brelse(args->trans, state->path.blk[i].bp); in xfs_dir2_node_lookup()
[all …]
/fs/affs/
bitmap.c
44 u32 blk, bmap, bit, mask, tmp; in affs_free_block() local
52 blk = block - sbi->s_reserved; in affs_free_block()
53 bmap = blk / sbi->s_bmap_bits; in affs_free_block()
54 bit = blk % sbi->s_bmap_bits; in affs_free_block()
122 u32 blk, bmap, bit, mask, mask2, tmp; in affs_alloc_block() local
144 blk = goal - sbi->s_reserved; in affs_alloc_block()
145 bmap = blk / sbi->s_bmap_bits; in affs_alloc_block()
167 blk = bmap * sbi->s_bmap_bits; in affs_alloc_block()
182 bit = blk % sbi->s_bmap_bits; in affs_alloc_block()
186 blk &= ~31UL; in affs_alloc_block()
[all …]
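
The affs_free_block()/affs_alloc_block() excerpts above decompose a volume block number into a bitmap index and a bit within that bitmap: subtract the reserved blocks, then divide and take the remainder by the number of bits one bitmap block covers. A small standalone sketch of that split is below; the s_reserved and s_bmap_bits values are invented for illustration.

    /*
     * Hedged sketch of the blk -> (bmap, bit) split seen in the
     * fs/affs/bitmap.c excerpts; s_reserved and s_bmap_bits are invented.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t s_reserved = 2;      /* reserved boot blocks, not in any bitmap */
        uint32_t s_bmap_bits = 4064;  /* bits tracked by one bitmap block */
        uint32_t block = 10000;       /* absolute block number on the volume */

        uint32_t blk  = block - s_reserved;  /* bitmap-relative block number */
        uint32_t bmap = blk / s_bmap_bits;   /* which bitmap block */
        uint32_t bit  = blk % s_bmap_bits;   /* bit inside that bitmap block */

        printf("block %u -> bitmap %u, bit %u\n", block, bmap, bit);
        return 0;
    }
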
file.c
864 u32 last_blk, blkcnt, blk; in affs_truncate() local
921 blk = last_blk; in affs_truncate()
924 blk++; in affs_truncate()
929 if (size > blkcnt - blk + i) in affs_truncate()
930 size = blkcnt - blk + i; in affs_truncate()
931 for (; i < size; i++, blk++) { in affs_truncate()
967 if (size > blkcnt - blk) in affs_truncate()
968 size = blkcnt - blk; in affs_truncate()
969 for (i = 0; i < size; i++, blk++) in affs_truncate()
/fs/xfs/
xfs_buf_item_recover.c
706 void *blk = bp->b_addr; in xlog_recover_get_buf_lsn() local
714 magic32 = be32_to_cpu(*(__be32 *)blk); in xlog_recover_get_buf_lsn()
726 struct xfs_btree_block *btb = blk; in xlog_recover_get_buf_lsn()
734 struct xfs_btree_block *btb = blk; in xlog_recover_get_buf_lsn()
741 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); in xlog_recover_get_buf_lsn()
742 uuid = &((struct xfs_agf *)blk)->agf_uuid; in xlog_recover_get_buf_lsn()
745 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); in xlog_recover_get_buf_lsn()
746 uuid = &((struct xfs_agfl *)blk)->agfl_uuid; in xlog_recover_get_buf_lsn()
749 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); in xlog_recover_get_buf_lsn()
750 uuid = &((struct xfs_agi *)blk)->agi_uuid; in xlog_recover_get_buf_lsn()
[all …]
/fs/ext4/
balloc.c
349 ext4_fsblk_t blk; in ext4_valid_block_bitmap() local
364 blk = ext4_block_bitmap(sb, desc); in ext4_valid_block_bitmap()
365 offset = blk - group_first_block; in ext4_valid_block_bitmap()
369 return blk; in ext4_valid_block_bitmap()
372 blk = ext4_inode_bitmap(sb, desc); in ext4_valid_block_bitmap()
373 offset = blk - group_first_block; in ext4_valid_block_bitmap()
377 return blk; in ext4_valid_block_bitmap()
380 blk = ext4_inode_table(sb, desc); in ext4_valid_block_bitmap()
381 offset = blk - group_first_block; in ext4_valid_block_bitmap()
384 return blk; in ext4_valid_block_bitmap()
[all …]
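
The ext4_valid_block_bitmap() excerpts apply the same pattern to the block bitmap, the inode bitmap, and the inode table: read the metadata block's location from the group descriptor, compute its offset inside the group (blk - group_first_block), and return the offending block number when a check fails; the actual bit tests are elided in the excerpt. A hedged sketch of just the offset and in-group range computation, with invented geometry:

    /*
     * Hedged sketch of the "metadata block sits inside its own group"
     * offset computation suggested by the ext4_valid_block_bitmap()
     * excerpt; blocks_per_group and the block numbers are invented.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t group_first_block = 32768;  /* first block of this group */
        uint64_t blocks_per_group  = 32768;
        uint64_t blk = 32770;                /* e.g. the group's block bitmap */

        uint64_t offset = blk - group_first_block;
        if (blk < group_first_block || offset >= blocks_per_group)
            printf("block %llu lies outside its group\n",
                   (unsigned long long)blk);
        else
            printf("block %llu is at offset %llu within its group\n",
                   (unsigned long long)blk, (unsigned long long)offset);
        return 0;
    }
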
block_validity.c
355 unsigned int blk; in ext4_check_blockref() local
363 blk = le32_to_cpu(*bref++); in ext4_check_blockref()
364 if (blk && in ext4_check_blockref()
365 unlikely(!ext4_inode_block_valid(inode, blk, 1))) { in ext4_check_blockref()
366 ext4_error_inode(inode, function, line, blk, in ext4_check_blockref()
resize.c
421 ext4_fsblk_t blk) in bclean() argument
426 bh = sb_getblk(sb, blk); in bclean()
767 const ext4_fsblk_t blk = primary->b_blocknr; in verify_reserved_gdb() local
777 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ in verify_reserved_gdb()
780 blk, grp, in verify_reserved_gdb()
783 blk); in verify_reserved_gdb()
1004 ext4_fsblk_t blk; in reserve_backup_gdb() local
1022 blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count; in reserve_backup_gdb()
1028 for (res = 0; res < reserved_gdb; res++, blk++) { in reserve_backup_gdb()
1029 if (le32_to_cpu(*data) != blk) { in reserve_backup_gdb()
[all …]
/fs/gfs2/
recovery.h
14 static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, u32 *blk) in gfs2_replay_incr_blk() argument
16 if (++*blk == jd->jd_blocks) in gfs2_replay_incr_blk()
17 *blk = 0; in gfs2_replay_incr_blk()
20 extern int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
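
gfs2_replay_incr_blk() above is shown in full: it advances a journal block index and wraps to zero at jd_blocks, so replay code can walk the journal as a ring. A standalone, runnable restatement of that wrap-around increment, with a plain struct standing in for gfs2_jdesc:

    /*
     * Hedged, standalone restatement of the circular journal-block
     * increment from fs/gfs2/recovery.h; struct jdesc stands in for
     * the kernel's gfs2_jdesc.
     */
    #include <stdio.h>
    #include <stdint.h>

    struct jdesc {
        uint32_t jd_blocks;  /* total blocks in the journal */
    };

    static inline void replay_incr_blk(const struct jdesc *jd, uint32_t *blk)
    {
        if (++*blk == jd->jd_blocks)  /* wrap at the end of the journal */
            *blk = 0;
    }

    int main(void)
    {
        struct jdesc jd = { .jd_blocks = 4 };
        uint32_t blk = 2;

        for (int i = 0; i < 5; i++) {
            printf("%u ", blk);        /* prints: 2 3 0 1 2 */
            replay_incr_blk(&jd, &blk);
        }
        printf("\n");
        return 0;
    }
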
dir.c
1976 u64 blk, nblk; in leaf_dealloc() local
2001 for (blk = leaf_no; blk; blk = nblk) { in leaf_dealloc()
2002 if (blk != leaf_no) { in leaf_dealloc()
2003 error = get_leaf(dip, blk, &bh); in leaf_dealloc()
2009 if (blk != leaf_no) in leaf_dealloc()
2012 gfs2_rlist_add(dip, &rlist, blk); in leaf_dealloc()
2037 for (blk = leaf_no; blk; blk = nblk) { in leaf_dealloc()
2040 if (blk != leaf_no) { in leaf_dealloc()
2041 error = get_leaf(dip, blk, &bh); in leaf_dealloc()
2047 if (blk != leaf_no) in leaf_dealloc()
[all …]
recovery.c
32 int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk, in gfs2_replay_read_block() argument
42 error = gfs2_extent_map(&ip->i_inode, blk, &new, &dblock, &extlen); in gfs2_replay_read_block()
167 static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, in get_log_header() argument
174 error = gfs2_replay_read_block(jd, blk, &bh); in get_log_header()
179 blk, head); in get_log_header()
rgrp.c
186 static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs) in rs_cmp() argument
190 if (blk >= startblk + rs->rs_free) in rs_cmp()
192 if (blk + len - 1 < startblk) in rs_cmp()
498 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact) in gfs2_blk2rgrpd() argument
508 if (blk < cur->rd_addr) in gfs2_blk2rgrpd()
510 else if (blk >= cur->rd_data0 + cur->rd_data) in gfs2_blk2rgrpd()
515 if (blk < cur->rd_addr) in gfs2_blk2rgrpd()
517 if (blk >= cur->rd_data0 + cur->rd_data) in gfs2_blk2rgrpd()
1291 u64 blk; in gfs2_rgrp_send_discards() local
1312 blk = offset + ((bi->bi_start + x) * GFS2_NBBY); in gfs2_rgrp_send_discards()
[all …]
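
The rs_cmp() excerpts implement a three-way comparison between a block range [blk, blk + len) and an existing reservation: positive when the range lies entirely above the reservation, negative when it lies entirely below, and zero when they overlap. A hedged, self-contained version of that test follows; the blkreserv struct is a simplified stand-in for the kernel's gfs2_blkreserv.

    /* Hedged sketch of the rs_cmp() overlap test from fs/gfs2/rgrp.c. */
    #include <stdio.h>
    #include <stdint.h>

    struct blkreserv {
        uint64_t start;  /* first block of the reservation */
        uint32_t free;   /* number of blocks reserved */
    };

    /* >0: range is above the reservation, <0: below, 0: they overlap. */
    static int rs_cmp_sketch(uint64_t blk, uint32_t len, const struct blkreserv *rs)
    {
        if (blk >= rs->start + rs->free)
            return 1;
        if (blk + len - 1 < rs->start)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct blkreserv rs = { .start = 100, .free = 10 };  /* blocks 100..109 */

        printf("%d %d %d\n",
               rs_cmp_sketch(110, 5, &rs),   /* 1: starts past the end */
               rs_cmp_sketch(90, 5, &rs),    /* -1: ends before the start */
               rs_cmp_sketch(95, 10, &rs));  /* 0: 95..104 overlaps */
        return 0;
    }
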
/fs/pstore/
Kconfig
162 The common layer for pstore/blk (and pstore/ram in the future)
176 For more information, see Documentation/admin-guide/pstore-blk.rst
185 Which block device should be used for pstore/blk.
207 pstore/blk, but module parameters have priority over Kconfig.
215 pstore/blk. The size is in KB and must be a multiple of 4.
218 pstore/blk, but module parameters have priority over Kconfig.
230 pstore/blk, but module parameters have priority over Kconfig.
238 This just sets size of pmsg (pmsg_size) for pstore/blk. The size is
242 pstore/blk, but module parameters have priority over Kconfig.
251 pstore/blk. The size is in KB and must be a multiple of 4.
[all …]
/fs/squashfs/
namei.c
200 unsigned int blk, off, ino_num; in squashfs_lookup() local
202 blk = le32_to_cpu(dirh.start_block); in squashfs_lookup()
206 ino = SQUASHFS_MKINODE(blk, off); in squashfs_lookup()
210 blk, off, ino_num); in squashfs_lookup()
export.c
42 int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1); in squashfs_inode_lookup() local
53 start = le64_to_cpu(msblk->inode_lookup_table[blk]); in squashfs_inode_lookup()
/fs/f2fs/
extent_cache.c
24 block_t blk, bool keep_clen, in __set_extent_info() argument
32 ei->blk = blk; in __set_extent_info()
105 back->blk + back->len == front->blk); in __is_extent_mergeable()
599 next_ex->ei.blk = ei->blk; in __try_merge_extent_node()
675 tei->blk, 0); in __update_extent_tree_range()
731 end - dei.fofs + dei.blk, false, in __update_extent_tree_range()
740 en->ei.blk + (end - dei.fofs), true, in __update_extent_tree_range()
778 if (tei->blk) { in __update_extent_tree_range()
779 __set_extent_info(&ei, fofs, len, tei->blk, false, in __update_extent_tree_range()
951 ei.blk = NULL_ADDR; in __update_extent_cache()
[all …]
/fs/freevxfs/
vxfs_super.c
150 unsigned blk, __fs32 magic) in vxfs_try_sb_magic() argument
157 bp = sb_bread(sbp, blk); in vxfs_try_sb_magic()
163 blk); in vxfs_try_sb_magic()
174 rsbp->vs_magic, blk); in vxfs_try_sb_magic()
/fs/erofs/
super.c
133 erofs_blk_t blk; in erofs_read_metadata() local
136 blk = erofs_blknr(*offset); in erofs_read_metadata()
138 if (!page || page->index != blk) { in erofs_read_metadata()
143 page = erofs_get_meta_page(sb, blk); in erofs_read_metadata()
162 blk = erofs_blknr(*offset); in erofs_read_metadata()
164 if (!page || page->index != blk) { in erofs_read_metadata()
170 page = erofs_get_meta_page(sb, blk); in erofs_read_metadata()
/fs/ufs/
util.h
343 u64 blk) in ubh_get_data_ptr() argument
346 return ubh_get_addr64(ubh, blk); in ubh_get_data_ptr()
348 return ubh_get_addr32(ubh, blk); in ubh_get_data_ptr()
553 unsigned blk) in ufs_get_direct_data_ptr() argument
555 BUG_ON(blk > UFS_TIND_BLOCK); in ufs_get_direct_data_ptr()
557 (void *)&ufsi->i_u1.u2_i_data[blk] : in ufs_get_direct_data_ptr()
558 (void *)&ufsi->i_u1.i_data[blk]; in ufs_get_direct_data_ptr()
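
The ufs_get_direct_data_ptr() excerpts above show how UFS hides the on-disk pointer width from callers: after asserting that blk does not exceed the triple-indirect slot, it returns a pointer into either the 64-bit u2_i_data[] array (UFS2) or the 32-bit i_data[] array (UFS1). A hedged sketch of that selection with simplified stand-in structures:

    /*
     * Hedged sketch of picking a 32- or 64-bit block-pointer slot, in the
     * spirit of ufs_get_direct_data_ptr(); the structures are simplified
     * stand-ins, not the kernel's ufs_inode_info layout.
     */
    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    #define NDADDR 15  /* stand-in for UFS_TIND_BLOCK + 1 slots */

    struct inode_info {
        int is_ufs2;  /* stand-in for the UFS2 variant check */
        union {
            uint64_t u2_i_data[NDADDR];  /* UFS2: 64-bit block pointers */
            uint32_t i_data[NDADDR * 2]; /* UFS1: 32-bit block pointers */
        } u1;
    };

    static void *get_direct_data_ptr(struct inode_info *ufsi, unsigned blk)
    {
        assert(blk < NDADDR);  /* mirrors BUG_ON(blk > UFS_TIND_BLOCK) */
        return ufsi->is_ufs2 ? (void *)&ufsi->u1.u2_i_data[blk]
                             : (void *)&ufsi->u1.i_data[blk];
    }

    int main(void)
    {
        struct inode_info ino = { .is_ufs2 = 1 };

        *(uint64_t *)get_direct_data_ptr(&ino, 3) = 123456;
        printf("slot 3 = %llu\n", (unsigned long long)ino.u1.u2_i_data[3]);
        return 0;
    }
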
/fs/jfs/
super.c
740 sector_t blk = off >> sb->s_blocksize_bits; in jfs_quota_read() local
760 err = jfs_get_block(inode, blk, &tmp_bh, 0); in jfs_quota_read()
775 blk++; in jfs_quota_read()
785 sector_t blk = off >> sb->s_blocksize_bits; in jfs_quota_write() local
800 err = jfs_get_block(inode, blk, &tmp_bh, 1); in jfs_quota_write()
821 blk++; in jfs_quota_write()
