
Searched refs:bhs (Results 1 – 25 of 29) sorted by relevance

/fs/fat/
fatent.c
45 struct buffer_head **bhs = fatent->bhs; in fat12_ent_set_ptr() local
47 WARN_ON(offset >= (bhs[0]->b_size - 1)); in fat12_ent_set_ptr()
48 fatent->u.ent12_p[0] = bhs[0]->b_data + offset; in fat12_ent_set_ptr()
49 fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1); in fat12_ent_set_ptr()
51 WARN_ON(offset != (bhs[0]->b_size - 1)); in fat12_ent_set_ptr()
52 fatent->u.ent12_p[0] = bhs[0]->b_data + offset; in fat12_ent_set_ptr()
53 fatent->u.ent12_p[1] = bhs[1]->b_data; in fat12_ent_set_ptr()
60 fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset); in fat16_ent_set_ptr()
66 fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset); in fat32_ent_set_ptr()
72 struct buffer_head **bhs = fatent->bhs; in fat12_ent_bread() local
[all …]
dir.c
1082 struct buffer_head **bhs, int nr_bhs) in fat_zeroed_cluster() argument
1092 bhs[n] = sb_getblk(sb, blknr); in fat_zeroed_cluster()
1093 if (!bhs[n]) { in fat_zeroed_cluster()
1098 lock_buffer(bhs[n]); in fat_zeroed_cluster()
1099 memset(bhs[n]->b_data, 0, sb->s_blocksize); in fat_zeroed_cluster()
1100 set_buffer_uptodate(bhs[n]); in fat_zeroed_cluster()
1101 unlock_buffer(bhs[n]); in fat_zeroed_cluster()
1102 mark_buffer_dirty_inode(bhs[n], dir); in fat_zeroed_cluster()
1108 err = fat_sync_bhs(bhs, n); in fat_zeroed_cluster()
1113 brelse(bhs[i]); in fat_zeroed_cluster()
[all …]
fat.h
350 struct buffer_head *bhs[2]; member
359 fatent->bhs[0] = fatent->bhs[1] = NULL; in fatent_init()
374 brelse(fatent->bhs[i]); in fatent_brelse()
376 fatent->bhs[0] = fatent->bhs[1] = NULL; in fatent_brelse()
464 extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
misc.c
362 int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs) in fat_sync_bhs() argument
367 write_dirty_buffer(bhs[i], 0); in fat_sync_bhs()
370 wait_on_buffer(bhs[i]); in fat_sync_bhs()
371 if (!err && !buffer_uptodate(bhs[i])) in fat_sync_bhs()
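
The fat excerpts above (fat_zeroed_cluster() and fat_sync_bhs()) share a common buffer-head batch idiom: grab each block with sb_getblk(), zero it and mark it dirty, then write the whole batch and wait for completion. A minimal kernel-style sketch of that idiom follows, assuming a small fixed batch; the helper name zero_and_sync_blocks() is illustrative, not kernel code.

#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/string.h>

/*
 * Illustrative helper, not the kernel's fat_zeroed_cluster(): grab @nr
 * blocks starting at @blknr, zero them, mark them dirty against @dir,
 * then write the batch and wait, mirroring fat_sync_bhs() above.
 */
static int zero_and_sync_blocks(struct super_block *sb, struct inode *dir,
				sector_t blknr, int nr)
{
	struct buffer_head *bhs[16];	/* assumed small fixed batch size */
	int i, n, err = 0;

	if (nr > 16)
		return -EINVAL;

	for (n = 0; n < nr; n++) {
		bhs[n] = sb_getblk(sb, blknr + n);
		if (!bhs[n]) {
			err = -ENOMEM;
			goto out;
		}
		lock_buffer(bhs[n]);
		memset(bhs[n]->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bhs[n]);
		unlock_buffer(bhs[n]);
		mark_buffer_dirty_inode(bhs[n], dir);
	}

	/* write out every buffer, then wait for all of them (fat_sync_bhs()) */
	for (i = 0; i < n; i++)
		write_dirty_buffer(bhs[i], 0);
	for (i = 0; i < n; i++) {
		wait_on_buffer(bhs[i]);
		if (!err && !buffer_uptodate(bhs[i]))
			err = -EIO;
	}
out:
	for (i = 0; i < n; i++)
		brelse(bhs[i]);
	return err;
}
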
/fs/adfs/
dir.c
30 memcpy(dst, dir->bhs[index]->b_data + offset, remain); in adfs_dir_copyfrom()
37 memcpy(dst, dir->bhs[index]->b_data + offset, len); in adfs_dir_copyfrom()
55 memcpy(dir->bhs[index]->b_data + offset, src, remain); in adfs_dir_copyto()
62 memcpy(dir->bhs[index]->b_data + offset, src, len); in adfs_dir_copyto()
71 if (dir->bhs != dir->bh) in __adfs_dir_cleanup()
72 kfree(dir->bhs); in __adfs_dir_cleanup()
73 dir->bhs = NULL; in __adfs_dir_cleanup()
82 brelse(dir->bhs[i]); in adfs_dir_relse()
92 bforget(dir->bhs[i]); in adfs_dir_forget()
100 struct buffer_head **bhs; in adfs_dir_read_buffers() local
[all …]
dir_fplus.c
83 bp = (void *)dir->bhs[bi]->b_data; in adfs_fplus_checkbyte()
84 bs = dir->bhs[bi]->b_size; in adfs_fplus_checkbyte()
116 dir->bighead = h = (void *)dir->bhs[0]->b_data; in adfs_fplus_read()
136 (dir->bhs[dir->nr_buffers - 1]->b_data + (sb->s_blocksize - 8)); in adfs_fplus_read()
adfs.h
95 struct buffer_head **bhs; member
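
adfs_dir_copyfrom() and adfs_dir_copyto() above copy a byte range that may straddle the boundary between two consecutive buffers in dir->bhs[]. A minimal sketch of that split copy, assuming every buffer in the array has the same b_size; copy_from_bhs() is an illustrative name, not an adfs helper.

#include <linux/buffer_head.h>
#include <linux/string.h>

/*
 * Illustrative copy-out across an array of buffer heads: @offset is a byte
 * offset into the concatenation of the buffers' data, and @len may cross
 * one buffer boundary, as in adfs_dir_copyfrom().
 */
static void copy_from_bhs(struct buffer_head **bhs, unsigned int offset,
			  void *dst, size_t len)
{
	unsigned int index = offset / bhs[0]->b_size;
	unsigned int off = offset % bhs[0]->b_size;
	size_t remain = bhs[index]->b_size - off;
	unsigned char *p = dst;

	if (len > remain) {
		/* first the tail of the current buffer ... */
		memcpy(p, bhs[index]->b_data + off, remain);
		p += remain;
		len -= remain;
		index++;
		off = 0;
	}
	/* ... then the rest from the next buffer */
	memcpy(p, bhs[index]->b_data + off, len);
}
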
/fs/ocfs2/
blockcheck.c
441 void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr, in ocfs2_block_check_compute_bhs() argument
455 crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); in ocfs2_block_check_compute_bhs()
461 ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data, in ocfs2_block_check_compute_bhs()
462 bhs[i]->b_size * 8, in ocfs2_block_check_compute_bhs()
463 bhs[i]->b_size * 8 * i); in ocfs2_block_check_compute_bhs()
485 int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr, in ocfs2_block_check_validate_bhs() argument
508 crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); in ocfs2_block_check_validate_bhs()
524 ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data, in ocfs2_block_check_validate_bhs()
525 bhs[i]->b_size * 8, in ocfs2_block_check_validate_bhs()
526 bhs[i]->b_size * 8 * i); in ocfs2_block_check_validate_bhs()
[all …]
buffer_head_io.c
90 unsigned int nr, struct buffer_head *bhs[]) in ocfs2_read_blocks_sync() argument
105 new_bh = (bhs[0] == NULL); in ocfs2_read_blocks_sync()
108 if (bhs[i] == NULL) { in ocfs2_read_blocks_sync()
109 bhs[i] = sb_getblk(osb->sb, block++); in ocfs2_read_blocks_sync()
110 if (bhs[i] == NULL) { in ocfs2_read_blocks_sync()
116 bh = bhs[i]; in ocfs2_read_blocks_sync()
155 bh = bhs[i - 1]; in ocfs2_read_blocks_sync()
166 bhs[i - 1] = NULL; in ocfs2_read_blocks_sync()
194 struct buffer_head *bhs[], int flags, in ocfs2_read_blocks() argument
210 if (bhs == NULL) { in ocfs2_read_blocks()
[all …]
blockcheck.h
35 struct buffer_head **bhs, int nr,
38 struct buffer_head **bhs, int nr,
47 void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr,
49 int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
buffer_head_io.h
19 unsigned int nr, struct buffer_head *bhs[]);
29 struct buffer_head *bhs[], int flags,
namei.c
1702 struct buffer_head **bhs = NULL; in ocfs2_create_symlink_data() local
1724 bhs = kcalloc(blocks, sizeof(struct buffer_head *), GFP_KERNEL); in ocfs2_create_symlink_data()
1725 if (!bhs) { in ocfs2_create_symlink_data()
1751 bhs[virtual] = sb_getblk(sb, p_blkno); in ocfs2_create_symlink_data()
1752 if (!bhs[virtual]) { in ocfs2_create_symlink_data()
1758 bhs[virtual]); in ocfs2_create_symlink_data()
1761 bhs[virtual], in ocfs2_create_symlink_data()
1768 memset(bhs[virtual]->b_data, 0, sb->s_blocksize); in ocfs2_create_symlink_data()
1770 memcpy(bhs[virtual]->b_data, c, in ocfs2_create_symlink_data()
1774 ocfs2_journal_dirty(handle, bhs[virtual]); in ocfs2_create_symlink_data()
[all …]
extent_map.c
957 struct buffer_head *bhs[], int flags, in ocfs2_read_virt_blocks() argument
966 inode, (unsigned long long)v_block, nr, bhs, flags, in ocfs2_read_virt_blocks()
1005 if (!bhs[done + i]) in ocfs2_read_virt_blocks()
1007 BUG_ON(bhs[done + i]->b_blocknr != (p_block + i)); in ocfs2_read_virt_blocks()
1011 bhs + done, flags, validate); in ocfs2_read_virt_blocks()
extent_map.h
52 struct buffer_head *bhs[], int flags,
ocfs2_trace.h
1601 void *bhs, unsigned int flags, void *validate),
1602 TP_ARGS(inode, vblock, nr, bhs, flags, validate),
1607 __field(void *, bhs)
1615 __entry->bhs = bhs;
1620 __entry->nr, __entry->bhs, __entry->flags, __entry->validate)
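
The ocfs2 blockcheck excerpts above (ocfs2_block_check_compute_bhs() and ocfs2_block_check_validate_bhs()) fold the data of every buffer in the array into one running crc32_le() checksum. A minimal sketch of that accumulation, omitting ocfs2's hamming/ECC half; crc_of_bhs() is an illustrative name.

#include <linux/buffer_head.h>
#include <linux/crc32.h>

/*
 * Illustrative: one CRC32 accumulated over the concatenated contents of
 * @nr buffer heads, the way the ocfs2 *_bhs block-check helpers do.
 */
static u32 crc_of_bhs(struct buffer_head **bhs, int nr)
{
	u32 crc = ~0;
	int i;

	for (i = 0; i < nr; i++)
		crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
	return crc;
}
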
/fs/isofs/
compress.c
58 struct buffer_head **bhs; in zisofs_uncompress_block() local
78 bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL); in zisofs_uncompress_block()
79 if (!bhs) { in zisofs_uncompress_block()
83 haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks); in zisofs_uncompress_block()
84 bh_read_batch(haveblocks, bhs); in zisofs_uncompress_block()
95 if (!bhs[0]) in zisofs_uncompress_block()
98 wait_on_buffer(bhs[0]); in zisofs_uncompress_block()
99 if (!buffer_uptodate(bhs[0])) { in zisofs_uncompress_block()
132 wait_on_buffer(bhs[curbh]); in zisofs_uncompress_block()
133 if (!buffer_uptodate(bhs[curbh])) { in zisofs_uncompress_block()
[all …]
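
zisofs_uncompress_block() above allocates its bhs array with kcalloc(), starts I/O on the whole batch with bh_read_batch(), and only then waits on each buffer before using it. A minimal sketch of that submit-then-wait flow, using sb_getblk() in place of the isofs-specific isofs_get_blocks() mapping; read_block_batch() is an illustrative name.

#include <linux/buffer_head.h>
#include <linux/slab.h>

/*
 * Illustrative: read @nr consecutive blocks through a kcalloc()'d bhs
 * array, submitting all reads up front with bh_read_batch() and waiting
 * afterwards. Returns the array, or NULL on any failure.
 */
static struct buffer_head **read_block_batch(struct super_block *sb,
					     sector_t start, int nr)
{
	struct buffer_head **bhs;
	int i;

	bhs = kcalloc(nr, sizeof(*bhs), GFP_KERNEL);
	if (!bhs)
		return NULL;

	for (i = 0; i < nr; i++) {
		bhs[i] = sb_getblk(sb, start + i);
		if (!bhs[i])
			goto release;
	}

	bh_read_batch(nr, bhs);		/* queue reads for the whole batch */

	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			goto release;
	}
	return bhs;

release:
	for (i = 0; i < nr; i++)
		brelse(bhs[i]);		/* brelse(NULL) is a no-op */
	kfree(bhs);
	return NULL;
}
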
/fs/ntfs/
compress.c
474 struct buffer_head **bhs; in ntfs_read_compressed_block() local
520 bhs = kmalloc(bhs_size, GFP_NOFS); in ntfs_read_compressed_block()
522 if (unlikely(!pages || !bhs || !completed_pages)) { in ntfs_read_compressed_block()
523 kfree(bhs); in ntfs_read_compressed_block()
550 kfree(bhs); in ntfs_read_compressed_block()
639 if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block)))) in ntfs_read_compressed_block()
651 struct buffer_head *tbh = bhs[i]; in ntfs_read_compressed_block()
666 struct buffer_head *tbh = bhs[i]; in ntfs_read_compressed_block()
707 memcpy(cb_pos, bhs[i]->b_data, block_size); in ntfs_read_compressed_block()
875 brelse(bhs[i]); in ntfs_read_compressed_block()
[all …]
aops.c
912 struct buffer_head *bhs[MAX_BUF_PER_PAGE]; in ntfs_write_mst_block() local
1079 while (bhs[--nr_bhs] != rec_start_bh) in ntfs_write_mst_block()
1096 bhs[nr_bhs++] = bh; in ntfs_write_mst_block()
1114 tbh = bhs[i]; in ntfs_write_mst_block()
1138 bhs[i] = NULL; in ntfs_write_mst_block()
1166 clear_buffer_dirty(bhs[i]); in ntfs_write_mst_block()
1167 bhs[i] = NULL; in ntfs_write_mst_block()
1179 tbh = bhs[i]; in ntfs_write_mst_block()
1198 tbh = bhs[i]; in ntfs_write_mst_block()
1231 tbh = bhs[i]; in ntfs_write_mst_block()
[all …]
mft.c
460 struct buffer_head *bhs[MAX_BHS]; in ntfs_sync_mft_mirror() local
570 bhs[nr_bhs++] = bh; in ntfs_sync_mft_mirror()
578 struct buffer_head *tbh = bhs[i_bhs]; in ntfs_sync_mft_mirror()
590 struct buffer_head *tbh = bhs[i_bhs]; in ntfs_sync_mft_mirror()
605 clear_buffer_dirty(bhs[i_bhs]); in ntfs_sync_mft_mirror()
667 struct buffer_head *bhs[MAX_BHS]; in write_mft_record_nolock() local
757 bhs[nr_bhs++] = bh; in write_mft_record_nolock()
775 struct buffer_head *tbh = bhs[i_bhs]; in write_mft_record_nolock()
790 struct buffer_head *tbh = bhs[i_bhs]; in write_mft_record_nolock()
823 clear_buffer_dirty(bhs[i_bhs]); in write_mft_record_nolock()
/fs/exfat/
misc.c
165 int exfat_update_bhs(struct buffer_head **bhs, int nr_bhs, int sync) in exfat_update_bhs() argument
170 set_buffer_uptodate(bhs[i]); in exfat_update_bhs()
171 mark_buffer_dirty(bhs[i]); in exfat_update_bhs()
173 write_dirty_buffer(bhs[i], REQ_SYNC); in exfat_update_bhs()
177 wait_on_buffer(bhs[i]); in exfat_update_bhs()
178 if (!err && !buffer_uptodate(bhs[i])) in exfat_update_bhs()
/fs/gfs2/
meta_io.c
220 static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num) in gfs2_submit_bhs() argument
223 struct buffer_head *bh = *bhs; in gfs2_submit_bhs()
229 bh = *bhs; in gfs2_submit_bhs()
234 bhs++; in gfs2_submit_bhs()
257 struct buffer_head *bh, *bhs[2]; in gfs2_meta_read() local
275 bhs[num++] = bh; in gfs2_meta_read()
287 bhs[num++] = bh; in gfs2_meta_read()
291 gfs2_submit_bhs(REQ_OP_READ | REQ_META | REQ_PRIO, bhs, num); in gfs2_meta_read()
/fs/
buffer.c
1305 struct buffer_head *bhs[BH_LRU_SIZE]; member
1352 swap(evictee, b->bhs[i]); in bh_lru_install()
1380 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru()
1386 __this_cpu_write(bh_lrus.bhs[i], in lookup_bh_lru()
1387 __this_cpu_read(bh_lrus.bhs[i - 1])); in lookup_bh_lru()
1390 __this_cpu_write(bh_lrus.bhs[0], bh); in lookup_bh_lru()
1486 brelse(b->bhs[i]); in __invalidate_bh_lrus()
1487 b->bhs[i] = NULL; in __invalidate_bh_lrus()
1509 if (b->bhs[i]) in has_bh_in_lru()
3059 brelse(b->bhs[i]); in buffer_exit_cpu_dead()
[all …]
/fs/ext4/
xattr.c
388 struct buffer_head **bhs = bhs_inline; in ext4_xattr_inode_read() local
392 bhs = kmalloc_array(bh_count, sizeof(*bhs), GFP_NOFS); in ext4_xattr_inode_read()
393 if (!bhs) in ext4_xattr_inode_read()
398 true /* wait */, bhs); in ext4_xattr_inode_read()
404 if (!bhs[i]) { in ext4_xattr_inode_read()
408 memcpy((char *)buf + blocksize * i, bhs[i]->b_data, in ext4_xattr_inode_read()
414 brelse(bhs[i]); in ext4_xattr_inode_read()
416 if (bhs != bhs_inline) in ext4_xattr_inode_read()
417 kfree(bhs); in ext4_xattr_inode_read()
inode.c
932 bool wait, struct buffer_head **bhs) in ext4_bread_batch() argument
937 bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */); in ext4_bread_batch()
938 if (IS_ERR(bhs[i])) { in ext4_bread_batch()
939 err = PTR_ERR(bhs[i]); in ext4_bread_batch()
947 if (bhs[i] && !ext4_buffer_uptodate(bhs[i])) in ext4_bread_batch()
948 ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false); in ext4_bread_batch()
954 if (bhs[i]) in ext4_bread_batch()
955 wait_on_buffer(bhs[i]); in ext4_bread_batch()
958 if (bhs[i] && !buffer_uptodate(bhs[i])) { in ext4_bread_batch()
967 brelse(bhs[i]); in ext4_bread_batch()
[all …]
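
ext4_xattr_inode_read() above starts from a small bhs_inline array and falls back to kmalloc_array() only when the block count is larger, freeing the array at the end only if it is not the inline one. A minimal sketch of that small-array-or-allocate pattern; the 8-entry INLINE_BHS capacity and the read_many_blocks() name are assumptions, not ext4 code.

#include <linux/buffer_head.h>
#include <linux/slab.h>

#define INLINE_BHS 8	/* assumed inline capacity, purely illustrative */

/*
 * Illustrative: use a small on-stack bhs array for the common case and
 * fall back to kmalloc_array() for large batches, freeing the array at
 * the end only if it is not the inline one.
 */
static int read_many_blocks(struct super_block *sb, sector_t first,
			    unsigned int count)
{
	struct buffer_head *bhs_inline[INLINE_BHS];
	struct buffer_head **bhs = bhs_inline;
	unsigned int i;
	int ret = 0;

	if (count > INLINE_BHS) {
		bhs = kmalloc_array(count, sizeof(*bhs), GFP_NOFS);
		if (!bhs)
			return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		bhs[i] = sb_getblk(sb, first + i);
		if (!bhs[i]) {
			ret = -ENOMEM;
			goto out;
		}
	}

	bh_read_batch(count, bhs);
	for (i = 0; i < count; i++) {
		wait_on_buffer(bhs[i]);
		if (!ret && !buffer_uptodate(bhs[i]))
			ret = -EIO;
	}
out:
	while (i--)
		brelse(bhs[i]);
	if (bhs != bhs_inline)
		kfree(bhs);
	return ret;
}
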
/fs/nilfs2/
recovery.c
93 struct buffer_head *bhs, u32 *sum, in nilfs_compute_checksum() argument
105 (unsigned char *)bhs->b_data + offset, size); in nilfs_compute_checksum()
