/fs/

buffer.c
    48   static int submit_bh_wbc(int rw, struct buffer_head *bh,
    54   void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)   in init_buffer() argument
    56   bh->b_end_io = handler;   in init_buffer()
    57   bh->b_private = private;   in init_buffer()
    61   inline void touch_buffer(struct buffer_head *bh)   in touch_buffer() argument
    63   trace_block_touch_buffer(bh);   in touch_buffer()
    64   mark_page_accessed(bh->b_page);   in touch_buffer()
    68   void __lock_buffer(struct buffer_head *bh)   in __lock_buffer() argument
    70   wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);   in __lock_buffer()
    74   void unlock_buffer(struct buffer_head *bh)   in unlock_buffer() argument
    [all …]

dax.c
    71   static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,   in dax_get_addr() argument
    75   sector_t sector = bh->b_blocknr << (blkbits - 9);   in dax_get_addr()
    76   return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);   in dax_get_addr()
    91   static bool buffer_written(struct buffer_head *bh)   in buffer_written() argument
    93   return buffer_mapped(bh) && !buffer_unwritten(bh);   in buffer_written()
    103  static bool buffer_size_valid(struct buffer_head *bh)   in buffer_size_valid() argument
    105  return bh->b_state != 0;   in buffer_size_valid()
    110  struct buffer_head *bh)   in dax_io() argument
    133  bh->b_size = PAGE_ALIGN(end - pos);   in dax_io()
    134  bh->b_state = 0;   in dax_io()
    [all …]
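The helpers listed from fs/buffer.c (lock_buffer()/unlock_buffer(), touch_buffer(), and friends) are the reference and locking primitives every buffer_head user builds on. A minimal sketch of the usual pin, lock, modify, dirty sequence a filesystem applies to a metadata buffer follows; the function name and the modify step are hypothetical, only the buffer_head calls are the real API.

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>

/*
 * Hypothetical helper: pin and lock a buffer, update its payload,
 * then mark it dirty so writeback will eventually push it out.
 */
static void example_update_block(struct buffer_head *bh,
				 const void *data, size_t len)
{
	get_bh(bh);			/* extra reference while we work on it */
	lock_buffer(bh);		/* the lock __lock_buffer() implements */

	memcpy(bh->b_data, data, min(len, bh->b_size));
	mark_buffer_dirty(bh);		/* defer the actual write to writeback */

	unlock_buffer(bh);
	put_bh(bh);
}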
/fs/ocfs2/

buffer_head_io.c
    53   int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,   in ocfs2_write_block() argument
    58   trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);   in ocfs2_write_block()
    60   BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);   in ocfs2_write_block()
    61   BUG_ON(buffer_jbd(bh));   in ocfs2_write_block()
    74   lock_buffer(bh);   in ocfs2_write_block()
    75   set_buffer_uptodate(bh);   in ocfs2_write_block()
    78   clear_buffer_dirty(bh);   in ocfs2_write_block()
    80   get_bh(bh); /* for end_buffer_write_sync() */   in ocfs2_write_block()
    81   bh->b_end_io = end_buffer_write_sync;   in ocfs2_write_block()
    82   submit_bh(WRITE, bh);   in ocfs2_write_block()
    [all …]
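ocfs2_write_block() above is an instance of the classic synchronous buffer write: lock the buffer, mark it up to date, clear the dirty bit, take a reference for the completion handler, and submit with end_buffer_write_sync as b_end_io. A generic sketch of that pattern, using the older submit_bh(rw, bh) signature that matches this listing (the function name is hypothetical):

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/buffer_head.h>

/* Hypothetical: write one mapped buffer synchronously, report I/O errors. */
static int example_write_block_sync(struct buffer_head *bh)
{
	/* caller must pass a buffer that is already mapped to a disk block */
	lock_buffer(bh);
	set_buffer_uptodate(bh);	/* the in-memory copy is authoritative */
	clear_buffer_dirty(bh);		/* we write it out ourselves */

	get_bh(bh);			/* dropped by end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);		/* pre-4.8 signature, as in the listing */

	wait_on_buffer(bh);		/* end_io unlocks the buffer when done */
	return buffer_uptodate(bh) ? 0 : -EIO;
}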
/fs/gfs2/

meta_io.c
    38   struct buffer_head *bh, *head;   in gfs2_aspace_writepage() local
    47   bh = head;   in gfs2_aspace_writepage()
    50   if (!buffer_mapped(bh))   in gfs2_aspace_writepage()
    60   lock_buffer(bh);   in gfs2_aspace_writepage()
    61   } else if (!trylock_buffer(bh)) {   in gfs2_aspace_writepage()
    65   if (test_clear_buffer_dirty(bh)) {   in gfs2_aspace_writepage()
    66   mark_buffer_async_write(bh);   in gfs2_aspace_writepage()
    68   unlock_buffer(bh);   in gfs2_aspace_writepage()
    70   } while ((bh = bh->b_this_page) != head);   in gfs2_aspace_writepage()
    80   struct buffer_head *next = bh->b_this_page;   in gfs2_aspace_writepage()
    [all …]
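gfs2_aspace_writepage() (like xfs_count_page_state() further down) walks the circular list of buffer_heads attached to a page via b_this_page. A sketch of that traversal, here only counting dirty buffers rather than submitting them (the function name is hypothetical, the do/while idiom is the standard one):

#include <linux/mm.h>
#include <linux/buffer_head.h>

/* Hypothetical: count the dirty buffers on a page. Caller holds the page locked. */
static unsigned int example_count_dirty_buffers(struct page *page)
{
	struct buffer_head *bh, *head;
	unsigned int dirty = 0;

	if (!page_has_buffers(page))
		return 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			dirty++;
	} while ((bh = bh->b_this_page) != head);

	return dirty;
}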
/fs/hpfs/

buffer.c
    39   struct buffer_head *bh;   in hpfs_prefetch_sectors() local
    48   bh = sb_find_get_block(s, secno);   in hpfs_prefetch_sectors()
    49   if (bh) {   in hpfs_prefetch_sectors()
    50   if (buffer_uptodate(bh)) {   in hpfs_prefetch_sectors()
    51   brelse(bh);   in hpfs_prefetch_sectors()
    54   brelse(bh);   in hpfs_prefetch_sectors()
    73   struct buffer_head *bh;   in hpfs_map_sector() local
    81   *bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));   in hpfs_map_sector()
    82   if (bh != NULL)   in hpfs_map_sector()
    83   return bh->b_data;   in hpfs_map_sector()
    [all …]

anode.c
    15   struct buffer_head *bh)   in hpfs_bplus_lookup() argument
    27   brelse(bh);   in hpfs_bplus_lookup()
    28   if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;   in hpfs_bplus_lookup()
    33   brelse(bh);   in hpfs_bplus_lookup()
    41   brelse(bh);   in hpfs_bplus_lookup()
    50   brelse(bh);   in hpfs_bplus_lookup()
    54   brelse(bh);   in hpfs_bplus_lookup()
    67   struct buffer_head *bh, *bh1, *bh2;   in hpfs_add_sector_to_btree() local
    72   if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;   in hpfs_add_sector_to_btree()
    75   if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;   in hpfs_add_sector_to_btree()
    [all …]
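hpfs_prefetch_sectors() checks the block cache with sb_find_get_block() before deciding whether I/O is needed, while hpfs_map_sector() reads the block with sb_bread() and hands back bh->b_data. A sketch of that lookup-then-read split (the helper name is hypothetical):

#include <linux/fs.h>
#include <linux/buffer_head.h>

/*
 * Hypothetical: return a block's data, reading it only if it is not
 * already cached and up to date. The caller must brelse(*bhp).
 */
static void *example_map_block(struct super_block *sb, sector_t block,
			       struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = sb_find_get_block(sb, block);	/* cache lookup, no I/O */
	if (bh && buffer_uptodate(bh)) {
		*bhp = bh;
		return bh->b_data;
	}
	brelse(bh);				/* brelse(NULL) is a no-op */

	bh = sb_bread(sb, block);		/* read from disk if needed */
	if (!bh)
		return NULL;
	*bhp = bh;
	return bh->b_data;
}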
/fs/nilfs2/

btnode.c
    47   struct buffer_head *bh;   in nilfs_btnode_create_block() local
    49   bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);   in nilfs_btnode_create_block()
    50   if (unlikely(!bh))   in nilfs_btnode_create_block()
    53   if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||   in nilfs_btnode_create_block()
    54   buffer_dirty(bh))) {   in nilfs_btnode_create_block()
    55   brelse(bh);   in nilfs_btnode_create_block()
    58   memset(bh->b_data, 0, i_blocksize(inode));   in nilfs_btnode_create_block()
    59   bh->b_bdev = inode->i_sb->s_bdev;   in nilfs_btnode_create_block()
    60   bh->b_blocknr = blocknr;   in nilfs_btnode_create_block()
    61   set_buffer_mapped(bh);   in nilfs_btnode_create_block()
    [all …]

gcinode.c
    76   struct buffer_head *bh;   in nilfs_gccache_submit_read_data() local
    79   bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);   in nilfs_gccache_submit_read_data()
    80   if (unlikely(!bh))   in nilfs_gccache_submit_read_data()
    83   if (buffer_uptodate(bh))   in nilfs_gccache_submit_read_data()
    91   brelse(bh);   in nilfs_gccache_submit_read_data()
    96   lock_buffer(bh);   in nilfs_gccache_submit_read_data()
    97   if (buffer_uptodate(bh)) {   in nilfs_gccache_submit_read_data()
    98   unlock_buffer(bh);   in nilfs_gccache_submit_read_data()
    102  if (!buffer_mapped(bh)) {   in nilfs_gccache_submit_read_data()
    103  bh->b_bdev = inode->i_sb->s_bdev;   in nilfs_gccache_submit_read_data()
    [all …]

mdt.c
    43   struct buffer_head *bh,   in nilfs_mdt_insert_new_block() argument
    54   bh->b_blocknr = 0;   in nilfs_mdt_insert_new_block()
    56   ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);   in nilfs_mdt_insert_new_block()
    60   set_buffer_mapped(bh);   in nilfs_mdt_insert_new_block()
    62   kaddr = kmap_atomic(bh->b_page);   in nilfs_mdt_insert_new_block()
    63   memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));   in nilfs_mdt_insert_new_block()
    65   init_block(inode, bh, kaddr);   in nilfs_mdt_insert_new_block()
    66   flush_dcache_page(bh->b_page);   in nilfs_mdt_insert_new_block()
    69   set_buffer_uptodate(bh);   in nilfs_mdt_insert_new_block()
    70   mark_buffer_dirty(bh);   in nilfs_mdt_insert_new_block()
    [all …]

page.c
    48   struct buffer_head *bh;   in __nilfs_get_page_block() local
    54   bh = nilfs_page_get_nth_block(page, block - first_block);   in __nilfs_get_page_block()
    56   touch_buffer(bh);   in __nilfs_get_page_block()
    57   wait_on_buffer(bh);   in __nilfs_get_page_block()
    58   return bh;   in __nilfs_get_page_block()
    69   struct buffer_head *bh;   in nilfs_grab_buffer() local
    75   bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);   in nilfs_grab_buffer()
    76   if (unlikely(!bh)) {   in nilfs_grab_buffer()
    81   return bh;   in nilfs_grab_buffer()
    89   void nilfs_forget_buffer(struct buffer_head *bh)   in nilfs_forget_buffer() argument
    [all …]
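The nilfs2 code above maps buffers by hand: it sets b_bdev and b_blocknr and then set_buffer_mapped() before doing I/O. The generic helper map_bh() wraps the same bookkeeping. A sketch of mapping a buffer to a known on-disk block and reading it synchronously (function name hypothetical, submit_bh() again in its older rw form):

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/buffer_head.h>

/* Hypothetical: map bh to @block on @sb's device and read it in. */
static int example_map_and_read(struct super_block *sb,
				struct buffer_head *bh, sector_t block)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {		/* someone already read it */
		unlock_buffer(bh);
		return 0;
	}

	map_bh(bh, sb, block);			/* sets b_bdev, b_blocknr, b_size
						   and the mapped bit */

	get_bh(bh);				/* dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);

	return buffer_uptodate(bh) ? 0 : -EIO;
}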
/fs/omfs/

dir.c
    38   struct buffer_head *bh;   in omfs_scan_list() local
    44   bh = omfs_bread(dir->i_sb, block);   in omfs_scan_list()
    45   if (!bh) {   in omfs_scan_list()
    50   oi = (struct omfs_inode *) bh->b_data;   in omfs_scan_list()
    52   brelse(bh);   in omfs_scan_list()
    57   return bh;   in omfs_scan_list()
    61   brelse(bh);   in omfs_scan_list()
    70   struct buffer_head *bh;   in omfs_find_entry() local
    74   bh = omfs_get_bucket(dir, name, namelen, &ofs);   in omfs_find_entry()
    75   if (!bh)   in omfs_find_entry()
    [all …]
/fs/ext2/

xattr.c
    67   #define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))   argument
    69   #define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)   argument
    79   # define ea_bdebug(bh, f...) do { \   argument
    82   bdevname(bh->b_bdev, b), \
    83   (unsigned long) bh->b_blocknr); \
    150  struct buffer_head *bh = NULL;   in ext2_xattr_get() local
    171  bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);   in ext2_xattr_get()
    173  if (!bh)   in ext2_xattr_get()
    175  ea_bdebug(bh, "b_count=%d, refcount=%d",   in ext2_xattr_get()
    176  atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));   in ext2_xattr_get()
    [all …]
/fs/affs/

file.c
    50   struct buffer_head *bh;   in affs_grow_extcache() local
    99   bh = affs_bread(sb, key);   in affs_grow_extcache()
    100  if (!bh)   in affs_grow_extcache()
    102  key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);   in affs_grow_extcache()
    103  affs_brelse(bh);   in affs_grow_extcache()
    117  affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)   in affs_alloc_extblock() argument
    123  blocknr = affs_alloc_block(inode, bh->b_blocknr);   in affs_alloc_extblock()
    141  tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);   in affs_alloc_extblock()
    144  AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);   in affs_alloc_extblock()
    145  affs_adjust_checksum(bh, blocknr - tmp);   in affs_alloc_extblock()
    [all …]

amigaffs.c
    23   affs_insert_hash(struct inode *dir, struct buffer_head *bh)   in affs_insert_hash() argument
    30   ino = bh->b_blocknr;   in affs_insert_hash()
    31   offset = affs_hash_name(sb, AFFS_TAIL(sb, bh)->name + 1, AFFS_TAIL(sb, bh)->name[0]);   in affs_insert_hash()
    47   AFFS_TAIL(sb, bh)->parent = cpu_to_be32(dir->i_ino);   in affs_insert_hash()
    48   AFFS_TAIL(sb, bh)->hash_chain = 0;   in affs_insert_hash()
    49   affs_fix_checksum(sb, bh);   in affs_insert_hash()
    75   struct buffer_head *bh;   in affs_remove_hash() local
    86   bh = affs_bread(sb, dir->i_ino);   in affs_remove_hash()
    87   if (!bh)   in affs_remove_hash()
    91   hash_ino = be32_to_cpu(AFFS_HEAD(bh)->table[offset]);   in affs_remove_hash()
    [all …]
/fs/reiserfs/

prints.c
    134  static void sprintf_block_head(char *buf, struct buffer_head *bh)   in sprintf_block_head() argument
    137  B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));   in sprintf_block_head()
    140  static void sprintf_buffer_head(char *buf, struct buffer_head *bh)   in sprintf_buffer_head() argument
    146  bdevname(bh->b_bdev, b), bh->b_size,   in sprintf_buffer_head()
    147  (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),   in sprintf_buffer_head()
    148  bh->b_state, bh->b_page,   in sprintf_buffer_head()
    149  buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",   in sprintf_buffer_head()
    150  buffer_dirty(bh) ? "DIRTY" : "CLEAN",   in sprintf_buffer_head()
    151  buffer_locked(bh) ? "LOCKED" : "UNLOCKED");   in sprintf_buffer_head()
    423  static int print_internal(struct buffer_head *bh, int first, int last)   in print_internal() argument
    [all …]
/fs/jbd2/

commit.c
    35   static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)   in journal_end_buffer_io_sync() argument
    37   struct buffer_head *orig_bh = bh->b_private;   in journal_end_buffer_io_sync()
    39   BUFFER_TRACE(bh, "");   in journal_end_buffer_io_sync()
    41   set_buffer_uptodate(bh);   in journal_end_buffer_io_sync()
    43   clear_buffer_uptodate(bh);   in journal_end_buffer_io_sync()
    49   unlock_buffer(bh);   in journal_end_buffer_io_sync()
    66   static void release_buffer_page(struct buffer_head *bh)   in release_buffer_page() argument
    70   if (buffer_dirty(bh))   in release_buffer_page()
    72   if (atomic_read(&bh->b_count) != 1)   in release_buffer_page()
    74   page = bh->b_page;   in release_buffer_page()
    [all …]

transaction.c
    766  static void warn_dirty_buffer(struct buffer_head *bh)   in warn_dirty_buffer() argument
    774  bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);   in warn_dirty_buffer()
    783  struct buffer_head *bh = jh2bh(jh);   in jbd2_freeze_jh_data() local
    785  J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");   in jbd2_freeze_jh_data()
    786  page = bh->b_page;   in jbd2_freeze_jh_data()
    787  offset = offset_in_page(bh->b_data);   in jbd2_freeze_jh_data()
    791  memcpy(jh->b_frozen_data, source + offset, bh->b_size);   in jbd2_freeze_jh_data()
    815  struct buffer_head *bh;   in do_get_write_access() local
    830  bh = jh2bh(jh);   in do_get_write_access()
    835  lock_buffer(bh);   in do_get_write_access()
    [all …]
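do_get_write_access() in transaction.c is the journal-side machinery behind jbd2_journal_get_write_access(). Seen from a client filesystem, the flow is: start a handle, get write access to the buffer, modify it, then file it as dirty metadata. A sketch of that sequence against the public jbd2 API (the journal pointer, block number, and the one-byte update are placeholders):

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/buffer_head.h>
#include <linux/jbd2.h>

/* Hypothetical: journal a small update to one metadata block. */
static int example_journalled_update(journal_t *journal,
				     struct super_block *sb, sector_t block)
{
	handle_t *handle;
	struct buffer_head *bh;
	int err;

	handle = jbd2_journal_start(journal, 1);	/* 1 credit: one block */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	bh = sb_bread(sb, block);
	if (!bh) {
		err = -EIO;
		goto out_stop;
	}

	err = jbd2_journal_get_write_access(handle, bh);
	if (err)
		goto out_brelse;

	((char *)bh->b_data)[0] ^= 1;			/* placeholder update */

	err = jbd2_journal_dirty_metadata(handle, bh);	/* file into the transaction */
out_brelse:
	brelse(bh);
out_stop:
	jbd2_journal_stop(handle);
	return err;
}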
/fs/ext4/

ext4_jbd2.c
    133  struct buffer_head *bh,   in ext4_journal_abort_handle() argument
    141  if (bh)   in ext4_journal_abort_handle()
    142  BUFFER_TRACE(bh, "abort");   in ext4_journal_abort_handle()
    157  handle_t *handle, struct buffer_head *bh)   in __ext4_journal_get_write_access() argument
    164  err = jbd2_journal_get_write_access(handle, bh);   in __ext4_journal_get_write_access()
    166  ext4_journal_abort_handle(where, line, __func__, bh,   in __ext4_journal_get_write_access()
    186  struct buffer_head *bh, ext4_fsblk_t blocknr)   in __ext4_forget() argument
    193  BUFFER_TRACE(bh, "enter");   in __ext4_forget()
    197  bh, is_metadata, inode->i_mode,   in __ext4_forget()
    202  bforget(bh);   in __ext4_forget()
    [all …]

mmp.c
    41   static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)   in write_mmp_block() argument
    43   struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);   in write_mmp_block()
    51   lock_buffer(bh);   in write_mmp_block()
    52   bh->b_end_io = end_buffer_write_sync;   in write_mmp_block()
    53   get_bh(bh);   in write_mmp_block()
    54   submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);   in write_mmp_block()
    55   wait_on_buffer(bh);   in write_mmp_block()
    57   if (unlikely(!buffer_uptodate(bh)))   in write_mmp_block()
    67   static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,   in read_mmp_block() argument
    73   if (*bh)   in read_mmp_block()
    [all …]

page-io.c
    54   static void buffer_io_error(struct buffer_head *bh)   in buffer_io_error() argument
    58   bdevname(bh->b_bdev, b),   in buffer_io_error()
    59   (unsigned long long)bh->b_blocknr);   in buffer_io_error()
    73   struct buffer_head *bh, *head;   in ext4_finish_bio() local
    95   bh = head = page_buffers(page);   in ext4_finish_bio()
    103  if (bh_offset(bh) < bio_start ||   in ext4_finish_bio()
    104  bh_offset(bh) + bh->b_size > bio_end) {   in ext4_finish_bio()
    105  if (buffer_async_write(bh))   in ext4_finish_bio()
    109  clear_buffer_async_write(bh);   in ext4_finish_bio()
    111  buffer_io_error(bh);   in ext4_finish_bio()
    [all …]

xattr.c
    70   # define ea_bdebug(bh, f...) do { \   argument
    73   bdevname(bh->b_bdev, b), \
    74   (unsigned long) bh->b_blocknr); \
    80   # define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)   argument
    142  struct buffer_head *bh)   in ext4_xattr_block_csum_verify() argument
    144  struct ext4_xattr_header *hdr = BHDR(bh);   in ext4_xattr_block_csum_verify()
    148  lock_buffer(bh);   in ext4_xattr_block_csum_verify()
    150  bh->b_blocknr, hdr));   in ext4_xattr_block_csum_verify()
    151  unlock_buffer(bh);   in ext4_xattr_block_csum_verify()
    157  struct buffer_head *bh)   in ext4_xattr_block_csum_set() argument
    [all …]
/fs/minix/

itree_common.c
    6    struct buffer_head *bh;   member
    11   static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)   in add_chain() argument
    14   p->bh = bh;   in add_chain()
    24   static inline block_t *block_end(struct buffer_head *bh)   in block_end() argument
    26   return (block_t *)((char*)bh->b_data + bh->b_size);   in block_end()
    37   struct buffer_head *bh;   in get_branch() local
    45   bh = sb_bread(sb, block_to_cpu(p->key));   in get_branch()
    46   if (!bh)   in get_branch()
    51   add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);   in get_branch()
    60   brelse(bh);   in get_branch()
    [all …]

bitmap.c
    45   struct buffer_head *bh;   in minix_free_block() local
    60   bh = sbi->s_zmap[zone];   in minix_free_block()
    62   if (!minix_test_and_clear_bit(bit, bh->b_data))   in minix_free_block()
    66   mark_buffer_dirty(bh);   in minix_free_block()
    77   struct buffer_head *bh = sbi->s_zmap[i];   in minix_new_block() local
    81   j = minix_find_first_zero_bit(bh->b_data, bits_per_zone);   in minix_new_block()
    83   minix_set_bit(j, bh->b_data);   in minix_new_block()
    85   mark_buffer_dirty(bh);   in minix_new_block()
    106  minix_V1_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)   in minix_V1_raw_inode() argument
    120  *bh = sb_bread(sb, block);   in minix_V1_raw_inode()
    [all …]
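minix_new_block() above allocates space by scanning a bitmap that lives in a buffer, flipping a bit in bh->b_data, and marking the buffer dirty. A generic sketch of the same idea using the little-endian bitop helpers (minix actually hides these behind its own minix_*_bit wrappers; the helper name here is hypothetical):

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/buffer_head.h>

/*
 * Hypothetical: claim one bit from an on-disk bitmap block and push
 * the change out. A real filesystem holds a lock around the scan.
 */
static int example_alloc_from_bitmap(struct buffer_head *bh,
				     unsigned long nbits)
{
	unsigned long bit;

	bit = find_next_zero_bit_le(bh->b_data, nbits, 0);
	if (bit >= nbits)
		return -ENOSPC;			/* bitmap is full */

	__set_bit_le(bit, bh->b_data);		/* on-disk (LE) bit layout */
	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);			/* optional: force it out now */

	return bit;
}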
/fs/xfs/

xfs_aops.c
    45   struct buffer_head *bh, *head;   in xfs_count_page_state() local
    49   bh = head = page_buffers(page);   in xfs_count_page_state()
    51   if (buffer_unwritten(bh))   in xfs_count_page_state()
    53   else if (buffer_delay(bh))   in xfs_count_page_state()
    55   } while ((bh = bh->b_this_page) != head);   in xfs_count_page_state()
    81   struct buffer_head *bh, *next;   in xfs_destroy_ioend() local
    83   for (bh = ioend->io_buffer_head; bh; bh = next) {   in xfs_destroy_ioend()
    84   next = bh->b_private;   in xfs_destroy_ioend()
    85   bh->b_end_io(bh, !ioend->io_error);   in xfs_destroy_ioend()
    390  struct buffer_head *bh)   in xfs_alloc_ioend_bio()
    [all …]
/fs/ntfs/

aops.c
    58   static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)   in ntfs_end_buffer_async_read() argument
    67   page = bh->b_page;   in ntfs_end_buffer_async_read()
    75   set_buffer_uptodate(bh);   in ntfs_end_buffer_async_read()
    78   bh_offset(bh);   in ntfs_end_buffer_async_read()
    88   if (unlikely(file_ofs + bh->b_size > init_size)) {   in ntfs_end_buffer_async_read()
    97   memset(kaddr + bh_offset(bh) + ofs, 0,   in ntfs_end_buffer_async_read()
    98   bh->b_size - ofs);   in ntfs_end_buffer_async_read()
    104  clear_buffer_uptodate(bh);   in ntfs_end_buffer_async_read()
    107  "0x%llx.", (unsigned long long)bh->b_blocknr);   in ntfs_end_buffer_async_read()
    112  clear_buffer_async_read(bh);   in ntfs_end_buffer_async_read()
    [all …]
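ntfs_end_buffer_async_read() and jbd2's journal_end_buffer_io_sync() are both b_end_io completion handlers: they record the I/O result in the uptodate flag and release the buffer. A minimal sketch of such a handler and how it would be wired up for a read, matching the bh_end_io_t prototype used throughout this listing (names hypothetical):

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical completion: record the result, then unlock and drop the bh. */
static void example_end_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);	/* the waiter will see the failure */

	unlock_buffer(bh);
	put_bh(bh);				/* pairs with get_bh() below */
}

/* Submit a read of a mapped buffer whose completion is example_end_read(). */
static void example_submit_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = example_end_read;
	submit_bh(READ, bh);			/* pre-4.8 signature, as above */
}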