Lines matching refs: bh
86 struct buffer_head *bh, *head; in xfs_count_page_state() local
90 bh = head = page_buffers(page); in xfs_count_page_state()
92 if (buffer_uptodate(bh) && !buffer_mapped(bh)) in xfs_count_page_state()
94 else if (buffer_unwritten(bh)) in xfs_count_page_state()
96 else if (buffer_delay(bh)) in xfs_count_page_state()
98 } while ((bh = bh->b_this_page) != head); in xfs_count_page_state()
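
These first matches, apparently from the 2.6-era XFS address-space code, show the standard circular walk over a page's buffer_heads: the buffers hang off the page in a ring linked through b_this_page, so a do/while loop that stops when the cursor returns to the head visits each buffer exactly once while tallying delalloc, unwritten and unmapped state. A minimal userspace model of the idiom, with a toy stand-in for struct buffer_head (the real structure and its state-test helpers are far richer):

#include <stdio.h>

/* toy stand-in for struct buffer_head: just the ring link and a few flags */
struct buf {
        struct buf *b_this_page;        /* circular list of a page's buffers */
        int uptodate, mapped, unwritten, delay;
};

static void count_page_state(struct buf *head,
                             int *delalloc, int *unwritten, int *unmapped)
{
        struct buf *bh = head;

        *delalloc = *unwritten = *unmapped = 0;
        do {                            /* visit every buffer in the ring once */
                if (bh->uptodate && !bh->mapped)
                        (*unmapped)++;
                else if (bh->unwritten)
                        (*unwritten)++;
                else if (bh->delay)
                        (*delalloc)++;
        } while ((bh = bh->b_this_page) != head);
}

int main(void)
{
        struct buf b[3] = {
                { &b[1], 1, 0, 0, 0 },  /* uptodate but unmapped */
                { &b[2], 0, 1, 1, 0 },  /* unwritten extent */
                { &b[0], 0, 1, 0, 1 },  /* delayed allocation */
        };
        int d, u, m;

        count_page_state(&b[0], &d, &u, &m);
        printf("delalloc=%d unwritten=%d unmapped=%d\n", d, u, m);
        return 0;
}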
182 struct buffer_head *bh, *next; in xfs_destroy_ioend() local
185 for (bh = ioend->io_buffer_head; bh; bh = next) { in xfs_destroy_ioend()
186 next = bh->b_private; in xfs_destroy_ioend()
187 bh->b_end_io(bh, !ioend->io_error); in xfs_destroy_ioend()
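
xfs_destroy_ioend() walks a different list entirely: buffers queued on an ioend are chained singly through b_private, and each buffer's b_end_io completion is called with the ioend's error status inverted into an uptodate flag. Note that the next pointer is saved before the callback runs, because b_end_io may free or recycle the buffer. A hedged userspace model of that teardown:

#include <stdio.h>
#include <stdlib.h>

struct buf {
        struct buf *b_private;          /* singly linked ioend buffer chain */
        void (*b_end_io)(struct buf *, int uptodate);
        int id;
};

static void end_io(struct buf *bh, int uptodate)
{
        printf("buffer %d done, uptodate=%d\n", bh->id, uptodate);
        free(bh);                       /* completion may free the buffer... */
}

static void destroy_ioend(struct buf *head, int io_error)
{
        struct buf *bh, *next;

        for (bh = head; bh; bh = next) {
                next = bh->b_private;   /* ...so grab the link first */
                bh->b_end_io(bh, !io_error);
        }
}

int main(void)
{
        struct buf *a = malloc(sizeof *a), *b = malloc(sizeof *b);

        *a = (struct buf){ b, end_io, 1 };
        *b = (struct buf){ NULL, end_io, 2 };
        destroy_ioend(a, 0);
        return 0;
}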
415 struct buffer_head *bh) in xfs_alloc_ioend_bio() argument
418 int nvecs = bio_get_nr_vecs(bh->b_bdev); in xfs_alloc_ioend_bio()
426 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); in xfs_alloc_ioend_bio()
427 bio->bi_bdev = bh->b_bdev; in xfs_alloc_ioend_bio()
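
xfs_alloc_ioend_bio() sizes the new bio by asking the device how many vectors it will take (bio_get_nr_vecs() existed in kernels of this vintage; it has since been removed) and converts the buffer's filesystem block number into a 512-byte sector address for bi_sector: b_size >> 9 is the number of sectors per block. For a 4 KiB block at b_blocknr 100 that gives sector 800; a quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long long b_blocknr = 100;     /* filesystem block number */
        unsigned int b_size = 4096;             /* block size in bytes */

        /* bi_sector is in 512-byte units: block number times sectors/block */
        unsigned long long bi_sector = b_blocknr * (b_size >> 9);

        printf("block %llu of %u bytes -> sector %llu\n",
               b_blocknr, b_size, bi_sector);   /* prints sector 800 */
        return 0;
}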
434 struct buffer_head *bh) in xfs_start_buffer_writeback() argument
436 ASSERT(buffer_mapped(bh)); in xfs_start_buffer_writeback()
437 ASSERT(buffer_locked(bh)); in xfs_start_buffer_writeback()
438 ASSERT(!buffer_delay(bh)); in xfs_start_buffer_writeback()
439 ASSERT(!buffer_unwritten(bh)); in xfs_start_buffer_writeback()
441 mark_buffer_async_write(bh); in xfs_start_buffer_writeback()
442 set_buffer_uptodate(bh); in xfs_start_buffer_writeback()
443 clear_buffer_dirty(bh); in xfs_start_buffer_writeback()
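
xfs_start_buffer_writeback() is pure flag surgery. The asserts pin down the precondition, mapped and locked with any delalloc or unwritten state already converted, and then the buffer is marked for async write, forced uptodate, and cleaned so that a concurrent re-dirty remains detectable. A toy bitmask model of the transition (the flag values here are illustrative masks; the kernel's BH_* constants are bit numbers used with set_bit-style helpers):

#include <assert.h>
#include <stdio.h>

enum {
        BH_Uptodate = 1, BH_Dirty = 2, BH_Mapped = 4, BH_Locked = 8,
        BH_Delay = 16, BH_Unwritten = 32, BH_Async_Write = 64,
};

static void start_buffer_writeback(unsigned *state)
{
        assert(*state & BH_Mapped);     /* must already have a disk mapping */
        assert(*state & BH_Locked);     /* caller holds the buffer lock */
        assert(!(*state & BH_Delay));   /* delalloc must be converted... */
        assert(!(*state & BH_Unwritten)); /* ...and so must unwritten state */

        *state |= BH_Async_Write | BH_Uptodate;
        *state &= ~BH_Dirty;            /* clean: a re-dirty is now visible */
}

int main(void)
{
        unsigned state = BH_Mapped | BH_Locked | BH_Dirty;

        start_buffer_writeback(&state);
        printf("state=%#x\n", state);
        return 0;
}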
463 static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh) in bio_add_buffer() argument
465 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in bio_add_buffer()
491 struct buffer_head *bh; in xfs_submit_ioend() local
498 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { in xfs_submit_ioend()
499 xfs_start_buffer_writeback(bh); in xfs_submit_ioend()
509 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { in xfs_submit_ioend()
513 bio = xfs_alloc_ioend_bio(bh); in xfs_submit_ioend()
514 } else if (bh->b_blocknr != lastblock + 1) { in xfs_submit_ioend()
519 if (bio_add_buffer(bio, bh) != bh->b_size) { in xfs_submit_ioend()
524 lastblock = bh->b_blocknr; in xfs_submit_ioend()
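
The submit loop in xfs_submit_ioend() shows how buffers are coalesced into bios: a fresh bio is allocated when none is open yet or when the next buffer is not physically contiguous with the last one (b_blocknr != lastblock + 1), and when bio_add_buffer() cannot accept the whole buffer the full bio is submitted and the buffer retried on a new one. The same run-detection logic over a plain list of block numbers, with submit_run() as a hypothetical stand-in for submitting the accumulated bio:

#include <stdio.h>

/* hypothetical stand-in for submitting the bio built so far */
static void submit_run(unsigned long start, unsigned long end)
{
        printf("submit blocks %lu..%lu\n", start, end);
}

int main(void)
{
        unsigned long blocks[] = { 10, 11, 12, 40, 41, 99 };
        int i, n = sizeof(blocks) / sizeof(blocks[0]);
        unsigned long start = blocks[0], last = blocks[0];

        for (i = 1; i < n; i++) {
                if (blocks[i] != last + 1) {    /* discontiguous: flush run */
                        submit_run(start, last);
                        start = blocks[i];
                }
                last = blocks[i];
        }
        submit_run(start, last);                /* flush the final run */
        return 0;
}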
542 struct buffer_head *bh, *next_bh; in xfs_cancel_ioend() local
546 bh = ioend->io_buffer_head; in xfs_cancel_ioend()
548 next_bh = bh->b_private; in xfs_cancel_ioend()
549 clear_buffer_async_write(bh); in xfs_cancel_ioend()
550 unlock_buffer(bh); in xfs_cancel_ioend()
551 } while ((bh = next_bh) != NULL); in xfs_cancel_ioend()
567 struct buffer_head *bh, in xfs_add_to_ioend() argument
580 ioend->io_buffer_head = bh; in xfs_add_to_ioend()
581 ioend->io_buffer_tail = bh; in xfs_add_to_ioend()
586 ioend->io_buffer_tail->b_private = bh; in xfs_add_to_ioend()
587 ioend->io_buffer_tail = bh; in xfs_add_to_ioend()
590 bh->b_private = NULL; in xfs_add_to_ioend()
591 ioend->io_size += bh->b_size; in xfs_add_to_ioend()
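
xfs_add_to_ioend() maintains the ioend's buffer chain with a classic head/tail pair: the first buffer initializes both pointers, later buffers are linked onto the old tail's b_private and become the new tail, the new tail's b_private is nulled to terminate the chain, and io_size accumulates the buffer sizes. A minimal sketch of the append with a simplified ioend:

#include <stdio.h>

struct buf {
        struct buf *b_private;
        unsigned int b_size;
};

struct ioend {
        struct buf *io_buffer_head, *io_buffer_tail;
        unsigned long io_size;
};

static void add_to_ioend(struct ioend *io, struct buf *bh)
{
        if (!io->io_buffer_head) {      /* first buffer starts the chain */
                io->io_buffer_head = bh;
                io->io_buffer_tail = bh;
        } else {                        /* append after the current tail */
                io->io_buffer_tail->b_private = bh;
                io->io_buffer_tail = bh;
        }
        bh->b_private = NULL;           /* the new tail terminates the list */
        io->io_size += bh->b_size;
}

int main(void)
{
        struct ioend io = { 0 };
        struct buf a = { NULL, 4096 }, b = { NULL, 4096 };

        add_to_ioend(&io, &a);
        add_to_ioend(&io, &b);
        printf("io_size=%lu\n", io.io_size);
        return 0;
}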
596 struct buffer_head *bh, in xfs_map_buffer() argument
610 bh->b_blocknr = bn; in xfs_map_buffer()
611 set_buffer_mapped(bh); in xfs_map_buffer()
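
Only the tail of xfs_map_buffer() appears in the matches: the computed block number lands in b_blocknr and the buffer is flagged mapped. The elided computation derives bn from the iomap covering the file offset; a plausible shape, an assumption here rather than the verbatim source, is the extent's start block plus the buffer's block offset into the extent:

#include <stdio.h>

/* hypothetical, simplified iomap: a start block and the file offset it maps */
struct extent_map {
        unsigned long long start_block;
        unsigned long long offset;
};

static unsigned long long map_block(const struct extent_map *m,
                                    unsigned long long offset,
                                    unsigned int block_bits)
{
        /* blocks into the extent = byte offset into the extent >> block_bits */
        return m->start_block + ((offset - m->offset) >> block_bits);
}

int main(void)
{
        struct extent_map m = { 5000, 1 << 20 };

        /* 64 KiB past the extent start with 4 KiB blocks: 16 blocks in */
        printf("bn=%llu\n", map_block(&m, (1 << 20) + (64 << 10), 12));
        return 0;
}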
616 struct buffer_head *bh, in xfs_map_at_offset() argument
624 lock_buffer(bh); in xfs_map_at_offset()
625 xfs_map_buffer(bh, iomapp, offset, block_bits); in xfs_map_at_offset()
626 bh->b_bdev = iomapp->iomap_target->bt_bdev; in xfs_map_at_offset()
627 set_buffer_mapped(bh); in xfs_map_at_offset()
628 clear_buffer_delay(bh); in xfs_map_at_offset()
629 clear_buffer_unwritten(bh); in xfs_map_at_offset()
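
Note how xfs_map_at_offset() closes the loop with xfs_start_buffer_writeback() above: clearing the delay and unwritten bits after mapping is exactly what lets ASSERT(!buffer_delay(bh)) and ASSERT(!buffer_unwritten(bh)) at source lines 438-439 hold once the buffer reaches the I/O path.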
648 struct buffer_head *bh, *head; in xfs_probe_page() local
650 bh = head = page_buffers(page); in xfs_probe_page()
652 if (!buffer_uptodate(bh)) in xfs_probe_page()
654 if (mapped != buffer_mapped(bh)) in xfs_probe_page()
656 ret += bh->b_size; in xfs_probe_page()
659 } while ((bh = bh->b_this_page) != head); in xfs_probe_page()
671 struct buffer_head *bh, in xfs_probe_cluster() argument
682 if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh))) in xfs_probe_cluster()
684 total += bh->b_size; in xfs_probe_cluster()
685 } while ((bh = bh->b_this_page) != head); in xfs_probe_cluster()
749 struct buffer_head *bh, *head; in xfs_is_delayed_page() local
752 bh = head = page_buffers(page); in xfs_is_delayed_page()
754 if (buffer_unwritten(bh)) in xfs_is_delayed_page()
756 else if (buffer_delay(bh)) in xfs_is_delayed_page()
758 else if (buffer_dirty(bh) && buffer_mapped(bh)) in xfs_is_delayed_page()
762 } while ((bh = bh->b_this_page) != head); in xfs_is_delayed_page()
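
xfs_is_delayed_page() reuses the ring walk for classification rather than counting: each buffer is tested in priority order, unwritten first, then delayed allocation, then dirty-and-mapped, so the caller can check that the page actually needs the kind of conversion it is about to perform. A toy classifier in the same spirit (the enum and acceptance rule are simplifications of the real type matching):

#include <stdio.h>

enum state { UNWRITTEN, DELAY, DIRTY_MAPPED, CLEAN };

struct buf {
        struct buf *b_this_page;
        enum state st;
};

/* accept the page only if its buffers match the conversion being done */
static int is_delayed_page(struct buf *head, enum state want)
{
        struct buf *bh = head;
        int acceptable = 0;

        do {
                if (bh->st == UNWRITTEN)
                        acceptable = (want == UNWRITTEN);
                else if (bh->st == DELAY)
                        acceptable = (want == DELAY);
                else if (bh->st == DIRTY_MAPPED)
                        acceptable = (want == DIRTY_MAPPED);
                else
                        break;          /* a clean buffer ends the scan */
        } while ((bh = bh->b_this_page) != head);

        return acceptable;
}

int main(void)
{
        struct buf b[2] = { { &b[1], DELAY }, { &b[0], DELAY } };

        printf("delalloc page: %d\n", is_delayed_page(&b[0], DELAY));
        return 0;
}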
788 struct buffer_head *bh, *head; in xfs_convert_page() local
831 bh = head = page_buffers(page); in xfs_convert_page()
835 if (!buffer_uptodate(bh)) in xfs_convert_page()
837 if (!(PageUptodate(page) || buffer_uptodate(bh))) { in xfs_convert_page()
842 if (buffer_unwritten(bh) || buffer_delay(bh)) { in xfs_convert_page()
843 if (buffer_unwritten(bh)) in xfs_convert_page()
856 xfs_map_at_offset(bh, offset, bbits, mp); in xfs_convert_page()
858 xfs_add_to_ioend(inode, bh, offset, in xfs_convert_page()
861 set_buffer_dirty(bh); in xfs_convert_page()
862 unlock_buffer(bh); in xfs_convert_page()
863 mark_buffer_dirty(bh); in xfs_convert_page()
869 if (buffer_mapped(bh) && all_bh && startio) { in xfs_convert_page()
870 lock_buffer(bh); in xfs_convert_page()
871 xfs_add_to_ioend(inode, bh, offset, in xfs_convert_page()
879 } while (offset += len, (bh = bh->b_this_page) != head); in xfs_convert_page()
881 if (uptodate && bh == head) in xfs_convert_page()
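
The xfs_convert_page() matches show the full per-buffer dispatch over a follow-on page: buffers that are not uptodate drop the page's uptodate status, unwritten or delayed buffers get mapped and either queued on the ioend (under startio) or re-dirtied, and already-mapped buffers are queued when the whole cluster is being written (all_bh && startio). Two idioms are worth calling out: the loop condition at source line 879 advances the file offset and the ring cursor in one comma expression, and the test at line 881 marks the page uptodate only if the ring was walked to completion with every buffer uptodate. A compact model of that offset-tracking walk:

#include <stdio.h>

struct buf {
        struct buf *b_this_page;
        int uptodate;
};

int main(void)
{
        struct buf b[4] = {
                { &b[1], 1 }, { &b[2], 1 }, { &b[3], 0 }, { &b[0], 1 },
        };
        struct buf *head = &b[0], *bh = head;
        unsigned long long offset = 4096;       /* page's starting file offset */
        unsigned int len = 1024;                /* per-buffer size (b_size) */
        int uptodate = 1;

        do {
                if (!bh->uptodate)
                        uptodate = 0;   /* one stale buffer spoils the page */
                printf("buffer at offset %llu\n", offset);
        } while (offset += len, (bh = bh->b_this_page) != head);

        /* bh == head distinguishes a completed walk from an early break */
        if (uptodate && bh == head)
                printf("whole page uptodate\n");
        else
                printf("page not fully uptodate\n");
        return 0;
}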
971 struct buffer_head *bh, *head; in xfs_page_state_convert() local
1024 bh = head = page_buffers(page); in xfs_page_state_convert()
1034 if (!buffer_uptodate(bh)) in xfs_page_state_convert()
1036 if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) { in xfs_page_state_convert()
1058 if (buffer_unwritten(bh) || buffer_delay(bh) || in xfs_page_state_convert()
1059 ((buffer_uptodate(bh) || PageUptodate(page)) && in xfs_page_state_convert()
1060 !buffer_mapped(bh) && (unmapped || startio))) { in xfs_page_state_convert()
1069 if (buffer_unwritten(bh)) { in xfs_page_state_convert()
1072 } else if (buffer_delay(bh)) { in xfs_page_state_convert()
1092 page, bh, head, 0); in xfs_page_state_convert()
1104 xfs_map_at_offset(bh, offset, in xfs_page_state_convert()
1107 xfs_add_to_ioend(inode, bh, offset, in xfs_page_state_convert()
1111 set_buffer_dirty(bh); in xfs_page_state_convert()
1112 unlock_buffer(bh); in xfs_page_state_convert()
1113 mark_buffer_dirty(bh); in xfs_page_state_convert()
1118 } else if (buffer_uptodate(bh) && startio) { in xfs_page_state_convert()
1126 size = xfs_probe_cluster(inode, page, bh, in xfs_page_state_convert()
1144 if (trylock_buffer(bh)) { in xfs_page_state_convert()
1145 ASSERT(buffer_mapped(bh)); in xfs_page_state_convert()
1148 xfs_add_to_ioend(inode, bh, offset, type, in xfs_page_state_convert()
1155 } else if ((buffer_uptodate(bh) || PageUptodate(page)) && in xfs_page_state_convert()
1163 } while (offset += len, ((bh = bh->b_this_page) != head)); in xfs_page_state_convert()
1165 if (uptodate && bh == head) in xfs_page_state_convert()
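
xfs_page_state_convert() ties all of the above together and is where most of the bh references live: per buffer it either converts delalloc/unwritten state and maps the result, writes out an already-mapped uptodate buffer (probing ahead with xfs_probe_cluster() and taking the buffer with trylock_buffer() before queueing), or merely records page state. A condensed, assumption-laden sketch of the three-way dispatch drawn from the branch conditions at source lines 1058, 1118 and 1155:

#include <stdio.h>

enum action { CONVERT_AND_MAP, WRITE_MAPPED, SKIP };

/* condensed from the branch conditions at source lines 1058, 1118 and 1155;
 * the real code also tracks iomap validity, cluster size and I/O type */
static enum action classify(int unwritten, int delay, int uptodate,
                            int mapped, int startio, int unmapped)
{
        if (unwritten || delay ||
            (uptodate && !mapped && (unmapped || startio)))
                return CONVERT_AND_MAP; /* allocate/convert, then map */
        if (uptodate && startio)
                return WRITE_MAPPED;    /* already mapped: lock and queue */
        return SKIP;
}

int main(void)
{
        printf("%d\n", classify(0, 1, 0, 0, 1, 0)); /* delalloc -> CONVERT */
        printf("%d\n", classify(0, 0, 1, 1, 1, 0)); /* mapped -> WRITE */
        return 0;
}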