Lines matching refs: bh (struct buffer_head usage in the XFS address-space operations and writeback paths)
45 struct buffer_head *bh, *head; in xfs_count_page_state() local
49 bh = head = page_buffers(page); in xfs_count_page_state()
51 if (buffer_unwritten(bh)) in xfs_count_page_state()
53 else if (buffer_delay(bh)) in xfs_count_page_state()
55 } while ((bh = bh->b_this_page) != head); in xfs_count_page_state()
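Lines 45-55 are the circular buffer walk in xfs_count_page_state(), which records whether the page holds any unwritten or delalloc buffers. A hedged sketch of the whole function; the elided loop bodies are assumed to simply set the two output flags, and the delalloc/unwritten parameter names are assumptions, not taken from the matches:

	void
	xfs_count_page_state(
		struct page		*page,
		int			*delalloc,
		int			*unwritten)
	{
		struct buffer_head	*bh, *head;

		*delalloc = *unwritten = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				(*unwritten) = 1;	/* assumed body */
			else if (buffer_delay(bh))
				(*delalloc) = 1;	/* assumed body */
		} while ((bh = bh->b_this_page) != head);
	}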
81 struct buffer_head *bh, *next; in xfs_destroy_ioend() local
83 for (bh = ioend->io_buffer_head; bh; bh = next) { in xfs_destroy_ioend()
84 next = bh->b_private; in xfs_destroy_ioend()
85 bh->b_end_io(bh, !ioend->io_error); in xfs_destroy_ioend()
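An ioend keeps its buffers on a singly linked list threaded through b_private (built up in xfs_add_to_ioend(), further down); xfs_destroy_ioend() walks that list and signals completion on each buffer. Sketch of the bh portion only, with the surrounding ioend teardown omitted:

	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;	/* save the link before completion may reuse bh */
		bh->b_end_io(bh, !ioend->io_error);	/* uptodate = "this ioend had no error" */
	}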
390 struct buffer_head *bh) in xfs_alloc_ioend_bio() argument
395 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in xfs_alloc_ioend_bio()
396 bio->bi_bdev = bh->b_bdev; in xfs_alloc_ioend_bio()
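bh->b_blocknr counts b_size-sized filesystem blocks, so the bio's starting sector is b_blocknr scaled by (b_size >> 9), i.e. by sectors per block. A sketch, assuming the bio is allocated with bio_alloc(GFP_NOIO, ...) as was typical for this code:

	STATIC struct bio *
	xfs_alloc_ioend_bio(
		struct buffer_head	*bh)
	{
		struct bio	*bio = bio_alloc(GFP_NOIO, bio_get_nr_vecs(bh->b_bdev));

		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);	/* blocks -> 512B sectors */
		bio->bi_bdev = bh->b_bdev;
		return bio;
	}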
402 struct buffer_head *bh) in xfs_start_buffer_writeback() argument
404 ASSERT(buffer_mapped(bh)); in xfs_start_buffer_writeback()
405 ASSERT(buffer_locked(bh)); in xfs_start_buffer_writeback()
406 ASSERT(!buffer_delay(bh)); in xfs_start_buffer_writeback()
407 ASSERT(!buffer_unwritten(bh)); in xfs_start_buffer_writeback()
409 mark_buffer_async_write(bh); in xfs_start_buffer_writeback()
410 set_buffer_uptodate(bh); in xfs_start_buffer_writeback()
411 clear_buffer_dirty(bh); in xfs_start_buffer_writeback()
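Lines 404-411 are essentially all of xfs_start_buffer_writeback(): by this point the buffer must already be mapped and locked, and any delalloc or unwritten state must already have been converted; it is then flipped into the async-write state handled by the generic buffer completion code. Annotated copy:

	STATIC void
	xfs_start_buffer_writeback(
		struct buffer_head	*bh)
	{
		/* all state conversion must be finished before I/O starts */
		ASSERT(buffer_mapped(bh));
		ASSERT(buffer_locked(bh));
		ASSERT(!buffer_delay(bh));
		ASSERT(!buffer_unwritten(bh));

		mark_buffer_async_write(bh);	/* completion via end_buffer_async_write() */
		set_buffer_uptodate(bh);
		clear_buffer_dirty(bh);
	}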
443 static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh) in xfs_bio_add_buffer() argument
445 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in xfs_bio_add_buffer()
478 struct buffer_head *bh; in xfs_submit_ioend() local
485 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) in xfs_submit_ioend()
486 xfs_start_buffer_writeback(bh); in xfs_submit_ioend()
507 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { in xfs_submit_ioend()
511 bio = xfs_alloc_ioend_bio(bh); in xfs_submit_ioend()
512 } else if (bh->b_blocknr != lastblock + 1) { in xfs_submit_ioend()
517 if (xfs_bio_add_buffer(bio, bh) != bh->b_size) { in xfs_submit_ioend()
522 lastblock = bh->b_blocknr; in xfs_submit_ioend()
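xfs_submit_ioend() makes two passes over the b_private chain: the first (lines 485-486) marks every buffer for async writeback before any bio is issued, and the second (lines 507-522) packs physically contiguous buffers into bios. A hedged sketch of the second pass; xfs_submit_ioend_bio() is the helper that issues a completed bio, and its exact signature is assumed here:

	for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

		if (!bio) {
 retry:
			bio = xfs_alloc_ioend_bio(bh);
		} else if (bh->b_blocknr != lastblock + 1) {
			/* not physically contiguous: flush and start a new bio */
			xfs_submit_ioend_bio(wbc, ioend, bio);
			goto retry;
		}

		if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
			/* bio full: flush and retry this buffer in a fresh bio */
			xfs_submit_ioend_bio(wbc, ioend, bio);
			goto retry;
		}

		lastblock = bh->b_blocknr;
	}
	if (bio)
		xfs_submit_ioend_bio(wbc, ioend, bio);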
540 struct buffer_head *bh, *next_bh; in xfs_cancel_ioend() local
544 bh = ioend->io_buffer_head; in xfs_cancel_ioend()
546 next_bh = bh->b_private; in xfs_cancel_ioend()
547 clear_buffer_async_write(bh); in xfs_cancel_ioend()
554 set_buffer_unwritten(bh); in xfs_cancel_ioend()
555 unlock_buffer(bh); in xfs_cancel_ioend()
556 } while ((bh = next_bh) != NULL); in xfs_cancel_ioend()
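If writeback is aborted before submission, the chained buffers must be returned to a sane state: async-write is cleared, buffers from an unwritten extent get their unwritten bit back (it was cleared when they were mapped), and each buffer is unlocked. Sketch of the per-buffer loop in xfs_cancel_ioend(); the io_type condition is an assumption, and the enclosing loop that frees the ioends is omitted:

	bh = ioend->io_buffer_head;
	do {
		next_bh = bh->b_private;
		clear_buffer_async_write(bh);
		/*
		 * The unwritten flag was cleared when the buffer was added to
		 * the ioend; no I/O will happen, so restore it for the next
		 * writeback attempt.  (Condition assumed.)
		 */
		if (ioend->io_type == XFS_IO_UNWRITTEN)
			set_buffer_unwritten(bh);
		unlock_buffer(bh);
	} while ((bh = next_bh) != NULL);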
571 struct buffer_head *bh, in xfs_add_to_ioend() argument
584 ioend->io_buffer_head = bh; in xfs_add_to_ioend()
585 ioend->io_buffer_tail = bh; in xfs_add_to_ioend()
590 ioend->io_buffer_tail->b_private = bh; in xfs_add_to_ioend()
591 ioend->io_buffer_tail = bh; in xfs_add_to_ioend()
594 bh->b_private = NULL; in xfs_add_to_ioend()
595 ioend->io_size += bh->b_size; in xfs_add_to_ioend()
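xfs_add_to_ioend() is where the b_private chain is built: the first buffer becomes both head and tail of a new ioend (lines 584-585), later buffers are appended through the tail's b_private (lines 590-591), and the newly added buffer always terminates the list (line 594) while growing io_size (line 595). A hedged sketch; the "start a new ioend" condition is assumed:

	xfs_ioend_t	*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {	/* condition assumed */
		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;	/* first buffer: head == tail */
		ioend->io_buffer_tail = bh;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;	/* append to the chain */
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;			/* new tail terminates the list */
	ioend->io_size += bh->b_size;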
601 struct buffer_head *bh, in xfs_map_buffer() argument
618 bh->b_blocknr = bn; in xfs_map_buffer()
619 set_buffer_mapped(bh); in xfs_map_buffer()
625 struct buffer_head *bh, in xfs_map_at_offset() argument
632 xfs_map_buffer(inode, bh, imap, offset); in xfs_map_at_offset()
633 set_buffer_mapped(bh); in xfs_map_at_offset()
634 clear_buffer_delay(bh); in xfs_map_at_offset()
635 clear_buffer_unwritten(bh); in xfs_map_at_offset()
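xfs_map_buffer() translates the extent mapping (imap) plus the file offset into an on-disk block number and marks the buffer mapped (lines 618-619); xfs_map_at_offset() wraps it and clears the delalloc/unwritten bits now that the buffer has a real mapping (lines 632-635). Sketch of the wrapper; the block-number computation inside xfs_map_buffer() is only described, not reproduced:

	STATIC void
	xfs_map_at_offset(
		struct inode		*inode,
		struct buffer_head	*bh,
		struct xfs_bmbt_irec	*imap,
		xfs_off_t		offset)
	{
		/* derives bh->b_blocknr from imap + offset and sets the mapped bit */
		xfs_map_buffer(inode, bh, imap, offset);
		set_buffer_mapped(bh);
		clear_buffer_delay(bh);		/* no longer delalloc ... */
		clear_buffer_unwritten(bh);	/* ... or unwritten: it has a real block now */
	}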
650 struct buffer_head *bh; in xfs_check_page_type() local
660 bh = head = page_buffers(page); in xfs_check_page_type()
662 if (buffer_unwritten(bh)) { in xfs_check_page_type()
665 } else if (buffer_delay(bh)) { in xfs_check_page_type()
668 } else if (buffer_dirty(bh) && buffer_mapped(bh)) { in xfs_check_page_type()
676 } while ((bh = bh->b_this_page) != head); in xfs_check_page_type()
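xfs_check_page_type() walks the page's buffers and reports whether any of them matches the I/O type being built: unwritten, delalloc, or plain overwrite (dirty and already mapped). Hedged sketch of the classification loop; the XFS_IO_* comparison bodies are assumed from the usual shape of this function:

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)		/* assumed */
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)		/* assumed */
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)		/* assumed */
				return true;
		}
	} while ((bh = bh->b_this_page) != head);

	return false;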
696 struct buffer_head *bh, *head; in xfs_convert_page() local
766 bh = head = page_buffers(page); in xfs_convert_page()
770 if (!buffer_uptodate(bh)) in xfs_convert_page()
772 if (!(PageUptodate(page) || buffer_uptodate(bh))) { in xfs_convert_page()
777 if (buffer_unwritten(bh) || buffer_delay(bh) || in xfs_convert_page()
778 buffer_mapped(bh)) { in xfs_convert_page()
779 if (buffer_unwritten(bh)) in xfs_convert_page()
781 else if (buffer_delay(bh)) in xfs_convert_page()
792 lock_buffer(bh); in xfs_convert_page()
794 xfs_map_at_offset(inode, bh, imap, offset); in xfs_convert_page()
795 xfs_add_to_ioend(inode, bh, offset, type, in xfs_convert_page()
804 } while (offset += len, (bh = bh->b_this_page) != head); in xfs_convert_page()
806 if (uptodate && bh == head) in xfs_convert_page()
891 struct buffer_head *bh, *head; in xfs_aops_discard_page() local
905 bh = head = page_buffers(page); in xfs_aops_discard_page()
910 if (!buffer_delay(bh)) in xfs_aops_discard_page()
926 } while ((bh = bh->b_this_page) != head); in xfs_aops_discard_page()
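xfs_aops_discard_page() is the error path that throws away a dirty delalloc page: for every delayed buffer it punches the corresponding delayed allocation out of the extent tree so the reservation is not leaked. A condensed, hedged sketch of the loop, assuming xfs_bmap_punch_delalloc_range() is the punch helper and that offset advances by one filesystem block per buffer (error handling is omitted):

	bh = head = page_buffers(page);
	do {
		if (buffer_delay(bh)) {
			xfs_fileoff_t	start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);

			/* drop the delayed allocation backing this buffer */
			xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		}
		offset += 1 << inode->i_blkbits;
	} while ((bh = bh->b_this_page) != head);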
948 struct buffer_head *bh, *head; in xfs_vm_writepage() local
1054 bh = head = page_buffers(page); in xfs_vm_writepage()
1066 if (!buffer_uptodate(bh)) in xfs_vm_writepage()
1075 if (!buffer_mapped(bh) && buffer_uptodate(bh)) { in xfs_vm_writepage()
1080 if (buffer_unwritten(bh)) { in xfs_vm_writepage()
1085 } else if (buffer_delay(bh)) { in xfs_vm_writepage()
1090 } else if (buffer_uptodate(bh)) { in xfs_vm_writepage()
1097 ASSERT(buffer_mapped(bh)); in xfs_vm_writepage()
1127 lock_buffer(bh); in xfs_vm_writepage()
1129 xfs_map_at_offset(inode, bh, &imap, offset); in xfs_vm_writepage()
1130 xfs_add_to_ioend(inode, bh, offset, type, &ioend, in xfs_vm_writepage()
1138 } while (offset += len, ((bh = bh->b_this_page) != head)); in xfs_vm_writepage()
1140 if (uptodate && bh == head) in xfs_vm_writepage()
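The heart of xfs_vm_writepage() is the per-buffer classification loop (lines 1054-1138): buffers that are not uptodate clear the page's uptodate tracking, unmapped-but-uptodate buffers (holes) are skipped, and the rest are typed as unwritten, delalloc, or overwrite, mapped if necessary, and queued on the ioend chain. A condensed, hedged sketch; imap validation, the new_ioend bookkeeping, and setup of offset/len/type are simplified away, and the overwrite test before xfs_map_at_offset() is assumed:

	bh = head = page_buffers(page);
	do {
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/* unmapped but uptodate: a hole, dirtiness is meaningless, skip it */
		if (!buffer_mapped(bh) && buffer_uptodate(bh))
			continue;

		if (buffer_unwritten(bh)) {
			type = XFS_IO_UNWRITTEN;
		} else if (buffer_delay(bh)) {
			type = XFS_IO_DELALLOC;
		} else if (buffer_uptodate(bh)) {
			type = XFS_IO_OVERWRITE;
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			continue;
		}

		lock_buffer(bh);
		if (type != XFS_IO_OVERWRITE)		/* assumed guard */
			xfs_map_at_offset(inode, bh, &imap, offset);
		xfs_add_to_ioend(inode, bh, offset, type, &ioend, new_ioend);
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);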
1773 struct buffer_head *bh, *head; in xfs_vm_write_failed() local
1792 for (bh = head; bh != head || !block_start; in xfs_vm_write_failed()
1793 bh = bh->b_this_page, block_start = block_end, in xfs_vm_write_failed()
1794 block_offset += bh->b_size) { in xfs_vm_write_failed()
1795 block_end = block_start + bh->b_size; in xfs_vm_write_failed()
1805 if (!buffer_delay(bh)) in xfs_vm_write_failed()
1808 if (!buffer_new(bh) && block_offset < i_size_read(inode)) in xfs_vm_write_failed()
1812 block_offset + bh->b_size); in xfs_vm_write_failed()
1818 clear_buffer_delay(bh); in xfs_vm_write_failed()
1819 clear_buffer_uptodate(bh); in xfs_vm_write_failed()
1820 clear_buffer_mapped(bh); in xfs_vm_write_failed()
1821 clear_buffer_new(bh); in xfs_vm_write_failed()
1822 clear_buffer_dirty(bh); in xfs_vm_write_failed()
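xfs_vm_write_failed() walks the buffers of a page whose buffered write failed and unwinds their state: delalloc buffers that were newly created (or sit beyond i_size) get their delayed allocation punched out, and the speculative per-buffer flags are cleared so a later write starts clean. Condensed, hedged sketch of the per-buffer cleanup; the range checks against the failed write are elided, and the call whose second argument appears at line 1812 is assumed to be xfs_vm_kill_delalloc_range():

	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* (skip buffers outside the range of the failed write) */

		if (!buffer_delay(bh))
			continue;
		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		/* assumed helper: punch out the delalloc extent behind this buffer */
		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);

		/* the buffer no longer has data or a reservation behind it */
		clear_buffer_delay(bh);
		clear_buffer_uptodate(bh);
		clear_buffer_mapped(bh);
		clear_buffer_new(bh);
		clear_buffer_dirty(bh);
	}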
1984 struct buffer_head *bh = head; in xfs_vm_set_page_dirty() local
1988 set_buffer_dirty(bh); in xfs_vm_set_page_dirty()
1989 bh = bh->b_this_page; in xfs_vm_set_page_dirty()
1991 } while (bh != head); in xfs_vm_set_page_dirty()
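xfs_vm_set_page_dirty() exists so that dirtying a page does not dirty buffers beyond EOF; the loop at lines 1984-1991 walks the page's buffers and, in the full function, only sets the dirty bit on buffers whose offset is still inside i_size. Sketch with that guard made explicit; the offset/end_offset bookkeeping is assumed:

	struct buffer_head	*bh = head;

	do {
		if (offset < end_offset)	/* assumed: only dirty buffers inside EOF */
			set_buffer_dirty(bh);
		bh = bh->b_this_page;
		offset += 1 << inode->i_blkbits;
	} while (bh != head);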