Lines matching refs:bh

Cross-reference listing for every line that touches a struct buffer_head * named bh. Each entry gives the line number within the source file (the numbers and function names appear to match fs/buffer.c from a 2.6-series kernel), the matching line, and the enclosing function; "argument" marks bh as a parameter of that function, "local" marks it as a local variable.
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private) in init_buffer() argument
52 bh->b_end_io = handler; in init_buffer()
53 bh->b_private = private; in init_buffer()
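The three matching lines above are the whole of init_buffer(); only the braces are elided by the search. Reconstructed, it is a plain setter for the I/O completion callback and its private cookie:

    void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
    {
        bh->b_end_io = handler;
        bh->b_private = private;
    }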
59 struct buffer_head *bh in sync_buffer() local
63 bd = bh->b_bdev; in sync_buffer()
70 void __lock_buffer(struct buffer_head *bh) in __lock_buffer() argument
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer, in __lock_buffer()
77 void unlock_buffer(struct buffer_head *bh) in unlock_buffer() argument
79 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
81 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
89 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer() argument
91 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
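__lock_buffer(), unlock_buffer() and __wait_on_buffer() together implement the BH_Lock bit protocol in bh->b_state: sleep on the bit with wait_on_bit_lock()/wait_on_bit(), release it with clear_bit_unlock() plus wake_up_bit(). A minimal caller-side sketch (lock_buffer() is the usual inline entry point from <linux/buffer_head.h>, which tries trylock_buffer() before sleeping in __lock_buffer()):

    #include <linux/buffer_head.h>

    /* Sketch: serialize access to a buffer's data.  May sleep. */
    static void with_buffer_locked(struct buffer_head *bh)
    {
        lock_buffer(bh);        /* trylock, else sleep on BH_Lock */
        /* ... bh is exclusively held: safe to read/modify b_data ... */
        unlock_buffer(bh);      /* clear_bit_unlock + wake_up_bit */
    }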
103 static int quiet_error(struct buffer_head *bh) in quiet_error() argument
105 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit()) in quiet_error()
111 static void buffer_io_error(struct buffer_head *bh) in buffer_io_error() argument
115 bdevname(bh->b_bdev, b), in buffer_io_error()
116 (unsigned long long)bh->b_blocknr); in buffer_io_error()
127 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch() argument
130 set_buffer_uptodate(bh); in __end_buffer_read_notouch()
133 clear_buffer_uptodate(bh); in __end_buffer_read_notouch()
135 unlock_buffer(bh); in __end_buffer_read_notouch()
142 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync() argument
144 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_sync()
145 put_bh(bh); in end_buffer_read_sync()
148 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync() argument
153 set_buffer_uptodate(bh); in end_buffer_write_sync()
155 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) { in end_buffer_write_sync()
156 buffer_io_error(bh); in end_buffer_write_sync()
159 bdevname(bh->b_bdev, b)); in end_buffer_write_sync()
161 set_buffer_write_io_error(bh); in end_buffer_write_sync()
162 clear_buffer_uptodate(bh); in end_buffer_write_sync()
164 unlock_buffer(bh); in end_buffer_write_sync()
165 put_bh(bh); in end_buffer_write_sync()
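end_buffer_read_sync() and end_buffer_write_sync() are the b_end_io completion handlers for synchronous, single-buffer I/O: on completion they set or clear BH_Uptodate, unlock the buffer, and drop the extra reference. The caller-side pattern, which __bread_slow() (line 1302 below) follows, looks like this hedged sketch:

    /* Sketch: one synchronous block read, assuming a mapped bh. */
    static int read_block_sync(struct buffer_head *bh)
    {
        lock_buffer(bh);
        get_bh(bh);                          /* ref dropped by put_bh() in the handler */
        bh->b_end_io = end_buffer_read_sync; /* sets uptodate, unlocks, put_bh */
        submit_bh(READ, bh);
        wait_on_buffer(bh);                  /* sleep until BH_Lock clears */
        return buffer_uptodate(bh) ? 0 : -EIO;
    }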
331 struct buffer_head *bh; in __find_get_block_slow() local
345 bh = head; in __find_get_block_slow()
347 if (bh->b_blocknr == block) { in __find_get_block_slow()
348 ret = bh; in __find_get_block_slow()
349 get_bh(bh); in __find_get_block_slow()
352 if (!buffer_mapped(bh)) in __find_get_block_slow()
354 bh = bh->b_this_page; in __find_get_block_slow()
355 } while (bh != head); in __find_get_block_slow()
366 (unsigned long long)bh->b_blocknr); in __find_get_block_slow()
368 bh->b_state, bh->b_size); in __find_get_block_slow()
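__find_get_block_slow() falls back from the per-CPU LRU to the block device's page cache: it finds the page that should hold the block, then walks the page's buffer ring comparing b_blocknr. The do/while ring walk recurs throughout this file, because the buffers of a page form a singly linked circular list through b_this_page. An illustrative (not in-kernel) helper showing the shape of the loop at lines 345-355:

    /* Count the buffer_heads attached to a page by walking the
     * circular b_this_page ring. */
    static int count_page_buffers(struct page *page)
    {
        struct buffer_head *bh, *head;
        int n = 0;

        head = page_buffers(page);
        bh = head;
        do {
            n++;
            bh = bh->b_this_page;
        } while (bh != head);
        return n;
    }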
446 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read() argument
454 BUG_ON(!buffer_async_read(bh)); in end_buffer_async_read()
456 page = bh->b_page; in end_buffer_async_read()
458 set_buffer_uptodate(bh); in end_buffer_async_read()
460 clear_buffer_uptodate(bh); in end_buffer_async_read()
461 if (!quiet_error(bh)) in end_buffer_async_read()
462 buffer_io_error(bh); in end_buffer_async_read()
474 clear_buffer_async_read(bh); in end_buffer_async_read()
475 unlock_buffer(bh); in end_buffer_async_read()
476 tmp = bh; in end_buffer_async_read()
485 } while (tmp != bh); in end_buffer_async_read()
508 static void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write() argument
516 BUG_ON(!buffer_async_write(bh)); in end_buffer_async_write()
518 page = bh->b_page; in end_buffer_async_write()
520 set_buffer_uptodate(bh); in end_buffer_async_write()
522 if (!quiet_error(bh)) { in end_buffer_async_write()
523 buffer_io_error(bh); in end_buffer_async_write()
526 bdevname(bh->b_bdev, b)); in end_buffer_async_write()
529 set_buffer_write_io_error(bh); in end_buffer_async_write()
530 clear_buffer_uptodate(bh); in end_buffer_async_write()
538 clear_buffer_async_write(bh); in end_buffer_async_write()
539 unlock_buffer(bh); in end_buffer_async_write()
540 tmp = bh->b_this_page; in end_buffer_async_write()
541 while (tmp != bh) { in end_buffer_async_write()
580 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read() argument
582 bh->b_end_io = end_buffer_async_read; in mark_buffer_async_read()
583 set_buffer_async_read(bh); in mark_buffer_async_read()
586 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write() argument
588 bh->b_end_io = end_buffer_async_write; in mark_buffer_async_write()
589 set_buffer_async_write(bh); in mark_buffer_async_write()
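mark_buffer_async_read() and mark_buffer_async_write() only wire a buffer up for per-page async completion: once every async buffer on a page has finished, end_buffer_async_read() can unlock the page (or, for writes, end_buffer_async_write() can end page writeback). The ring re-walks at lines 476-485 and 540-541 above are checking exactly that, whether any async buffers remain outstanding on the page.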
646 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue() argument
648 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
649 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
650 if (buffer_write_io_error(bh)) in __remove_assoc_queue()
651 set_bit(AS_EIO, &bh->b_assoc_map->flags); in __remove_assoc_queue()
652 bh->b_assoc_map = NULL; in __remove_assoc_queue()
672 struct buffer_head *bh; in osync_buffers_list() local
679 bh = BH_ENTRY(p); in osync_buffers_list()
680 if (buffer_locked(bh)) { in osync_buffers_list()
681 get_bh(bh); in osync_buffers_list()
683 wait_on_buffer(bh); in osync_buffers_list()
684 if (!buffer_uptodate(bh)) in osync_buffers_list()
686 brelse(bh); in osync_buffers_list()
727 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block() local
728 if (bh) { in write_boundary_block()
729 if (buffer_dirty(bh)) in write_boundary_block()
730 ll_rw_block(WRITE, 1, &bh); in write_boundary_block()
731 put_bh(bh); in write_boundary_block()
735 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode() argument
738 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
740 mark_buffer_dirty(bh); in mark_buffer_dirty_inode()
746 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
748 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
750 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
820 struct buffer_head *bh = head; in __set_page_dirty_buffers() local
823 set_buffer_dirty(bh); in __set_page_dirty_buffers()
824 bh = bh->b_this_page; in __set_page_dirty_buffers()
825 } while (bh != head); in __set_page_dirty_buffers()
857 struct buffer_head *bh; in fsync_buffers_list() local
866 bh = BH_ENTRY(list->next); in fsync_buffers_list()
867 mapping = bh->b_assoc_map; in fsync_buffers_list()
868 __remove_assoc_queue(bh); in fsync_buffers_list()
872 if (buffer_dirty(bh) || buffer_locked(bh)) { in fsync_buffers_list()
873 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
874 bh->b_assoc_map = mapping; in fsync_buffers_list()
875 if (buffer_dirty(bh)) { in fsync_buffers_list()
876 get_bh(bh); in fsync_buffers_list()
884 ll_rw_block(SWRITE_SYNC, 1, &bh); in fsync_buffers_list()
885 brelse(bh); in fsync_buffers_list()
892 bh = BH_ENTRY(tmp.prev); in fsync_buffers_list()
893 get_bh(bh); in fsync_buffers_list()
894 mapping = bh->b_assoc_map; in fsync_buffers_list()
895 __remove_assoc_queue(bh); in fsync_buffers_list()
899 if (buffer_dirty(bh)) { in fsync_buffers_list()
900 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
902 bh->b_assoc_map = mapping; in fsync_buffers_list()
905 wait_on_buffer(bh); in fsync_buffers_list()
906 if (!buffer_uptodate(bh)) in fsync_buffers_list()
908 brelse(bh); in fsync_buffers_list()
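fsync_buffers_list() is the write-out half of the association machinery set up by mark_buffer_dirty_inode() (line 735): dirty metadata buffers queued on an inode's list are first submitted (the ll_rw_block(SWRITE_SYNC, ...) at line 884) and then waited on in a second pass over the tmp list. Filesystems normally reach it through sync_mapping_buffers(); a hedged sketch of a 2.6.28-era ->fsync built on it, with error handling trimmed:

    /* Sketch: flush the metadata buffers previously queued with
     * mark_buffer_dirty_inode(). */
    static int fs_fsync(struct file *file, struct dentry *dentry, int datasync)
    {
        struct inode *inode = dentry->d_inode;

        return sync_mapping_buffers(inode->i_mapping);
    }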
961 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers() local
962 if (buffer_dirty(bh)) { in remove_inode_buffers()
966 __remove_assoc_queue(bh); in remove_inode_buffers()
985 struct buffer_head *bh, *head; in alloc_page_buffers() local
992 bh = alloc_buffer_head(GFP_NOFS); in alloc_page_buffers()
993 if (!bh) in alloc_page_buffers()
996 bh->b_bdev = NULL; in alloc_page_buffers()
997 bh->b_this_page = head; in alloc_page_buffers()
998 bh->b_blocknr = -1; in alloc_page_buffers()
999 head = bh; in alloc_page_buffers()
1001 bh->b_state = 0; in alloc_page_buffers()
1002 atomic_set(&bh->b_count, 0); in alloc_page_buffers()
1003 bh->b_private = NULL; in alloc_page_buffers()
1004 bh->b_size = size; in alloc_page_buffers()
1007 set_bh_page(bh, page, offset); in alloc_page_buffers()
1009 init_buffer(bh, NULL, NULL); in alloc_page_buffers()
1018 bh = head; in alloc_page_buffers()
1020 free_buffer_head(bh); in alloc_page_buffers()
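alloc_page_buffers() builds the chain head-first: each new head is pushed in front (lines 997-999), and on allocation failure the partial chain is torn down again (lines 1018-1020). Note the chain is still NULL-terminated at this point; link_dev_buffers()/create_empty_buffers() close it into a ring (hence the "} while (bh)" at lines 1053 and 1632, as opposed to "while (bh != head)"). The third parameter is a retry flag; a sketch of the call, with make_ring a hypothetical helper name of ours:

    /* retry=0: may return NULL under memory pressure; the write-out
     * path passes retry=1 and loops until the allocation succeeds. */
    static struct buffer_head *make_ring(struct page *page, unsigned long size)
    {
        return alloc_page_buffers(page, size, 0);
    }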
1047 struct buffer_head *bh, *tail; in link_dev_buffers() local
1049 bh = head; in link_dev_buffers()
1051 tail = bh; in link_dev_buffers()
1052 bh = bh->b_this_page; in link_dev_buffers()
1053 } while (bh); in link_dev_buffers()
1066 struct buffer_head *bh = head; in init_page_buffers() local
1070 if (!buffer_mapped(bh)) { in init_page_buffers()
1071 init_buffer(bh, NULL, NULL); in init_page_buffers()
1072 bh->b_bdev = bdev; in init_page_buffers()
1073 bh->b_blocknr = block; in init_page_buffers()
1075 set_buffer_uptodate(bh); in init_page_buffers()
1076 set_buffer_mapped(bh); in init_page_buffers()
1079 bh = bh->b_this_page; in init_page_buffers()
1080 } while (bh != head); in init_page_buffers()
1094 struct buffer_head *bh; in grow_dev_page() local
1104 bh = page_buffers(page); in grow_dev_page()
1105 if (bh->b_size == size) { in grow_dev_page()
1116 bh = alloc_page_buffers(page, size, 0); in grow_dev_page()
1117 if (!bh) in grow_dev_page()
1126 link_dev_buffers(page, bh); in grow_dev_page()
1195 struct buffer_head * bh; in __getblk_slow() local
1198 bh = __find_get_block(bdev, block, size); in __getblk_slow()
1199 if (bh) in __getblk_slow()
1200 return bh; in __getblk_slow()
1245 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty() argument
1247 WARN_ON_ONCE(!buffer_uptodate(bh)); in mark_buffer_dirty()
1255 if (buffer_dirty(bh)) { in mark_buffer_dirty()
1257 if (buffer_dirty(bh)) in mark_buffer_dirty()
1261 if (!test_set_buffer_dirty(bh)) { in mark_buffer_dirty()
1262 struct page *page = bh->b_page; in mark_buffer_dirty()
1288 void __bforget(struct buffer_head *bh) in __bforget() argument
1290 clear_buffer_dirty(bh); in __bforget()
1291 if (bh->b_assoc_map) { in __bforget()
1292 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1295 list_del_init(&bh->b_assoc_buffers); in __bforget()
1296 bh->b_assoc_map = NULL; in __bforget()
1299 __brelse(bh); in __bforget()
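__bforget() is the "drop the data too" counterpart of __brelse(): it cancels the dirty state and detaches the buffer from its inode's association list before releasing the reference, so an abandoned update is never written back. Hypothetical use, via the inline bforget() wrapper:

    /* Sketch: abandon an uncommitted metadata update. */
    static void abandon_block(struct buffer_head *bh)
    {
        bforget(bh);    /* clear dirty, unhook from inode queue, drop ref */
    }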
1302 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow() argument
1304 lock_buffer(bh); in __bread_slow()
1305 if (buffer_uptodate(bh)) { in __bread_slow()
1306 unlock_buffer(bh); in __bread_slow()
1307 return bh; in __bread_slow()
1309 get_bh(bh); in __bread_slow()
1310 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1311 submit_bh(READ, bh); in __bread_slow()
1312 wait_on_buffer(bh); in __bread_slow()
1313 if (buffer_uptodate(bh)) in __bread_slow()
1314 return bh; in __bread_slow()
1316 brelse(bh); in __bread_slow()
1360 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install() argument
1368 if (lru->bhs[0] != bh) { in bh_lru_install()
1373 get_bh(bh); in bh_lru_install()
1374 bhs[out++] = bh; in bh_lru_install()
1378 if (bh2 == bh) { in bh_lru_install()
1413 struct buffer_head *bh = lru->bhs[i]; in lookup_bh_lru() local
1415 if (bh && bh->b_bdev == bdev && in lookup_bh_lru()
1416 bh->b_blocknr == block && bh->b_size == size) { in lookup_bh_lru()
1422 lru->bhs[0] = bh; in lookup_bh_lru()
1424 get_bh(bh); in lookup_bh_lru()
1425 ret = bh; in lookup_bh_lru()
1441 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block() local
1443 if (bh == NULL) { in __find_get_block()
1444 bh = __find_get_block_slow(bdev, block); in __find_get_block()
1445 if (bh) in __find_get_block()
1446 bh_lru_install(bh); in __find_get_block()
1448 if (bh) in __find_get_block()
1449 touch_buffer(bh); in __find_get_block()
1450 return bh; in __find_get_block()
1469 struct buffer_head *bh = __find_get_block(bdev, block, size); in __getblk() local
1472 if (bh == NULL) in __getblk()
1473 bh = __getblk_slow(bdev, block, size); in __getblk()
1474 return bh; in __getblk()
1483 struct buffer_head *bh = __getblk(bdev, block, size); in __breadahead() local
1484 if (likely(bh)) { in __breadahead()
1485 ll_rw_block(READA, 1, &bh); in __breadahead()
1486 brelse(bh); in __breadahead()
1503 struct buffer_head *bh = __getblk(bdev, block, size); in __bread() local
1505 if (likely(bh) && !buffer_uptodate(bh)) in __bread()
1506 bh = __bread_slow(bh); in __bread()
1507 return bh; in __bread()
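Lines 1360-1507 are the block lookup stack: __bread() tries the per-CPU LRU (lookup_bh_lru), then the page cache (__find_get_block_slow via __find_get_block), and finally creates the buffer and reads it in (__getblk_slow, __bread_slow). Typical filesystem usage, sketched with a hypothetical caller (sb_bread() from <linux/buffer_head.h> is the common wrapper passing sb->s_bdev and sb->s_blocksize):

    #include <linux/buffer_head.h>

    /* Sketch: read one metadata block and release it. */
    static int read_meta_block(struct super_block *sb, sector_t block)
    {
        struct buffer_head *bh;

        bh = __bread(sb->s_bdev, block, sb->s_blocksize);
        if (!bh)
            return -EIO;            /* I/O error or allocation failure */
        /* ... interpret bh->b_data ... */
        brelse(bh);                 /* drop the reference __bread took */
        return 0;
    }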
1534 void set_bh_page(struct buffer_head *bh, in set_bh_page() argument
1537 bh->b_page = page; in set_bh_page()
1543 bh->b_data = (char *)(0 + offset); in set_bh_page()
1545 bh->b_data = page_address(page) + offset; in set_bh_page()
1552 static void discard_buffer(struct buffer_head * bh) in discard_buffer() argument
1554 lock_buffer(bh); in discard_buffer()
1555 clear_buffer_dirty(bh); in discard_buffer()
1556 bh->b_bdev = NULL; in discard_buffer()
1557 clear_buffer_mapped(bh); in discard_buffer()
1558 clear_buffer_req(bh); in discard_buffer()
1559 clear_buffer_new(bh); in discard_buffer()
1560 clear_buffer_delay(bh); in discard_buffer()
1561 clear_buffer_unwritten(bh); in discard_buffer()
1562 unlock_buffer(bh); in discard_buffer()
1582 struct buffer_head *head, *bh, *next; in block_invalidatepage() local
1590 bh = head; in block_invalidatepage()
1592 unsigned int next_off = curr_off + bh->b_size; in block_invalidatepage()
1593 next = bh->b_this_page; in block_invalidatepage()
1599 discard_buffer(bh); in block_invalidatepage()
1601 bh = next; in block_invalidatepage()
1602 } while (bh != head); in block_invalidatepage()
1624 struct buffer_head *bh, *head, *tail; in create_empty_buffers() local
1627 bh = head; in create_empty_buffers()
1629 bh->b_state |= b_state; in create_empty_buffers()
1630 tail = bh; in create_empty_buffers()
1631 bh = bh->b_this_page; in create_empty_buffers()
1632 } while (bh); in create_empty_buffers()
1637 bh = head; in create_empty_buffers()
1640 set_buffer_dirty(bh); in create_empty_buffers()
1642 set_buffer_uptodate(bh); in create_empty_buffers()
1643 bh = bh->b_this_page; in create_empty_buffers()
1644 } while (bh != head); in create_empty_buffers()
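create_empty_buffers() allocates the chain, closes it into a ring (the tail->b_this_page = head assignment is elided between lines 1632 and 1637), propagates the page's dirty/uptodate state onto the buffers, and attaches the ring to the page. The guard used by block_read_full_page() and __block_write_full_page() before walking a page looks like this sketch:

    /* Sketch: ensure a page has a buffer ring before walking it. */
    static struct buffer_head *page_ring(struct page *page, unsigned long blocksize)
    {
        if (!page_has_buffers(page))
            create_empty_buffers(page, blocksize, 0);
        return page_buffers(page);
    }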
1714 struct buffer_head *bh, *head; in __block_write_full_page() local
1739 bh = head; in __block_write_full_page()
1755 clear_buffer_dirty(bh); in __block_write_full_page()
1756 set_buffer_uptodate(bh); in __block_write_full_page()
1757 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && in __block_write_full_page()
1758 buffer_dirty(bh)) { in __block_write_full_page()
1759 WARN_ON(bh->b_size != blocksize); in __block_write_full_page()
1760 err = get_block(inode, block, bh, 1); in __block_write_full_page()
1763 clear_buffer_delay(bh); in __block_write_full_page()
1764 if (buffer_new(bh)) { in __block_write_full_page()
1766 clear_buffer_new(bh); in __block_write_full_page()
1767 unmap_underlying_metadata(bh->b_bdev, in __block_write_full_page()
1768 bh->b_blocknr); in __block_write_full_page()
1771 bh = bh->b_this_page; in __block_write_full_page()
1773 } while (bh != head); in __block_write_full_page()
1776 if (!buffer_mapped(bh)) in __block_write_full_page()
1786 lock_buffer(bh); in __block_write_full_page()
1787 } else if (!trylock_buffer(bh)) { in __block_write_full_page()
1791 if (test_clear_buffer_dirty(bh)) { in __block_write_full_page()
1792 mark_buffer_async_write(bh); in __block_write_full_page()
1794 unlock_buffer(bh); in __block_write_full_page()
1796 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1806 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1807 if (buffer_async_write(bh)) { in __block_write_full_page()
1808 submit_bh(WRITE, bh); in __block_write_full_page()
1811 bh = next; in __block_write_full_page()
1812 } while (bh != head); in __block_write_full_page()
1839 bh = head; in __block_write_full_page()
1842 if (buffer_mapped(bh) && buffer_dirty(bh) && in __block_write_full_page()
1843 !buffer_delay(bh)) { in __block_write_full_page()
1844 lock_buffer(bh); in __block_write_full_page()
1845 mark_buffer_async_write(bh); in __block_write_full_page()
1851 clear_buffer_dirty(bh); in __block_write_full_page()
1853 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1859 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1860 if (buffer_async_write(bh)) { in __block_write_full_page()
1861 clear_buffer_dirty(bh); in __block_write_full_page()
1862 submit_bh(WRITE, bh); in __block_write_full_page()
1865 bh = next; in __block_write_full_page()
1866 } while (bh != head); in __block_write_full_page()
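__block_write_full_page() runs in phases over the ring: map still-unmapped dirty buffers through the filesystem's get_block callback (line 1760), lock each dirty buffer and mark it async (lines 1786-1794), then submit them all (lines 1806-1812); lines 1839-1866 are the error path that still writes out whatever is mappable. The get_block_t contract it relies on, illustrated by a hypothetical trivial 1:1 mapping:

    /* get_block_t: map logical block @iblock of @inode into bh
     * (b_bdev, b_blocknr, b_size, BH_Mapped); with create != 0 it may
     * allocate and must then also set BH_New.  Identity mapping: */
    static int demo_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh, int create)
    {
        map_bh(bh, inode->i_sb, iblock);  /* fills bdev/blocknr/size, sets mapped */
        return 0;
    }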
1879 struct buffer_head *head, *bh; in page_zero_new_buffers() local
1885 bh = head = page_buffers(page); in page_zero_new_buffers()
1888 block_end = block_start + bh->b_size; in page_zero_new_buffers()
1890 if (buffer_new(bh)) { in page_zero_new_buffers()
1899 set_buffer_uptodate(bh); in page_zero_new_buffers()
1902 clear_buffer_new(bh); in page_zero_new_buffers()
1903 mark_buffer_dirty(bh); in page_zero_new_buffers()
1908 bh = bh->b_this_page; in page_zero_new_buffers()
1909 } while (bh != head); in page_zero_new_buffers()
1920 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_prepare_write() local
1935 for(bh = head, block_start = 0; bh != head || !block_start; in __block_prepare_write()
1936 block++, block_start=block_end, bh = bh->b_this_page) { in __block_prepare_write()
1940 if (!buffer_uptodate(bh)) in __block_prepare_write()
1941 set_buffer_uptodate(bh); in __block_prepare_write()
1945 if (buffer_new(bh)) in __block_prepare_write()
1946 clear_buffer_new(bh); in __block_prepare_write()
1947 if (!buffer_mapped(bh)) { in __block_prepare_write()
1948 WARN_ON(bh->b_size != blocksize); in __block_prepare_write()
1949 err = get_block(inode, block, bh, 1); in __block_prepare_write()
1952 if (buffer_new(bh)) { in __block_prepare_write()
1953 unmap_underlying_metadata(bh->b_bdev, in __block_prepare_write()
1954 bh->b_blocknr); in __block_prepare_write()
1956 clear_buffer_new(bh); in __block_prepare_write()
1957 set_buffer_uptodate(bh); in __block_prepare_write()
1958 mark_buffer_dirty(bh); in __block_prepare_write()
1969 if (!buffer_uptodate(bh)) in __block_prepare_write()
1970 set_buffer_uptodate(bh); in __block_prepare_write()
1973 if (!buffer_uptodate(bh) && !buffer_delay(bh) && in __block_prepare_write()
1974 !buffer_unwritten(bh) && in __block_prepare_write()
1976 ll_rw_block(READ, 1, &bh); in __block_prepare_write()
1977 *wait_bh++=bh; in __block_prepare_write()
1999 struct buffer_head *bh, *head; in __block_commit_write() local
2003 for(bh = head = page_buffers(page), block_start = 0; in __block_commit_write()
2004 bh != head || !block_start; in __block_commit_write()
2005 block_start=block_end, bh = bh->b_this_page) { in __block_commit_write()
2008 if (!buffer_uptodate(bh)) in __block_commit_write()
2011 set_buffer_uptodate(bh); in __block_commit_write()
2012 mark_buffer_dirty(bh); in __block_commit_write()
2014 clear_buffer_new(bh); in __block_commit_write()
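__block_prepare_write() and __block_commit_write() are the two halves of the buffered-write protocol: prepare maps the affected buffers and reads in any partially overwritten, non-uptodate ones (the ll_rw_block(READ, ...) at line 1976); commit marks them uptodate and dirty. In the 2.6.28 era they are reached through block_write_begin()/generic_write_end(); a hedged sketch of the glue, with fs_get_block standing in for the filesystem's mapping callback:

    /* Sketch of a ->write_begin built on __block_prepare_write(). */
    static int fs_write_begin(struct file *file, struct address_space *mapping,
                              loff_t pos, unsigned len, unsigned flags,
                              struct page **pagep, void **fsdata)
    {
        *pagep = NULL;
        return block_write_begin(file, mapping, pos, len, flags,
                                 pagep, fsdata, fs_get_block);
    }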
2174 struct buffer_head *bh, *head; in block_is_partially_uptodate() local
2187 bh = head; in block_is_partially_uptodate()
2192 if (!buffer_uptodate(bh)) { in block_is_partially_uptodate()
2200 bh = bh->b_this_page; in block_is_partially_uptodate()
2201 } while (bh != head); in block_is_partially_uptodate()
2218 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_page() local
2231 bh = head; in block_read_full_page()
2236 if (buffer_uptodate(bh)) in block_read_full_page()
2239 if (!buffer_mapped(bh)) { in block_read_full_page()
2244 WARN_ON(bh->b_size != blocksize); in block_read_full_page()
2245 err = get_block(inode, iblock, bh, 0); in block_read_full_page()
2249 if (!buffer_mapped(bh)) { in block_read_full_page()
2252 set_buffer_uptodate(bh); in block_read_full_page()
2259 if (buffer_uptodate(bh)) in block_read_full_page()
2262 arr[nr++] = bh; in block_read_full_page()
2263 } while (i++, iblock++, (bh = bh->b_this_page) != head); in block_read_full_page()
2281 bh = arr[i]; in block_read_full_page()
2282 lock_buffer(bh); in block_read_full_page()
2283 mark_buffer_async_read(bh); in block_read_full_page()
2292 bh = arr[i]; in block_read_full_page()
2293 if (buffer_uptodate(bh)) in block_read_full_page()
2294 end_buffer_async_read(bh, 1); in block_read_full_page()
2296 submit_bh(READ, bh); in block_read_full_page()
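block_read_full_page() maps each buffer (get_block with create=0, line 2245), zeroes holes, collects the buffers that still need I/O into arr[], marks them async read, and submits them. How these generic helpers are typically wired up, sketched as a minimal 2.6.28-era address_space_operations (fs_get_block again a hypothetical callback):

    static int fs_readpage(struct file *file, struct page *page)
    {
        return block_read_full_page(page, fs_get_block);
    }

    static int fs_writepage(struct page *page, struct writeback_control *wbc)
    {
        return block_write_full_page(page, fs_get_block, wbc);
    }

    static const struct address_space_operations fs_aops = {
        .readpage  = fs_readpage,
        .writepage = fs_writepage,
        .sync_page = block_sync_page,
    };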
2504 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) in end_buffer_read_nobh() argument
2506 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_nobh()
2516 struct buffer_head *bh; in attach_nobh_buffers() local
2521 bh = head; in attach_nobh_buffers()
2524 set_buffer_dirty(bh); in attach_nobh_buffers()
2525 if (!bh->b_this_page) in attach_nobh_buffers()
2526 bh->b_this_page = head; in attach_nobh_buffers()
2527 bh = bh->b_this_page; in attach_nobh_buffers()
2528 } while (bh != head); in attach_nobh_buffers()
2545 struct buffer_head *head, *bh; in nobh_write_begin() local
2599 for (block_start = 0, block_in_page = 0, bh = head; in nobh_write_begin()
2601 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { in nobh_write_begin()
2605 bh->b_state = 0; in nobh_write_begin()
2610 bh, create); in nobh_write_begin()
2613 if (!buffer_mapped(bh)) in nobh_write_begin()
2615 if (buffer_new(bh)) in nobh_write_begin()
2616 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); in nobh_write_begin()
2618 set_buffer_uptodate(bh); in nobh_write_begin()
2621 if (buffer_new(bh) || !buffer_mapped(bh)) { in nobh_write_begin()
2626 if (buffer_uptodate(bh)) in nobh_write_begin()
2629 lock_buffer(bh); in nobh_write_begin()
2630 bh->b_end_io = end_buffer_read_nobh; in nobh_write_begin()
2631 submit_bh(READ, bh); in nobh_write_begin()
2642 for (bh = head; bh; bh = bh->b_this_page) { in nobh_write_begin()
2643 wait_on_buffer(bh); in nobh_write_begin()
2644 if (!buffer_uptodate(bh)) in nobh_write_begin()
2688 struct buffer_head *bh; in nobh_write_end() local
2708 bh = head; in nobh_write_end()
2710 free_buffer_head(bh); in nobh_write_end()
2854 struct buffer_head *bh; in block_truncate_page() local
2876 bh = page_buffers(page); in block_truncate_page()
2879 bh = bh->b_this_page; in block_truncate_page()
2885 if (!buffer_mapped(bh)) { in block_truncate_page()
2886 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2887 err = get_block(inode, iblock, bh, 0); in block_truncate_page()
2891 if (!buffer_mapped(bh)) in block_truncate_page()
2897 set_buffer_uptodate(bh); in block_truncate_page()
2899 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { in block_truncate_page()
2901 ll_rw_block(READ, 1, &bh); in block_truncate_page()
2902 wait_on_buffer(bh); in block_truncate_page()
2904 if (!buffer_uptodate(bh)) in block_truncate_page()
2909 mark_buffer_dirty(bh); in block_truncate_page()
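block_truncate_page() zeroes the tail of the block straddling the new EOF, reading it in first if necessary (lines 2899-2904), and dirties it. A 2.6-era truncate path would call it before freeing blocks, roughly as in this sketch:

    /* Sketch: zero the partial last block at the new i_size
     * before the filesystem trims its block map. */
    static void fs_truncate(struct inode *inode)
    {
        block_truncate_page(inode->i_mapping, inode->i_size, fs_get_block);
        /* ... release blocks beyond i_size ... */
    }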
2972 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync() local
2976 set_bit(BH_Eopnotsupp, &bh->b_state); in end_bio_bh_io_sync()
2980 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
2982 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); in end_bio_bh_io_sync()
2986 int submit_bh(int rw, struct buffer_head * bh) in submit_bh() argument
2991 BUG_ON(!buffer_locked(bh)); in submit_bh()
2992 BUG_ON(!buffer_mapped(bh)); in submit_bh()
2993 BUG_ON(!bh->b_end_io); in submit_bh()
2999 if (buffer_ordered(bh) && (rw & WRITE)) in submit_bh()
3005 if (test_set_buffer_req(bh) && (rw & WRITE)) in submit_bh()
3006 clear_buffer_write_io_error(bh); in submit_bh()
3014 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh()
3015 bio->bi_bdev = bh->b_bdev; in submit_bh()
3016 bio->bi_io_vec[0].bv_page = bh->b_page; in submit_bh()
3017 bio->bi_io_vec[0].bv_len = bh->b_size; in submit_bh()
3018 bio->bi_io_vec[0].bv_offset = bh_offset(bh); in submit_bh()
3022 bio->bi_size = bh->b_size; in submit_bh()
3025 bio->bi_private = bh; in submit_bh()
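The bh-to-bio translation at lines 3014-3022 in numbers: a 4 KiB buffer (b_size 4096) at b_blocknr 100 starts at 512-byte sector 100 * (4096 >> 9) = 100 * 8 = 800, and travels as a single bio_vec covering bh_offset(bh) bytes into bh->b_page.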
3068 struct buffer_head *bh = bhs[i]; in ll_rw_block() local
3071 lock_buffer(bh); in ll_rw_block()
3072 else if (!trylock_buffer(bh)) in ll_rw_block()
3076 if (test_clear_buffer_dirty(bh)) { in ll_rw_block()
3077 bh->b_end_io = end_buffer_write_sync; in ll_rw_block()
3078 get_bh(bh); in ll_rw_block()
3080 submit_bh(WRITE_SYNC, bh); in ll_rw_block()
3082 submit_bh(WRITE, bh); in ll_rw_block()
3086 if (!buffer_uptodate(bh)) { in ll_rw_block()
3087 bh->b_end_io = end_buffer_read_sync; in ll_rw_block()
3088 get_bh(bh); in ll_rw_block()
3089 submit_bh(rw, bh); in ll_rw_block()
3093 unlock_buffer(bh); in ll_rw_block()
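ll_rw_block() batches I/O starts but gives no completion guarantee, and it silently skips buffers it cannot trylock or that need no I/O, so callers must wait and re-check afterwards. A sketch of the usual pattern:

    /* Sketch: start two reads at once, then wait for both. */
    static int read_pair(struct buffer_head *bh1, struct buffer_head *bh2)
    {
        struct buffer_head *bhs[2] = { bh1, bh2 };

        ll_rw_block(READ, 2, bhs);
        wait_on_buffer(bh1);
        wait_on_buffer(bh2);
        return (buffer_uptodate(bh1) && buffer_uptodate(bh2)) ? 0 : -EIO;
    }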
3102 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer() argument
3106 WARN_ON(atomic_read(&bh->b_count) < 1); in sync_dirty_buffer()
3107 lock_buffer(bh); in sync_dirty_buffer()
3108 if (test_clear_buffer_dirty(bh)) { in sync_dirty_buffer()
3109 get_bh(bh); in sync_dirty_buffer()
3110 bh->b_end_io = end_buffer_write_sync; in sync_dirty_buffer()
3111 ret = submit_bh(WRITE, bh); in sync_dirty_buffer()
3112 wait_on_buffer(bh); in sync_dirty_buffer()
3113 if (buffer_eopnotsupp(bh)) { in sync_dirty_buffer()
3114 clear_buffer_eopnotsupp(bh); in sync_dirty_buffer()
3117 if (!ret && !buffer_uptodate(bh)) in sync_dirty_buffer()
3120 unlock_buffer(bh); in sync_dirty_buffer()
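sync_dirty_buffer() is write-and-wait for a single buffer: clear the dirty bit, submit with end_buffer_write_sync(), sleep until completion, and report -EOPNOTSUPP (the buffer_eopnotsupp() check at line 3113, for failed barrier writes) or -EIO. The canonical update-then-force pattern, sketched:

    #include <linux/string.h>

    /* Sketch: modify a block in place and force it to disk. */
    static int update_block_sync(struct buffer_head *bh,
                                 const void *data, size_t len)
    {
        lock_buffer(bh);
        memcpy(bh->b_data, data, len);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty(bh);
        return sync_dirty_buffer(bh);   /* 0, -EIO or -EOPNOTSUPP */
    }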
3145 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy() argument
3147 return atomic_read(&bh->b_count) | in buffer_busy()
3148 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
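buffer_busy() folds the reference count and the Dirty/Lock state bits into one integer with bitwise OR: any nonzero contributor (elevated b_count, BH_Dirty set, or BH_Lock set) makes the result nonzero, i.e. busy.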
3155 struct buffer_head *bh; in drop_buffers() local
3157 bh = head; in drop_buffers()
3159 if (buffer_write_io_error(bh) && page->mapping) in drop_buffers()
3161 if (buffer_busy(bh)) in drop_buffers()
3163 bh = bh->b_this_page; in drop_buffers()
3164 } while (bh != head); in drop_buffers()
3167 struct buffer_head *next = bh->b_this_page; in drop_buffers()
3169 if (bh->b_assoc_map) in drop_buffers()
3170 __remove_assoc_queue(bh); in drop_buffers()
3171 bh = next; in drop_buffers()
3172 } while (bh != head); in drop_buffers()
3217 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers() local
3220 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
3221 free_buffer_head(bh); in try_to_free_buffers()
3222 bh = next; in try_to_free_buffers()
3223 } while (bh != buffers_to_free); in try_to_free_buffers()
3312 void free_buffer_head(struct buffer_head *bh) in free_buffer_head() argument
3314 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3315 kmem_cache_free(bh_cachep, bh); in free_buffer_head()
3351 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock() argument
3353 if (!buffer_uptodate(bh)) { in bh_uptodate_or_lock()
3354 lock_buffer(bh); in bh_uptodate_or_lock()
3355 if (!buffer_uptodate(bh)) in bh_uptodate_or_lock()
3357 unlock_buffer(bh); in bh_uptodate_or_lock()
3369 int bh_submit_read(struct buffer_head *bh) in bh_submit_read() argument
3371 BUG_ON(!buffer_locked(bh)); in bh_submit_read()
3373 if (buffer_uptodate(bh)) { in bh_submit_read()
3374 unlock_buffer(bh); in bh_submit_read()
3378 get_bh(bh); in bh_submit_read()
3379 bh->b_end_io = end_buffer_read_sync; in bh_submit_read()
3380 submit_bh(READ, bh); in bh_submit_read()
3381 wait_on_buffer(bh); in bh_submit_read()
3382 if (buffer_uptodate(bh)) in bh_submit_read()
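bh_uptodate_or_lock() and bh_submit_read() (added for ext4's benefit) compose into a race-free "read only if needed" step: the first returns 1 if the buffer is already uptodate, otherwise it returns 0 with the buffer locked, which is exactly the state the second expects. Sketch:

    /* Sketch: bring a mapped buffer uptodate, reading only if needed. */
    static int read_if_needed(struct buffer_head *bh)
    {
        if (bh_uptodate_or_lock(bh))
            return 0;               /* already uptodate, never locked */
        return bh_submit_read(bh);  /* consumes the lock; 0 or -EIO */
    }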
3391 struct buffer_head *bh = data; in init_buffer_head() local
3393 memset(bh, 0, sizeof(*bh)); in init_buffer_head()
3394 INIT_LIST_HEAD(&bh->b_assoc_buffers); in init_buffer_head()
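init_buffer_head() is the slab constructor passed to kmem_cache_create(): when the cache populates a slab, each object is zeroed and its b_assoc_buffers list initialized, which is why free_buffer_head() can BUG_ON a non-empty list at line 3314. Its registration, as buffer_init() does it in this era of kmem_cache_create():

    bh_cachep = kmem_cache_create("buffer_head",
            sizeof(struct buffer_head), 0,
            (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD),
            init_buffer_head);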