Lines Matching refs:bh (fs/buffer.c)
48 static int submit_bh_wbc(int rw, struct buffer_head *bh,
54 void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private) in init_buffer() argument
56 bh->b_end_io = handler; in init_buffer()
57 bh->b_private = private; in init_buffer()
61 inline void touch_buffer(struct buffer_head *bh) in touch_buffer() argument
63 trace_block_touch_buffer(bh); in touch_buffer()
64 mark_page_accessed(bh->b_page); in touch_buffer()
68 void __lock_buffer(struct buffer_head *bh) in __lock_buffer() argument
70 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __lock_buffer()
74 void unlock_buffer(struct buffer_head *bh) in unlock_buffer() argument
76 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
78 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
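
Both halves of the BH_Lock protocol appear above: __lock_buffer() sleeps on the bit in TASK_UNINTERRUPTIBLE, and unlock_buffer() clears it with release semantics and wakes any waiter. A minimal caller-side sketch (kernel context assumed; the update in the middle is hypothetical):

    #include <linux/buffer_head.h>

    static void update_block(struct buffer_head *bh)
    {
            lock_buffer(bh);        /* may sleep in __lock_buffer() */
            /* ... mutate bh->b_data while BH_Lock is held ... */
            unlock_buffer(bh);      /* clear_bit_unlock() + wake_up_bit() */
    }
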
90 struct buffer_head *head, *bh; in buffer_check_dirty_writeback() local
103 bh = head; in buffer_check_dirty_writeback()
105 if (buffer_locked(bh)) in buffer_check_dirty_writeback()
108 if (buffer_dirty(bh)) in buffer_check_dirty_writeback()
111 bh = bh->b_this_page; in buffer_check_dirty_writeback()
112 } while (bh != head); in buffer_check_dirty_writeback()
121 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer() argument
123 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
135 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error() argument
139 if (!test_bit(BH_Quiet, &bh->b_state)) in buffer_io_error()
142 bdevname(bh->b_bdev, b), in buffer_io_error()
143 (unsigned long long)bh->b_blocknr, msg); in buffer_io_error()
154 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch() argument
157 set_buffer_uptodate(bh); in __end_buffer_read_notouch()
160 clear_buffer_uptodate(bh); in __end_buffer_read_notouch()
162 unlock_buffer(bh); in __end_buffer_read_notouch()
169 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync() argument
171 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_sync()
172 put_bh(bh); in end_buffer_read_sync()
176 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync() argument
179 set_buffer_uptodate(bh); in end_buffer_write_sync()
181 buffer_io_error(bh, ", lost sync page write"); in end_buffer_write_sync()
182 set_buffer_write_io_error(bh); in end_buffer_write_sync()
183 clear_buffer_uptodate(bh); in end_buffer_write_sync()
185 unlock_buffer(bh); in end_buffer_write_sync()
186 put_bh(bh); in end_buffer_write_sync()
208 struct buffer_head *bh; in __find_get_block_slow() local
222 bh = head; in __find_get_block_slow()
224 if (!buffer_mapped(bh)) in __find_get_block_slow()
226 else if (bh->b_blocknr == block) { in __find_get_block_slow()
227 ret = bh; in __find_get_block_slow()
228 get_bh(bh); in __find_get_block_slow()
231 bh = bh->b_this_page; in __find_get_block_slow()
232 } while (bh != head); in __find_get_block_slow()
245 (unsigned long long)bh->b_blocknr); in __find_get_block_slow()
247 bh->b_state, bh->b_size); in __find_get_block_slow()
283 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read() argument
291 BUG_ON(!buffer_async_read(bh)); in end_buffer_async_read()
293 page = bh->b_page; in end_buffer_async_read()
295 set_buffer_uptodate(bh); in end_buffer_async_read()
297 clear_buffer_uptodate(bh); in end_buffer_async_read()
298 buffer_io_error(bh, ", async page read"); in end_buffer_async_read()
310 clear_buffer_async_read(bh); in end_buffer_async_read()
311 unlock_buffer(bh); in end_buffer_async_read()
312 tmp = bh; in end_buffer_async_read()
321 } while (tmp != bh); in end_buffer_async_read()
344 void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write() argument
351 BUG_ON(!buffer_async_write(bh)); in end_buffer_async_write()
353 page = bh->b_page; in end_buffer_async_write()
355 set_buffer_uptodate(bh); in end_buffer_async_write()
357 buffer_io_error(bh, ", lost async page write"); in end_buffer_async_write()
359 set_buffer_write_io_error(bh); in end_buffer_async_write()
360 clear_buffer_uptodate(bh); in end_buffer_async_write()
368 clear_buffer_async_write(bh); in end_buffer_async_write()
369 unlock_buffer(bh); in end_buffer_async_write()
370 tmp = bh->b_this_page; in end_buffer_async_write()
371 while (tmp != bh) { in end_buffer_async_write()
411 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read() argument
413 bh->b_end_io = end_buffer_async_read; in mark_buffer_async_read()
414 set_buffer_async_read(bh); in mark_buffer_async_read()
417 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio() argument
420 bh->b_end_io = handler; in mark_buffer_async_write_endio()
421 set_buffer_async_write(bh); in mark_buffer_async_write_endio()
424 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write() argument
426 mark_buffer_async_write_endio(bh, end_buffer_async_write); in mark_buffer_async_write()
483 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue() argument
485 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
486 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
487 if (buffer_write_io_error(bh)) in __remove_assoc_queue()
488 set_bit(AS_EIO, &bh->b_assoc_map->flags); in __remove_assoc_queue()
489 bh->b_assoc_map = NULL; in __remove_assoc_queue()
509 struct buffer_head *bh; in osync_buffers_list() local
516 bh = BH_ENTRY(p); in osync_buffers_list()
517 if (buffer_locked(bh)) { in osync_buffers_list()
518 get_bh(bh); in osync_buffers_list()
520 wait_on_buffer(bh); in osync_buffers_list()
521 if (!buffer_uptodate(bh)) in osync_buffers_list()
523 brelse(bh); in osync_buffers_list()
595 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block() local
596 if (bh) { in write_boundary_block()
597 if (buffer_dirty(bh)) in write_boundary_block()
598 ll_rw_block(WRITE, 1, &bh); in write_boundary_block()
599 put_bh(bh); in write_boundary_block()
603 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode() argument
606 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
608 mark_buffer_dirty(bh); in mark_buffer_dirty_inode()
614 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
616 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
618 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
685 struct buffer_head *bh = head; in __set_page_dirty_buffers() local
688 set_buffer_dirty(bh); in __set_page_dirty_buffers()
689 bh = bh->b_this_page; in __set_page_dirty_buffers()
690 } while (bh != head); in __set_page_dirty_buffers()
733 struct buffer_head *bh; in fsync_buffers_list() local
744 bh = BH_ENTRY(list->next); in fsync_buffers_list()
745 mapping = bh->b_assoc_map; in fsync_buffers_list()
746 __remove_assoc_queue(bh); in fsync_buffers_list()
750 if (buffer_dirty(bh) || buffer_locked(bh)) { in fsync_buffers_list()
751 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
752 bh->b_assoc_map = mapping; in fsync_buffers_list()
753 if (buffer_dirty(bh)) { in fsync_buffers_list()
754 get_bh(bh); in fsync_buffers_list()
763 write_dirty_buffer(bh, WRITE_SYNC); in fsync_buffers_list()
771 brelse(bh); in fsync_buffers_list()
782 bh = BH_ENTRY(tmp.prev); in fsync_buffers_list()
783 get_bh(bh); in fsync_buffers_list()
784 mapping = bh->b_assoc_map; in fsync_buffers_list()
785 __remove_assoc_queue(bh); in fsync_buffers_list()
789 if (buffer_dirty(bh)) { in fsync_buffers_list()
790 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
792 bh->b_assoc_map = mapping; in fsync_buffers_list()
795 wait_on_buffer(bh); in fsync_buffers_list()
796 if (!buffer_uptodate(bh)) in fsync_buffers_list()
798 brelse(bh); in fsync_buffers_list()
851 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers() local
852 if (buffer_dirty(bh)) { in remove_inode_buffers()
856 __remove_assoc_queue(bh); in remove_inode_buffers()
875 struct buffer_head *bh, *head; in alloc_page_buffers() local
882 bh = alloc_buffer_head(GFP_NOFS); in alloc_page_buffers()
883 if (!bh) in alloc_page_buffers()
886 bh->b_this_page = head; in alloc_page_buffers()
887 bh->b_blocknr = -1; in alloc_page_buffers()
888 head = bh; in alloc_page_buffers()
890 bh->b_size = size; in alloc_page_buffers()
893 set_bh_page(bh, page, offset); in alloc_page_buffers()
902 bh = head; in alloc_page_buffers()
904 free_buffer_head(bh); in alloc_page_buffers()
931 struct buffer_head *bh, *tail; in link_dev_buffers() local
933 bh = head; in link_dev_buffers()
935 tail = bh; in link_dev_buffers()
936 bh = bh->b_this_page; in link_dev_buffers()
937 } while (bh); in link_dev_buffers()
962 struct buffer_head *bh = head; in init_page_buffers() local
967 if (!buffer_mapped(bh)) { in init_page_buffers()
968 init_buffer(bh, NULL, NULL); in init_page_buffers()
969 bh->b_bdev = bdev; in init_page_buffers()
970 bh->b_blocknr = block; in init_page_buffers()
972 set_buffer_uptodate(bh); in init_page_buffers()
974 set_buffer_mapped(bh); in init_page_buffers()
977 bh = bh->b_this_page; in init_page_buffers()
978 } while (bh != head); in init_page_buffers()
997 struct buffer_head *bh; in grow_dev_page() local
1019 bh = page_buffers(page); in grow_dev_page()
1020 if (bh->b_size == size) { in grow_dev_page()
1033 bh = alloc_page_buffers(page, size, 0); in grow_dev_page()
1034 if (!bh) in grow_dev_page()
1043 link_dev_buffers(page, bh); in grow_dev_page()
1107 struct buffer_head *bh; in __getblk_slow() local
1110 bh = __find_get_block(bdev, block, size); in __getblk_slow()
1111 if (bh) in __getblk_slow()
1112 return bh; in __getblk_slow()
1158 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty() argument
1160 WARN_ON_ONCE(!buffer_uptodate(bh)); in mark_buffer_dirty()
1162 trace_block_dirty_buffer(bh); in mark_buffer_dirty()
1170 if (buffer_dirty(bh)) { in mark_buffer_dirty()
1172 if (buffer_dirty(bh)) in mark_buffer_dirty()
1176 if (!test_set_buffer_dirty(bh)) { in mark_buffer_dirty()
1177 struct page *page = bh->b_page; in mark_buffer_dirty()
1215 void __bforget(struct buffer_head *bh) in __bforget() argument
1217 clear_buffer_dirty(bh); in __bforget()
1218 if (bh->b_assoc_map) { in __bforget()
1219 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1222 list_del_init(&bh->b_assoc_buffers); in __bforget()
1223 bh->b_assoc_map = NULL; in __bforget()
1226 __brelse(bh); in __bforget()
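
__bforget() is the "never mind" counterpart of __brelse(): it detaches the buffer from its inode's associated-buffer list and clears the dirty bit so the block is never written back. A hedged sketch of the usual reason to call it, via the bforget() wrapper:

    mark_buffer_dirty(bh);
    /* ... the block later turns out to be unneeded (freed, aborted) ... */
    bforget(bh);            /* clear_buffer_dirty() + __brelse(); no writeback */
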
1230 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow() argument
1232 lock_buffer(bh); in __bread_slow()
1233 if (buffer_uptodate(bh)) { in __bread_slow()
1234 unlock_buffer(bh); in __bread_slow()
1235 return bh; in __bread_slow()
1237 get_bh(bh); in __bread_slow()
1238 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1239 submit_bh(READ, bh); in __bread_slow()
1240 wait_on_buffer(bh); in __bread_slow()
1241 if (buffer_uptodate(bh)) in __bread_slow()
1242 return bh; in __bread_slow()
1244 brelse(bh); in __bread_slow()
1288 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install() argument
1294 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) { in bh_lru_install()
1299 get_bh(bh); in bh_lru_install()
1300 bhs[out++] = bh; in bh_lru_install()
1305 if (bh2 == bh) { in bh_lru_install()
1338 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru() local
1340 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && in lookup_bh_lru()
1341 bh->b_size == size) { in lookup_bh_lru()
1348 __this_cpu_write(bh_lrus.bhs[0], bh); in lookup_bh_lru()
1350 get_bh(bh); in lookup_bh_lru()
1351 ret = bh; in lookup_bh_lru()
1367 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block() local
1369 if (bh == NULL) { in __find_get_block()
1371 bh = __find_get_block_slow(bdev, block); in __find_get_block()
1372 if (bh) in __find_get_block()
1373 bh_lru_install(bh); in __find_get_block()
1375 touch_buffer(bh); in __find_get_block()
1377 return bh; in __find_get_block()
1393 struct buffer_head *bh = __find_get_block(bdev, block, size); in __getblk_gfp() local
1396 if (bh == NULL) in __getblk_gfp()
1397 bh = __getblk_slow(bdev, block, size, gfp); in __getblk_gfp()
1398 return bh; in __getblk_gfp()
1407 struct buffer_head *bh = __getblk(bdev, block, size); in __breadahead() local
1408 if (likely(bh)) { in __breadahead()
1409 ll_rw_block(READA, 1, &bh); in __breadahead()
1410 brelse(bh); in __breadahead()
1431 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __bread_gfp() local
1433 if (likely(bh) && !buffer_uptodate(bh)) in __bread_gfp()
1434 bh = __bread_slow(bh); in __bread_gfp()
1435 return bh; in __bread_gfp()
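
__bread_gfp() is the read-and-wait entry point: look up (or create) the buffer, and if it is not uptodate fall into __bread_slow() above, which submits a READ with end_buffer_read_sync and waits. A hedged usage sketch through the plain __bread() wrapper; read_meta_block() and blk are hypothetical:

    #include <linux/buffer_head.h>

    static int read_meta_block(struct super_block *sb, sector_t blk)
    {
            struct buffer_head *bh;

            bh = __bread(sb->s_bdev, blk, sb->s_blocksize);
            if (!bh)
                    return -EIO;    /* allocation failure or I/O error */
            /* ... parse the sb->s_blocksize bytes at bh->b_data ... */
            brelse(bh);             /* drop the reference __bread() took */
            return 0;
    }
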
1475 void set_bh_page(struct buffer_head *bh, in set_bh_page() argument
1478 bh->b_page = page; in set_bh_page()
1484 bh->b_data = (char *)(0 + offset); in set_bh_page()
1486 bh->b_data = page_address(page) + offset; in set_bh_page()
1499 static void discard_buffer(struct buffer_head * bh) in discard_buffer() argument
1503 lock_buffer(bh); in discard_buffer()
1504 clear_buffer_dirty(bh); in discard_buffer()
1505 bh->b_bdev = NULL; in discard_buffer()
1506 b_state = bh->b_state; in discard_buffer()
1508 b_state_old = cmpxchg(&bh->b_state, b_state, in discard_buffer()
1514 unlock_buffer(bh); in discard_buffer()
1536 struct buffer_head *head, *bh, *next; in block_invalidatepage() local
1550 bh = head; in block_invalidatepage()
1552 unsigned int next_off = curr_off + bh->b_size; in block_invalidatepage()
1553 next = bh->b_this_page; in block_invalidatepage()
1565 discard_buffer(bh); in block_invalidatepage()
1567 bh = next; in block_invalidatepage()
1568 } while (bh != head); in block_invalidatepage()
1591 struct buffer_head *bh, *head, *tail; in create_empty_buffers() local
1594 bh = head; in create_empty_buffers()
1596 bh->b_state |= b_state; in create_empty_buffers()
1597 tail = bh; in create_empty_buffers()
1598 bh = bh->b_this_page; in create_empty_buffers()
1599 } while (bh); in create_empty_buffers()
1604 bh = head; in create_empty_buffers()
1607 set_buffer_dirty(bh); in create_empty_buffers()
1609 set_buffer_uptodate(bh); in create_empty_buffers()
1610 bh = bh->b_this_page; in create_empty_buffers()
1611 } while (bh != head); in create_empty_buffers()
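
create_empty_buffers() is what a filesystem calls on a page that has no buffers yet: the first loop links the ring, the second propagates the page's dirty/uptodate state into every bh. A hedged sketch of the usual guard around it:

    if (!page_has_buffers(page))
            create_empty_buffers(page, 1 << inode->i_blkbits, 0);
    head = page_buffers(page);
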
1708 struct buffer_head *bh, *head; in __block_write_full_page() local
1726 bh = head; in __block_write_full_page()
1727 blocksize = bh->b_size; in __block_write_full_page()
1747 clear_buffer_dirty(bh); in __block_write_full_page()
1748 set_buffer_uptodate(bh); in __block_write_full_page()
1749 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && in __block_write_full_page()
1750 buffer_dirty(bh)) { in __block_write_full_page()
1751 WARN_ON(bh->b_size != blocksize); in __block_write_full_page()
1752 err = get_block(inode, block, bh, 1); in __block_write_full_page()
1755 clear_buffer_delay(bh); in __block_write_full_page()
1756 if (buffer_new(bh)) { in __block_write_full_page()
1758 clear_buffer_new(bh); in __block_write_full_page()
1759 unmap_underlying_metadata(bh->b_bdev, in __block_write_full_page()
1760 bh->b_blocknr); in __block_write_full_page()
1763 bh = bh->b_this_page; in __block_write_full_page()
1765 } while (bh != head); in __block_write_full_page()
1768 if (!buffer_mapped(bh)) in __block_write_full_page()
1778 lock_buffer(bh); in __block_write_full_page()
1779 } else if (!trylock_buffer(bh)) { in __block_write_full_page()
1783 if (test_clear_buffer_dirty(bh)) { in __block_write_full_page()
1784 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1786 unlock_buffer(bh); in __block_write_full_page()
1788 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1798 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1799 if (buffer_async_write(bh)) { in __block_write_full_page()
1800 submit_bh_wbc(write_op, bh, 0, wbc); in __block_write_full_page()
1803 bh = next; in __block_write_full_page()
1804 } while (bh != head); in __block_write_full_page()
1831 bh = head; in __block_write_full_page()
1834 if (buffer_mapped(bh) && buffer_dirty(bh) && in __block_write_full_page()
1835 !buffer_delay(bh)) { in __block_write_full_page()
1836 lock_buffer(bh); in __block_write_full_page()
1837 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1843 clear_buffer_dirty(bh); in __block_write_full_page()
1845 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1851 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1852 if (buffer_async_write(bh)) { in __block_write_full_page()
1853 clear_buffer_dirty(bh); in __block_write_full_page()
1854 submit_bh_wbc(write_op, bh, 0, wbc); in __block_write_full_page()
1857 bh = next; in __block_write_full_page()
1858 } while (bh != head); in __block_write_full_page()
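
__block_write_full_page() maps each dirty buffer with get_block, marks it async-write, and submits it via submit_bh_wbc(); the second pass (1831 onward) is the recovery path taken when get_block fails. Filesystems reach it through block_write_full_page(); a hedged sketch with a hypothetical myfs_get_block:

    static int myfs_writepage(struct page *page, struct writeback_control *wbc)
    {
            return block_write_full_page(page, myfs_get_block, wbc);
    }
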
1871 struct buffer_head *head, *bh; in page_zero_new_buffers() local
1877 bh = head = page_buffers(page); in page_zero_new_buffers()
1880 block_end = block_start + bh->b_size; in page_zero_new_buffers()
1882 if (buffer_new(bh)) { in page_zero_new_buffers()
1891 set_buffer_uptodate(bh); in page_zero_new_buffers()
1894 clear_buffer_new(bh); in page_zero_new_buffers()
1895 mark_buffer_dirty(bh); in page_zero_new_buffers()
1900 bh = bh->b_this_page; in page_zero_new_buffers()
1901 } while (bh != head); in page_zero_new_buffers()
1915 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin() local
1928 for(bh = head, block_start = 0; bh != head || !block_start; in __block_write_begin()
1929 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin()
1933 if (!buffer_uptodate(bh)) in __block_write_begin()
1934 set_buffer_uptodate(bh); in __block_write_begin()
1938 if (buffer_new(bh)) in __block_write_begin()
1939 clear_buffer_new(bh); in __block_write_begin()
1940 if (!buffer_mapped(bh)) { in __block_write_begin()
1941 WARN_ON(bh->b_size != blocksize); in __block_write_begin()
1942 err = get_block(inode, block, bh, 1); in __block_write_begin()
1945 if (buffer_new(bh)) { in __block_write_begin()
1946 unmap_underlying_metadata(bh->b_bdev, in __block_write_begin()
1947 bh->b_blocknr); in __block_write_begin()
1949 clear_buffer_new(bh); in __block_write_begin()
1950 set_buffer_uptodate(bh); in __block_write_begin()
1951 mark_buffer_dirty(bh); in __block_write_begin()
1962 if (!buffer_uptodate(bh)) in __block_write_begin()
1963 set_buffer_uptodate(bh); in __block_write_begin()
1966 if (!buffer_uptodate(bh) && !buffer_delay(bh) && in __block_write_begin()
1967 !buffer_unwritten(bh) && in __block_write_begin()
1969 ll_rw_block(READ, 1, &bh); in __block_write_begin()
1970 *wait_bh++=bh; in __block_write_begin()
1993 struct buffer_head *bh, *head; in __block_commit_write() local
1995 bh = head = page_buffers(page); in __block_commit_write()
1996 blocksize = bh->b_size; in __block_commit_write()
2002 if (!buffer_uptodate(bh)) in __block_commit_write()
2005 set_buffer_uptodate(bh); in __block_commit_write()
2006 mark_buffer_dirty(bh); in __block_commit_write()
2008 clear_buffer_new(bh); in __block_commit_write()
2011 bh = bh->b_this_page; in __block_commit_write()
2012 } while (bh != head); in __block_commit_write()
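
__block_write_begin() (1915 onward) maps, and where necessary reads in, the buffers covering the byte range about to be written; __block_commit_write() then marks that range's buffers uptodate and dirty. Most filesystems get the pair through the generic helpers; a hedged sketch, again with a hypothetical myfs_get_block:

    static int myfs_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
    {
            return block_write_begin(mapping, pos, len, flags, pagep,
                                     myfs_get_block);
    }
    /* the matching .write_end is typically just generic_write_end() */
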
2142 struct buffer_head *bh, *head; in block_is_partially_uptodate() local
2155 bh = head; in block_is_partially_uptodate()
2160 if (!buffer_uptodate(bh)) { in block_is_partially_uptodate()
2168 bh = bh->b_this_page; in block_is_partially_uptodate()
2169 } while (bh != head); in block_is_partially_uptodate()
2186 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_page() local
2197 bh = head; in block_read_full_page()
2202 if (buffer_uptodate(bh)) in block_read_full_page()
2205 if (!buffer_mapped(bh)) { in block_read_full_page()
2210 WARN_ON(bh->b_size != blocksize); in block_read_full_page()
2211 err = get_block(inode, iblock, bh, 0); in block_read_full_page()
2215 if (!buffer_mapped(bh)) { in block_read_full_page()
2218 set_buffer_uptodate(bh); in block_read_full_page()
2225 if (buffer_uptodate(bh)) in block_read_full_page()
2228 arr[nr++] = bh; in block_read_full_page()
2229 } while (i++, iblock++, (bh = bh->b_this_page) != head); in block_read_full_page()
2247 bh = arr[i]; in block_read_full_page()
2248 lock_buffer(bh); in block_read_full_page()
2249 mark_buffer_async_read(bh); in block_read_full_page()
2258 bh = arr[i]; in block_read_full_page()
2259 if (buffer_uptodate(bh)) in block_read_full_page()
2260 end_buffer_async_read(bh, 1); in block_read_full_page()
2262 submit_bh(READ, bh); in block_read_full_page()
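
block_read_full_page() gathers the mapped-but-not-uptodate buffers into arr[], marks them async-read, and submits them; a buffer that became uptodate in the meantime has its completion called by hand (2260). Wiring it into the address_space_operations is one line; a hedged sketch:

    static int myfs_readpage(struct file *file, struct page *page)
    {
            return block_read_full_page(page, myfs_get_block);
    }
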
2469 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) in end_buffer_read_nobh() argument
2471 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_nobh()
2481 struct buffer_head *bh; in attach_nobh_buffers() local
2486 bh = head; in attach_nobh_buffers()
2489 set_buffer_dirty(bh); in attach_nobh_buffers()
2490 if (!bh->b_this_page) in attach_nobh_buffers()
2491 bh->b_this_page = head; in attach_nobh_buffers()
2492 bh = bh->b_this_page; in attach_nobh_buffers()
2493 } while (bh != head); in attach_nobh_buffers()
2511 struct buffer_head *head, *bh; in nobh_write_begin() local
2564 for (block_start = 0, block_in_page = 0, bh = head; in nobh_write_begin()
2566 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { in nobh_write_begin()
2570 bh->b_state = 0; in nobh_write_begin()
2575 bh, create); in nobh_write_begin()
2578 if (!buffer_mapped(bh)) in nobh_write_begin()
2580 if (buffer_new(bh)) in nobh_write_begin()
2581 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); in nobh_write_begin()
2583 set_buffer_uptodate(bh); in nobh_write_begin()
2586 if (buffer_new(bh) || !buffer_mapped(bh)) { in nobh_write_begin()
2591 if (buffer_uptodate(bh)) in nobh_write_begin()
2594 lock_buffer(bh); in nobh_write_begin()
2595 bh->b_end_io = end_buffer_read_nobh; in nobh_write_begin()
2596 submit_bh(READ, bh); in nobh_write_begin()
2607 for (bh = head; bh; bh = bh->b_this_page) { in nobh_write_begin()
2608 wait_on_buffer(bh); in nobh_write_begin()
2609 if (!buffer_uptodate(bh)) in nobh_write_begin()
2650 struct buffer_head *bh; in nobh_write_end() local
2670 bh = head; in nobh_write_end()
2672 free_buffer_head(bh); in nobh_write_end()
2819 struct buffer_head *bh; in block_truncate_page() local
2841 bh = page_buffers(page); in block_truncate_page()
2844 bh = bh->b_this_page; in block_truncate_page()
2850 if (!buffer_mapped(bh)) { in block_truncate_page()
2851 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2852 err = get_block(inode, iblock, bh, 0); in block_truncate_page()
2856 if (!buffer_mapped(bh)) in block_truncate_page()
2862 set_buffer_uptodate(bh); in block_truncate_page()
2864 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { in block_truncate_page()
2866 ll_rw_block(READ, 1, &bh); in block_truncate_page()
2867 wait_on_buffer(bh); in block_truncate_page()
2869 if (!buffer_uptodate(bh)) in block_truncate_page()
2874 mark_buffer_dirty(bh); in block_truncate_page()
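
block_truncate_page() zeroes the tail of the last partial block on truncate, reading the block first if it is not uptodate (2864 onward) and dirtying it afterwards (2874). A hedged call-site sketch from a truncate path, where newsize is the hypothetical new i_size:

    /* zero from the new EOF to the end of that block */
    err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
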
2942 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync() local
2945 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
2947 bh->b_end_io(bh, !bio->bi_error); in end_bio_bh_io_sync()
3006 static int submit_bh_wbc(int rw, struct buffer_head *bh, in submit_bh_wbc() argument
3011 BUG_ON(!buffer_locked(bh)); in submit_bh_wbc()
3012 BUG_ON(!buffer_mapped(bh)); in submit_bh_wbc()
3013 BUG_ON(!bh->b_end_io); in submit_bh_wbc()
3014 BUG_ON(buffer_delay(bh)); in submit_bh_wbc()
3015 BUG_ON(buffer_unwritten(bh)); in submit_bh_wbc()
3020 if (test_set_buffer_req(bh) && (rw & WRITE)) in submit_bh_wbc()
3021 clear_buffer_write_io_error(bh); in submit_bh_wbc()
3031 wbc_account_io(wbc, bh->b_page, bh->b_size); in submit_bh_wbc()
3034 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh_wbc()
3035 bio->bi_bdev = bh->b_bdev; in submit_bh_wbc()
3037 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in submit_bh_wbc()
3038 BUG_ON(bio->bi_iter.bi_size != bh->b_size); in submit_bh_wbc()
3041 bio->bi_private = bh; in submit_bh_wbc()
3047 if (buffer_meta(bh)) in submit_bh_wbc()
3049 if (buffer_prio(bh)) in submit_bh_wbc()
3056 int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags) in _submit_bh() argument
3058 return submit_bh_wbc(rw, bh, bio_flags, NULL); in _submit_bh()
3062 int submit_bh(int rw, struct buffer_head *bh) in submit_bh() argument
3064 return submit_bh_wbc(rw, bh, 0, NULL); in submit_bh()
3098 struct buffer_head *bh = bhs[i]; in ll_rw_block() local
3100 if (!trylock_buffer(bh)) in ll_rw_block()
3103 if (test_clear_buffer_dirty(bh)) { in ll_rw_block()
3104 bh->b_end_io = end_buffer_write_sync; in ll_rw_block()
3105 get_bh(bh); in ll_rw_block()
3106 submit_bh(WRITE, bh); in ll_rw_block()
3110 if (!buffer_uptodate(bh)) { in ll_rw_block()
3111 bh->b_end_io = end_buffer_read_sync; in ll_rw_block()
3112 get_bh(bh); in ll_rw_block()
3113 submit_bh(rw, bh); in ll_rw_block()
3117 unlock_buffer(bh); in ll_rw_block()
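
ll_rw_block() is best-effort: it skips any buffer it cannot trylock and, for reads, any buffer already uptodate, so a caller that needs the data must wait on and re-check each bh itself. A hedged fragment batching n reads (bhs[] hypothetical):

    ll_rw_block(READ, n, bhs);              /* fire off up to n reads */
    for (i = 0; i < n; i++) {
            wait_on_buffer(bhs[i]);
            if (!buffer_uptodate(bhs[i]))
                    return -EIO;            /* skipped or failed */
    }
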
3122 void write_dirty_buffer(struct buffer_head *bh, int rw) in write_dirty_buffer() argument
3124 lock_buffer(bh); in write_dirty_buffer()
3125 if (!test_clear_buffer_dirty(bh)) { in write_dirty_buffer()
3126 unlock_buffer(bh); in write_dirty_buffer()
3129 bh->b_end_io = end_buffer_write_sync; in write_dirty_buffer()
3130 get_bh(bh); in write_dirty_buffer()
3131 submit_bh(rw, bh); in write_dirty_buffer()
3140 int __sync_dirty_buffer(struct buffer_head *bh, int rw) in __sync_dirty_buffer() argument
3144 WARN_ON(atomic_read(&bh->b_count) < 1); in __sync_dirty_buffer()
3145 lock_buffer(bh); in __sync_dirty_buffer()
3146 if (test_clear_buffer_dirty(bh)) { in __sync_dirty_buffer()
3147 get_bh(bh); in __sync_dirty_buffer()
3148 bh->b_end_io = end_buffer_write_sync; in __sync_dirty_buffer()
3149 ret = submit_bh(rw, bh); in __sync_dirty_buffer()
3150 wait_on_buffer(bh); in __sync_dirty_buffer()
3151 if (!ret && !buffer_uptodate(bh)) in __sync_dirty_buffer()
3154 unlock_buffer(bh); in __sync_dirty_buffer()
3160 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer() argument
3162 return __sync_dirty_buffer(bh, WRITE_SYNC); in sync_dirty_buffer()
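
__sync_dirty_buffer() is the synchronous write primitive: steal the dirty bit under BH_Lock, submit, wait, and return -EIO if the buffer did not come back uptodate; sync_dirty_buffer() fixes rw at WRITE_SYNC. The canonical modify-and-flush sequence, sketched (the memcpy stands in for any real update):

    lock_buffer(bh);
    memcpy(bh->b_data + off, src, len);     /* hypothetical update */
    unlock_buffer(bh);
    mark_buffer_dirty(bh);
    err = sync_dirty_buffer(bh);            /* WRITE_SYNC, then wait_on_buffer() */
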
3186 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy() argument
3188 return atomic_read(&bh->b_count) | in buffer_busy()
3189 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
3196 struct buffer_head *bh; in drop_buffers() local
3198 bh = head; in drop_buffers()
3200 if (buffer_write_io_error(bh) && page->mapping) in drop_buffers()
3202 if (buffer_busy(bh)) in drop_buffers()
3204 bh = bh->b_this_page; in drop_buffers()
3205 } while (bh != head); in drop_buffers()
3208 struct buffer_head *next = bh->b_this_page; in drop_buffers()
3210 if (bh->b_assoc_map) in drop_buffers()
3211 __remove_assoc_queue(bh); in drop_buffers()
3212 bh = next; in drop_buffers()
3213 } while (bh != head); in drop_buffers()
3258 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers() local
3261 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
3262 free_buffer_head(bh); in try_to_free_buffers()
3263 bh = next; in try_to_free_buffers()
3264 } while (bh != buffers_to_free); in try_to_free_buffers()
3344 void free_buffer_head(struct buffer_head *bh) in free_buffer_head() argument
3346 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3347 kmem_cache_free(bh_cachep, bh); in free_buffer_head()
3383 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock() argument
3385 if (!buffer_uptodate(bh)) { in bh_uptodate_or_lock()
3386 lock_buffer(bh); in bh_uptodate_or_lock()
3387 if (!buffer_uptodate(bh)) in bh_uptodate_or_lock()
3389 unlock_buffer(bh); in bh_uptodate_or_lock()
3401 int bh_submit_read(struct buffer_head *bh) in bh_submit_read() argument
3403 BUG_ON(!buffer_locked(bh)); in bh_submit_read()
3405 if (buffer_uptodate(bh)) { in bh_submit_read()
3406 unlock_buffer(bh); in bh_submit_read()
3410 get_bh(bh); in bh_submit_read()
3411 bh->b_end_io = end_buffer_read_sync; in bh_submit_read()
3412 submit_bh(READ, bh); in bh_submit_read()
3413 wait_on_buffer(bh); in bh_submit_read()
3414 if (buffer_uptodate(bh)) in bh_submit_read()
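
bh_uptodate_or_lock() and bh_submit_read() compose into the common "read unless cached" idiom: the former returns 1 if the buffer is already uptodate and otherwise returns 0 with the buffer locked, which is exactly the state the latter's BUG_ON demands. A hedged sketch of the pairing:

    if (!bh_uptodate_or_lock(bh)) {         /* returns with bh locked */
            if (bh_submit_read(bh) < 0)     /* submits, waits, unlocks */
                    return -EIO;
    }
    /* bh->b_data is now valid */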