
Lines Matching refs:bh

Cross-reference hits for the identifier bh (a struct buffer_head pointer) in the Linux kernel's fs/buffer.c. Each entry shows the source line number, the matched line, and the enclosing function; a trailing "argument" or "local" tag marks whether bh is a function parameter or a local variable at that site.

49 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
55 void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private) in init_buffer() argument
57 bh->b_end_io = handler; in init_buffer()
58 bh->b_private = private; in init_buffer()
62 inline void touch_buffer(struct buffer_head *bh) in touch_buffer() argument
64 trace_block_touch_buffer(bh); in touch_buffer()
65 mark_page_accessed(bh->b_page); in touch_buffer()
69 void __lock_buffer(struct buffer_head *bh) in __lock_buffer() argument
71 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __lock_buffer()
75 void unlock_buffer(struct buffer_head *bh) in unlock_buffer() argument
77 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
79 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
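
__lock_buffer() and unlock_buffer() implement buffer locking on the BH_Lock bit with the kernel's bit-wait primitives. As a hedged sketch of the caller-side discipline (the helper name example_update_block is hypothetical, not part of this file):

    #include <linux/buffer_head.h>

    /* Hypothetical helper: lock, modify, dirty, unlock. */
    static void example_update_block(struct buffer_head *bh, u8 byte)
    {
            lock_buffer(bh);               /* may sleep on BH_Lock */
            ((u8 *)bh->b_data)[0] = byte;  /* mutate block contents */
            mark_buffer_dirty(bh);         /* hand off to writeback */
            unlock_buffer(bh);             /* wakes BH_Lock waiters */
    }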
91 struct buffer_head *head, *bh; in buffer_check_dirty_writeback() local
104 bh = head; in buffer_check_dirty_writeback()
106 if (buffer_locked(bh)) in buffer_check_dirty_writeback()
109 if (buffer_dirty(bh)) in buffer_check_dirty_writeback()
112 bh = bh->b_this_page; in buffer_check_dirty_writeback()
113 } while (bh != head); in buffer_check_dirty_writeback()
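
buffer_check_dirty_writeback() walks the page's buffers with the do/while idiom used throughout this file: b_this_page links a page's buffers into a ring, so iteration stops when it returns to the head. A minimal sketch of the same pattern (count_dirty_buffers is illustrative only):

    #include <linux/buffer_head.h>

    static unsigned int count_dirty_buffers(struct page *page)
    {
            struct buffer_head *head = page_buffers(page);
            struct buffer_head *bh = head;
            unsigned int nr = 0;

            do {                          /* b_this_page forms a ring */
                    if (buffer_dirty(bh))
                            nr++;
                    bh = bh->b_this_page;
            } while (bh != head);         /* back at the head: done */
            return nr;
    }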
122 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer() argument
124 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
136 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error() argument
138 if (!test_bit(BH_Quiet, &bh->b_state)) in buffer_io_error()
141 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); in buffer_io_error()
152 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch() argument
155 set_buffer_uptodate(bh); in __end_buffer_read_notouch()
158 clear_buffer_uptodate(bh); in __end_buffer_read_notouch()
160 unlock_buffer(bh); in __end_buffer_read_notouch()
167 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync() argument
169 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_sync()
170 put_bh(bh); in end_buffer_read_sync()
174 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync() argument
177 set_buffer_uptodate(bh); in end_buffer_write_sync()
179 buffer_io_error(bh, ", lost sync page write"); in end_buffer_write_sync()
180 set_buffer_write_io_error(bh); in end_buffer_write_sync()
181 clear_buffer_uptodate(bh); in end_buffer_write_sync()
183 unlock_buffer(bh); in end_buffer_write_sync()
184 put_bh(bh); in end_buffer_write_sync()
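
end_buffer_read_sync() and end_buffer_write_sync() complete one-off synchronous I/O; the submitter takes an extra reference that the handler drops via put_bh(). A hedged sketch of the matching submit side (example_read_block_sync is hypothetical; compare __bread_slow() further down this listing):

    #include <linux/buffer_head.h>

    static int example_read_block_sync(struct buffer_head *bh)
    {
            lock_buffer(bh);
            if (buffer_uptodate(bh)) {    /* someone read it already */
                    unlock_buffer(bh);
                    return 0;
            }
            get_bh(bh);                   /* ref dropped in end_io */
            bh->b_end_io = end_buffer_read_sync;
            submit_bh(REQ_OP_READ, 0, bh);
            wait_on_buffer(bh);           /* end_io unlocks the buffer */
            return buffer_uptodate(bh) ? 0 : -EIO;
    }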
206 struct buffer_head *bh; in __find_get_block_slow() local
220 bh = head; in __find_get_block_slow()
222 if (!buffer_mapped(bh)) in __find_get_block_slow()
224 else if (bh->b_blocknr == block) { in __find_get_block_slow()
225 ret = bh; in __find_get_block_slow()
226 get_bh(bh); in __find_get_block_slow()
229 bh = bh->b_this_page; in __find_get_block_slow()
230 } while (bh != head); in __find_get_block_slow()
241 (unsigned long long)bh->b_blocknr); in __find_get_block_slow()
243 bh->b_state, bh->b_size); in __find_get_block_slow()
279 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read() argument
287 BUG_ON(!buffer_async_read(bh)); in end_buffer_async_read()
289 page = bh->b_page; in end_buffer_async_read()
291 set_buffer_uptodate(bh); in end_buffer_async_read()
293 clear_buffer_uptodate(bh); in end_buffer_async_read()
294 buffer_io_error(bh, ", async page read"); in end_buffer_async_read()
306 clear_buffer_async_read(bh); in end_buffer_async_read()
307 unlock_buffer(bh); in end_buffer_async_read()
308 tmp = bh; in end_buffer_async_read()
317 } while (tmp != bh); in end_buffer_async_read()
340 void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write() argument
347 BUG_ON(!buffer_async_write(bh)); in end_buffer_async_write()
349 page = bh->b_page; in end_buffer_async_write()
351 set_buffer_uptodate(bh); in end_buffer_async_write()
353 buffer_io_error(bh, ", lost async page write"); in end_buffer_async_write()
355 set_buffer_write_io_error(bh); in end_buffer_async_write()
356 clear_buffer_uptodate(bh); in end_buffer_async_write()
364 clear_buffer_async_write(bh); in end_buffer_async_write()
365 unlock_buffer(bh); in end_buffer_async_write()
366 tmp = bh->b_this_page; in end_buffer_async_write()
367 while (tmp != bh) { in end_buffer_async_write()
407 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read() argument
409 bh->b_end_io = end_buffer_async_read; in mark_buffer_async_read()
410 set_buffer_async_read(bh); in mark_buffer_async_read()
413 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio() argument
416 bh->b_end_io = handler; in mark_buffer_async_write_endio()
417 set_buffer_async_write(bh); in mark_buffer_async_write_endio()
420 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write() argument
422 mark_buffer_async_write_endio(bh, end_buffer_async_write); in mark_buffer_async_write()
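
mark_buffer_async_write() points b_end_io at end_buffer_async_write() and sets the async-write flag; the writeback paths below then follow a lock, clear-dirty, mark, submit sequence. A condensed, hedged sketch (example_start_async_write is hypothetical, modelled on __block_write_full_page() later in the listing, which batches the submits at page scope):

    #include <linux/buffer_head.h>

    static void example_start_async_write(struct buffer_head *bh)
    {
            lock_buffer(bh);                     /* end_io unlocks */
            if (test_clear_buffer_dirty(bh)) {
                    mark_buffer_async_write(bh); /* sets b_end_io */
                    submit_bh(REQ_OP_WRITE, 0, bh);
            } else {
                    unlock_buffer(bh);           /* nothing to write */
            }
    }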
479 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue() argument
481 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
482 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
483 if (buffer_write_io_error(bh)) in __remove_assoc_queue()
484 set_bit(AS_EIO, &bh->b_assoc_map->flags); in __remove_assoc_queue()
485 bh->b_assoc_map = NULL; in __remove_assoc_queue()
505 struct buffer_head *bh; in osync_buffers_list() local
512 bh = BH_ENTRY(p); in osync_buffers_list()
513 if (buffer_locked(bh)) { in osync_buffers_list()
514 get_bh(bh); in osync_buffers_list()
516 wait_on_buffer(bh); in osync_buffers_list()
517 if (!buffer_uptodate(bh)) in osync_buffers_list()
519 brelse(bh); in osync_buffers_list()
589 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block() local
590 if (bh) { in write_boundary_block()
591 if (buffer_dirty(bh)) in write_boundary_block()
592 ll_rw_block(REQ_OP_WRITE, 0, 1, &bh); in write_boundary_block()
593 put_bh(bh); in write_boundary_block()
597 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode() argument
600 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
602 mark_buffer_dirty(bh); in mark_buffer_dirty_inode()
608 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
610 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
612 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
678 struct buffer_head *bh = head; in __set_page_dirty_buffers() local
681 set_buffer_dirty(bh); in __set_page_dirty_buffers()
682 bh = bh->b_this_page; in __set_page_dirty_buffers()
683 } while (bh != head); in __set_page_dirty_buffers()
726 struct buffer_head *bh; in fsync_buffers_list() local
737 bh = BH_ENTRY(list->next); in fsync_buffers_list()
738 mapping = bh->b_assoc_map; in fsync_buffers_list()
739 __remove_assoc_queue(bh); in fsync_buffers_list()
743 if (buffer_dirty(bh) || buffer_locked(bh)) { in fsync_buffers_list()
744 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
745 bh->b_assoc_map = mapping; in fsync_buffers_list()
746 if (buffer_dirty(bh)) { in fsync_buffers_list()
747 get_bh(bh); in fsync_buffers_list()
756 write_dirty_buffer(bh, WRITE_SYNC); in fsync_buffers_list()
764 brelse(bh); in fsync_buffers_list()
775 bh = BH_ENTRY(tmp.prev); in fsync_buffers_list()
776 get_bh(bh); in fsync_buffers_list()
777 mapping = bh->b_assoc_map; in fsync_buffers_list()
778 __remove_assoc_queue(bh); in fsync_buffers_list()
782 if (buffer_dirty(bh)) { in fsync_buffers_list()
783 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
785 bh->b_assoc_map = mapping; in fsync_buffers_list()
788 wait_on_buffer(bh); in fsync_buffers_list()
789 if (!buffer_uptodate(bh)) in fsync_buffers_list()
791 brelse(bh); in fsync_buffers_list()
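
fsync_buffers_list() is the engine behind sync_mapping_buffers(): it writes out the buffers that mark_buffer_dirty_inode() queued on the mapping's private list. A hedged sketch of how a simple filesystem's ->fsync reaches it (example_fsync is hypothetical and omits the data write-and-wait and inode-metadata steps a real ->fsync also performs):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    static int example_fsync(struct file *file, loff_t start, loff_t end,
                             int datasync)
    {
            struct inode *inode = file_inode(file);

            /* flush buffers queued by mark_buffer_dirty_inode() */
            return sync_mapping_buffers(inode->i_mapping);
    }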
844 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers() local
845 if (buffer_dirty(bh)) { in remove_inode_buffers()
849 __remove_assoc_queue(bh); in remove_inode_buffers()
868 struct buffer_head *bh, *head; in alloc_page_buffers() local
875 bh = alloc_buffer_head(GFP_NOFS); in alloc_page_buffers()
876 if (!bh) in alloc_page_buffers()
879 bh->b_this_page = head; in alloc_page_buffers()
880 bh->b_blocknr = -1; in alloc_page_buffers()
881 head = bh; in alloc_page_buffers()
883 bh->b_size = size; in alloc_page_buffers()
886 set_bh_page(bh, page, offset); in alloc_page_buffers()
895 bh = head; in alloc_page_buffers()
897 free_buffer_head(bh); in alloc_page_buffers()
924 struct buffer_head *bh, *tail; in link_dev_buffers() local
926 bh = head; in link_dev_buffers()
928 tail = bh; in link_dev_buffers()
929 bh = bh->b_this_page; in link_dev_buffers()
930 } while (bh); in link_dev_buffers()
955 struct buffer_head *bh = head; in init_page_buffers() local
960 if (!buffer_mapped(bh)) { in init_page_buffers()
961 init_buffer(bh, NULL, NULL); in init_page_buffers()
962 bh->b_bdev = bdev; in init_page_buffers()
963 bh->b_blocknr = block; in init_page_buffers()
965 set_buffer_uptodate(bh); in init_page_buffers()
967 set_buffer_mapped(bh); in init_page_buffers()
970 bh = bh->b_this_page; in init_page_buffers()
971 } while (bh != head); in init_page_buffers()
990 struct buffer_head *bh; in grow_dev_page() local
1012 bh = page_buffers(page); in grow_dev_page()
1013 if (bh->b_size == size) { in grow_dev_page()
1026 bh = alloc_page_buffers(page, size, 0); in grow_dev_page()
1027 if (!bh) in grow_dev_page()
1036 link_dev_buffers(page, bh); in grow_dev_page()
1098 struct buffer_head *bh; in __getblk_slow() local
1101 bh = __find_get_block(bdev, block, size); in __getblk_slow()
1102 if (bh) in __getblk_slow()
1103 return bh; in __getblk_slow()
1148 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty() argument
1150 WARN_ON_ONCE(!buffer_uptodate(bh)); in mark_buffer_dirty()
1152 trace_block_dirty_buffer(bh); in mark_buffer_dirty()
1160 if (buffer_dirty(bh)) { in mark_buffer_dirty()
1162 if (buffer_dirty(bh)) in mark_buffer_dirty()
1166 if (!test_set_buffer_dirty(bh)) { in mark_buffer_dirty()
1167 struct page *page = bh->b_page; in mark_buffer_dirty()
1204 void __bforget(struct buffer_head *bh) in __bforget() argument
1206 clear_buffer_dirty(bh); in __bforget()
1207 if (bh->b_assoc_map) { in __bforget()
1208 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1211 list_del_init(&bh->b_assoc_buffers); in __bforget()
1212 bh->b_assoc_map = NULL; in __bforget()
1215 __brelse(bh); in __bforget()
1219 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow() argument
1221 lock_buffer(bh); in __bread_slow()
1222 if (buffer_uptodate(bh)) { in __bread_slow()
1223 unlock_buffer(bh); in __bread_slow()
1224 return bh; in __bread_slow()
1226 get_bh(bh); in __bread_slow()
1227 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1228 submit_bh(REQ_OP_READ, 0, bh); in __bread_slow()
1229 wait_on_buffer(bh); in __bread_slow()
1230 if (buffer_uptodate(bh)) in __bread_slow()
1231 return bh; in __bread_slow()
1233 brelse(bh); in __bread_slow()
1277 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install() argument
1283 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) { in bh_lru_install()
1288 get_bh(bh); in bh_lru_install()
1289 bhs[out++] = bh; in bh_lru_install()
1294 if (bh2 == bh) { in bh_lru_install()
1327 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru() local
1329 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && in lookup_bh_lru()
1330 bh->b_size == size) { in lookup_bh_lru()
1337 __this_cpu_write(bh_lrus.bhs[0], bh); in lookup_bh_lru()
1339 get_bh(bh); in lookup_bh_lru()
1340 ret = bh; in lookup_bh_lru()
1356 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block() local
1358 if (bh == NULL) { in __find_get_block()
1360 bh = __find_get_block_slow(bdev, block); in __find_get_block()
1361 if (bh) in __find_get_block()
1362 bh_lru_install(bh); in __find_get_block()
1364 touch_buffer(bh); in __find_get_block()
1366 return bh; in __find_get_block()
1382 struct buffer_head *bh = __find_get_block(bdev, block, size); in __getblk_gfp() local
1385 if (bh == NULL) in __getblk_gfp()
1386 bh = __getblk_slow(bdev, block, size, gfp); in __getblk_gfp()
1387 return bh; in __getblk_gfp()
1396 struct buffer_head *bh = __getblk(bdev, block, size); in __breadahead() local
1397 if (likely(bh)) { in __breadahead()
1398 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); in __breadahead()
1399 brelse(bh); in __breadahead()
1420 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __bread_gfp() local
1422 if (likely(bh) && !buffer_uptodate(bh)) in __bread_gfp()
1423 bh = __bread_slow(bh); in __bread_gfp()
1424 return bh; in __bread_gfp()
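
__bread_gfp() is the read entry behind sb_bread(): it returns NULL on failure, otherwise an uptodate buffer the caller must release with brelse(). Consumer-side sketch (example_read_super_block and the block number are purely illustrative):

    #include <linux/buffer_head.h>

    static int example_read_super_block(struct super_block *sb)
    {
            struct buffer_head *bh = sb_bread(sb, 1); /* block 1: example */

            if (!bh)
                    return -EIO;          /* I/O error or no memory */
            /* ... parse bh->b_data here ... */
            brelse(bh);                   /* balance the reference */
            return 0;
    }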
1464 void set_bh_page(struct buffer_head *bh, in set_bh_page() argument
1467 bh->b_page = page; in set_bh_page()
1473 bh->b_data = (char *)(0 + offset); in set_bh_page()
1475 bh->b_data = page_address(page) + offset; in set_bh_page()
1488 static void discard_buffer(struct buffer_head * bh) in discard_buffer() argument
1492 lock_buffer(bh); in discard_buffer()
1493 clear_buffer_dirty(bh); in discard_buffer()
1494 bh->b_bdev = NULL; in discard_buffer()
1495 b_state = bh->b_state; in discard_buffer()
1497 b_state_old = cmpxchg(&bh->b_state, b_state, in discard_buffer()
1503 unlock_buffer(bh); in discard_buffer()
1525 struct buffer_head *head, *bh, *next; in block_invalidatepage() local
1539 bh = head; in block_invalidatepage()
1541 unsigned int next_off = curr_off + bh->b_size; in block_invalidatepage()
1542 next = bh->b_this_page; in block_invalidatepage()
1554 discard_buffer(bh); in block_invalidatepage()
1556 bh = next; in block_invalidatepage()
1557 } while (bh != head); in block_invalidatepage()
1580 struct buffer_head *bh, *head, *tail; in create_empty_buffers() local
1583 bh = head; in create_empty_buffers()
1585 bh->b_state |= b_state; in create_empty_buffers()
1586 tail = bh; in create_empty_buffers()
1587 bh = bh->b_this_page; in create_empty_buffers()
1588 } while (bh); in create_empty_buffers()
1593 bh = head; in create_empty_buffers()
1596 set_buffer_dirty(bh); in create_empty_buffers()
1598 set_buffer_uptodate(bh); in create_empty_buffers()
1599 bh = bh->b_this_page; in create_empty_buffers()
1600 } while (bh != head); in create_empty_buffers()
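
create_empty_buffers() attaches a fresh ring of buffers to a page; callers conventionally guard it with page_has_buffers(). Sketch (example_page_buffers is hypothetical):

    #include <linux/buffer_head.h>

    static struct buffer_head *example_page_buffers(struct page *page,
                                                    unsigned int blocksize)
    {
            if (!page_has_buffers(page))
                    create_empty_buffers(page, blocksize, 0);
            return page_buffers(page);    /* head of the buffer ring */
    }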
1697 struct buffer_head *bh, *head; in __block_write_full_page() local
1715 bh = head; in __block_write_full_page()
1716 blocksize = bh->b_size; in __block_write_full_page()
1736 clear_buffer_dirty(bh); in __block_write_full_page()
1737 set_buffer_uptodate(bh); in __block_write_full_page()
1738 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && in __block_write_full_page()
1739 buffer_dirty(bh)) { in __block_write_full_page()
1740 WARN_ON(bh->b_size != blocksize); in __block_write_full_page()
1741 err = get_block(inode, block, bh, 1); in __block_write_full_page()
1744 clear_buffer_delay(bh); in __block_write_full_page()
1745 if (buffer_new(bh)) { in __block_write_full_page()
1747 clear_buffer_new(bh); in __block_write_full_page()
1748 unmap_underlying_metadata(bh->b_bdev, in __block_write_full_page()
1749 bh->b_blocknr); in __block_write_full_page()
1752 bh = bh->b_this_page; in __block_write_full_page()
1754 } while (bh != head); in __block_write_full_page()
1757 if (!buffer_mapped(bh)) in __block_write_full_page()
1767 lock_buffer(bh); in __block_write_full_page()
1768 } else if (!trylock_buffer(bh)) { in __block_write_full_page()
1772 if (test_clear_buffer_dirty(bh)) { in __block_write_full_page()
1773 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1775 unlock_buffer(bh); in __block_write_full_page()
1777 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1787 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1788 if (buffer_async_write(bh)) { in __block_write_full_page()
1789 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc); in __block_write_full_page()
1792 bh = next; in __block_write_full_page()
1793 } while (bh != head); in __block_write_full_page()
1820 bh = head; in __block_write_full_page()
1823 if (buffer_mapped(bh) && buffer_dirty(bh) && in __block_write_full_page()
1824 !buffer_delay(bh)) { in __block_write_full_page()
1825 lock_buffer(bh); in __block_write_full_page()
1826 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1832 clear_buffer_dirty(bh); in __block_write_full_page()
1834 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1840 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1841 if (buffer_async_write(bh)) { in __block_write_full_page()
1842 clear_buffer_dirty(bh); in __block_write_full_page()
1843 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc); in __block_write_full_page()
1846 bh = next; in __block_write_full_page()
1847 } while (bh != head); in __block_write_full_page()
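
__block_write_full_page(), like the read and write-begin paths below, delegates block mapping to the filesystem's get_block callback, which fills in b_bdev/b_blocknr and the mapped state. Purely as an illustration of that contract, a trivial identity mapper (example_get_block is hypothetical; real filesystems consult allocation metadata and may set the new/delay bits):

    #include <linux/buffer_head.h>

    static int example_get_block(struct inode *inode, sector_t iblock,
                                 struct buffer_head *bh, int create)
    {
            /* identity mapping: file block == disk block (sketch only) */
            map_bh(bh, inode->i_sb, iblock); /* bdev, blocknr, mapped */
            return 0;
    }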
1861 struct buffer_head *head, *bh; in page_zero_new_buffers() local
1867 bh = head = page_buffers(page); in page_zero_new_buffers()
1870 block_end = block_start + bh->b_size; in page_zero_new_buffers()
1872 if (buffer_new(bh)) { in page_zero_new_buffers()
1881 set_buffer_uptodate(bh); in page_zero_new_buffers()
1884 clear_buffer_new(bh); in page_zero_new_buffers()
1885 mark_buffer_dirty(bh); in page_zero_new_buffers()
1890 bh = bh->b_this_page; in page_zero_new_buffers()
1891 } while (bh != head); in page_zero_new_buffers()
1896 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, in iomap_to_bh() argument
1901 bh->b_bdev = iomap->bdev; in iomap_to_bh()
1918 if (!buffer_uptodate(bh) || in iomap_to_bh()
1920 set_buffer_new(bh); in iomap_to_bh()
1923 if (!buffer_uptodate(bh) || in iomap_to_bh()
1925 set_buffer_new(bh); in iomap_to_bh()
1926 set_buffer_uptodate(bh); in iomap_to_bh()
1927 set_buffer_mapped(bh); in iomap_to_bh()
1928 set_buffer_delay(bh); in iomap_to_bh()
1936 set_buffer_new(bh); in iomap_to_bh()
1937 set_buffer_unwritten(bh); in iomap_to_bh()
1941 set_buffer_new(bh); in iomap_to_bh()
1942 bh->b_blocknr = (iomap->blkno >> (inode->i_blkbits - 9)) + in iomap_to_bh()
1944 set_buffer_mapped(bh); in iomap_to_bh()
1959 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin_int() local
1972 for(bh = head, block_start = 0; bh != head || !block_start; in __block_write_begin_int()
1973 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin_int()
1977 if (!buffer_uptodate(bh)) in __block_write_begin_int()
1978 set_buffer_uptodate(bh); in __block_write_begin_int()
1982 if (buffer_new(bh)) in __block_write_begin_int()
1983 clear_buffer_new(bh); in __block_write_begin_int()
1984 if (!buffer_mapped(bh)) { in __block_write_begin_int()
1985 WARN_ON(bh->b_size != blocksize); in __block_write_begin_int()
1987 err = get_block(inode, block, bh, 1); in __block_write_begin_int()
1991 iomap_to_bh(inode, block, bh, iomap); in __block_write_begin_int()
1994 if (buffer_new(bh)) { in __block_write_begin_int()
1995 unmap_underlying_metadata(bh->b_bdev, in __block_write_begin_int()
1996 bh->b_blocknr); in __block_write_begin_int()
1998 clear_buffer_new(bh); in __block_write_begin_int()
1999 set_buffer_uptodate(bh); in __block_write_begin_int()
2000 mark_buffer_dirty(bh); in __block_write_begin_int()
2011 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2012 set_buffer_uptodate(bh); in __block_write_begin_int()
2015 if (!buffer_uptodate(bh) && !buffer_delay(bh) && in __block_write_begin_int()
2016 !buffer_unwritten(bh) && in __block_write_begin_int()
2018 ll_rw_block(REQ_OP_READ, 0, 1, &bh); in __block_write_begin_int()
2019 *wait_bh++=bh; in __block_write_begin_int()
2048 struct buffer_head *bh, *head; in __block_commit_write() local
2050 bh = head = page_buffers(page); in __block_commit_write()
2051 blocksize = bh->b_size; in __block_commit_write()
2057 if (!buffer_uptodate(bh)) in __block_commit_write()
2060 set_buffer_uptodate(bh); in __block_commit_write()
2061 mark_buffer_dirty(bh); in __block_commit_write()
2063 clear_buffer_new(bh); in __block_commit_write()
2066 bh = bh->b_this_page; in __block_commit_write()
2067 } while (bh != head); in __block_commit_write()
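
__block_write_begin_int() and __block_commit_write() are the two halves of a buffered write; filesystems normally reach them through block_write_begin() and the generic write_end helpers. Wiring sketch (example_write_begin is hypothetical and reuses the example_get_block mapper sketched above):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    static int example_write_begin(struct file *file,
                                   struct address_space *mapping,
                                   loff_t pos, unsigned int len,
                                   unsigned int flags,
                                   struct page **pagep, void **fsdata)
    {
            return block_write_begin(mapping, pos, len, flags, pagep,
                                     example_get_block);
    }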
2197 struct buffer_head *bh, *head; in block_is_partially_uptodate() local
2210 bh = head; in block_is_partially_uptodate()
2215 if (!buffer_uptodate(bh)) { in block_is_partially_uptodate()
2223 bh = bh->b_this_page; in block_is_partially_uptodate()
2224 } while (bh != head); in block_is_partially_uptodate()
2241 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_page() local
2252 bh = head; in block_read_full_page()
2257 if (buffer_uptodate(bh)) in block_read_full_page()
2260 if (!buffer_mapped(bh)) { in block_read_full_page()
2265 WARN_ON(bh->b_size != blocksize); in block_read_full_page()
2266 err = get_block(inode, iblock, bh, 0); in block_read_full_page()
2270 if (!buffer_mapped(bh)) { in block_read_full_page()
2273 set_buffer_uptodate(bh); in block_read_full_page()
2280 if (buffer_uptodate(bh)) in block_read_full_page()
2283 arr[nr++] = bh; in block_read_full_page()
2284 } while (i++, iblock++, (bh = bh->b_this_page) != head); in block_read_full_page()
2302 bh = arr[i]; in block_read_full_page()
2303 lock_buffer(bh); in block_read_full_page()
2304 mark_buffer_async_read(bh); in block_read_full_page()
2313 bh = arr[i]; in block_read_full_page()
2314 if (buffer_uptodate(bh)) in block_read_full_page()
2315 end_buffer_async_read(bh, 1); in block_read_full_page()
2317 submit_bh(REQ_OP_READ, 0, bh); in block_read_full_page()
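
block_read_full_page() collects the buffers that still need I/O into arr[] and submits them with end_buffer_async_read() as the completion handler. A filesystem's ->readpage is typically a one-liner over it (sketch; example_readpage and example_get_block are hypothetical):

    #include <linux/buffer_head.h>

    static int example_readpage(struct file *file, struct page *page)
    {
            return block_read_full_page(page, example_get_block);
    }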
2524 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) in end_buffer_read_nobh() argument
2526 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_nobh()
2536 struct buffer_head *bh; in attach_nobh_buffers() local
2541 bh = head; in attach_nobh_buffers()
2544 set_buffer_dirty(bh); in attach_nobh_buffers()
2545 if (!bh->b_this_page) in attach_nobh_buffers()
2546 bh->b_this_page = head; in attach_nobh_buffers()
2547 bh = bh->b_this_page; in attach_nobh_buffers()
2548 } while (bh != head); in attach_nobh_buffers()
2566 struct buffer_head *head, *bh; in nobh_write_begin() local
2619 for (block_start = 0, block_in_page = 0, bh = head; in nobh_write_begin()
2621 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { in nobh_write_begin()
2625 bh->b_state = 0; in nobh_write_begin()
2630 bh, create); in nobh_write_begin()
2633 if (!buffer_mapped(bh)) in nobh_write_begin()
2635 if (buffer_new(bh)) in nobh_write_begin()
2636 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); in nobh_write_begin()
2638 set_buffer_uptodate(bh); in nobh_write_begin()
2641 if (buffer_new(bh) || !buffer_mapped(bh)) { in nobh_write_begin()
2646 if (buffer_uptodate(bh)) in nobh_write_begin()
2649 lock_buffer(bh); in nobh_write_begin()
2650 bh->b_end_io = end_buffer_read_nobh; in nobh_write_begin()
2651 submit_bh(REQ_OP_READ, 0, bh); in nobh_write_begin()
2662 for (bh = head; bh; bh = bh->b_this_page) { in nobh_write_begin()
2663 wait_on_buffer(bh); in nobh_write_begin()
2664 if (!buffer_uptodate(bh)) in nobh_write_begin()
2705 struct buffer_head *bh; in nobh_write_end() local
2725 bh = head; in nobh_write_end()
2727 free_buffer_head(bh); in nobh_write_end()
2874 struct buffer_head *bh; in block_truncate_page() local
2896 bh = page_buffers(page); in block_truncate_page()
2899 bh = bh->b_this_page; in block_truncate_page()
2905 if (!buffer_mapped(bh)) { in block_truncate_page()
2906 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2907 err = get_block(inode, iblock, bh, 0); in block_truncate_page()
2911 if (!buffer_mapped(bh)) in block_truncate_page()
2917 set_buffer_uptodate(bh); in block_truncate_page()
2919 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { in block_truncate_page()
2921 ll_rw_block(REQ_OP_READ, 0, 1, &bh); in block_truncate_page()
2922 wait_on_buffer(bh); in block_truncate_page()
2924 if (!buffer_uptodate(bh)) in block_truncate_page()
2929 mark_buffer_dirty(bh); in block_truncate_page()
2997 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync() local
3000 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
3002 bh->b_end_io(bh, !bio->bi_error); in end_bio_bh_io_sync()
3054 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, in submit_bh_wbc() argument
3059 BUG_ON(!buffer_locked(bh)); in submit_bh_wbc()
3060 BUG_ON(!buffer_mapped(bh)); in submit_bh_wbc()
3061 BUG_ON(!bh->b_end_io); in submit_bh_wbc()
3062 BUG_ON(buffer_delay(bh)); in submit_bh_wbc()
3063 BUG_ON(buffer_unwritten(bh)); in submit_bh_wbc()
3068 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) in submit_bh_wbc()
3069 clear_buffer_write_io_error(bh); in submit_bh_wbc()
3079 wbc_account_io(wbc, bh->b_page, bh->b_size); in submit_bh_wbc()
3082 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh_wbc()
3083 bio->bi_bdev = bh->b_bdev; in submit_bh_wbc()
3085 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in submit_bh_wbc()
3086 BUG_ON(bio->bi_iter.bi_size != bh->b_size); in submit_bh_wbc()
3089 bio->bi_private = bh; in submit_bh_wbc()
3095 if (buffer_meta(bh)) in submit_bh_wbc()
3097 if (buffer_prio(bh)) in submit_bh_wbc()
3105 int _submit_bh(int op, int op_flags, struct buffer_head *bh, in _submit_bh() argument
3108 return submit_bh_wbc(op, op_flags, bh, bio_flags, NULL); in _submit_bh()
3112 int submit_bh(int op, int op_flags, struct buffer_head *bh) in submit_bh() argument
3114 return submit_bh_wbc(op, op_flags, bh, 0, NULL); in submit_bh()
3149 struct buffer_head *bh = bhs[i]; in ll_rw_block() local
3151 if (!trylock_buffer(bh)) in ll_rw_block()
3154 if (test_clear_buffer_dirty(bh)) { in ll_rw_block()
3155 bh->b_end_io = end_buffer_write_sync; in ll_rw_block()
3156 get_bh(bh); in ll_rw_block()
3157 submit_bh(op, op_flags, bh); in ll_rw_block()
3161 if (!buffer_uptodate(bh)) { in ll_rw_block()
3162 bh->b_end_io = end_buffer_read_sync; in ll_rw_block()
3163 get_bh(bh); in ll_rw_block()
3164 submit_bh(op, op_flags, bh); in ll_rw_block()
3168 unlock_buffer(bh); in ll_rw_block()
3173 void write_dirty_buffer(struct buffer_head *bh, int op_flags) in write_dirty_buffer() argument
3175 lock_buffer(bh); in write_dirty_buffer()
3176 if (!test_clear_buffer_dirty(bh)) { in write_dirty_buffer()
3177 unlock_buffer(bh); in write_dirty_buffer()
3180 bh->b_end_io = end_buffer_write_sync; in write_dirty_buffer()
3181 get_bh(bh); in write_dirty_buffer()
3182 submit_bh(REQ_OP_WRITE, op_flags, bh); in write_dirty_buffer()
3191 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) in __sync_dirty_buffer() argument
3195 WARN_ON(atomic_read(&bh->b_count) < 1); in __sync_dirty_buffer()
3196 lock_buffer(bh); in __sync_dirty_buffer()
3197 if (test_clear_buffer_dirty(bh)) { in __sync_dirty_buffer()
3198 get_bh(bh); in __sync_dirty_buffer()
3199 bh->b_end_io = end_buffer_write_sync; in __sync_dirty_buffer()
3200 ret = submit_bh(REQ_OP_WRITE, op_flags, bh); in __sync_dirty_buffer()
3201 wait_on_buffer(bh); in __sync_dirty_buffer()
3202 if (!ret && !buffer_uptodate(bh)) in __sync_dirty_buffer()
3205 unlock_buffer(bh); in __sync_dirty_buffer()
3211 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer() argument
3213 return __sync_dirty_buffer(bh, WRITE_SYNC); in sync_dirty_buffer()
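
sync_dirty_buffer() is the synchronous counterpart to mark_buffer_dirty(): write this one buffer now and wait for it. Sketch of the common dirty-then-commit idiom (example_commit_block is hypothetical):

    #include <linux/buffer_head.h>

    static int example_commit_block(struct buffer_head *bh)
    {
            mark_buffer_dirty(bh);        /* flag for writeback */
            return sync_dirty_buffer(bh); /* write, wait; -EIO on failure */
    }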
3237 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy() argument
3239 return atomic_read(&bh->b_count) | in buffer_busy()
3240 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
3247 struct buffer_head *bh; in drop_buffers() local
3249 bh = head; in drop_buffers()
3251 if (buffer_write_io_error(bh) && page->mapping) in drop_buffers()
3253 if (buffer_busy(bh)) in drop_buffers()
3255 bh = bh->b_this_page; in drop_buffers()
3256 } while (bh != head); in drop_buffers()
3259 struct buffer_head *next = bh->b_this_page; in drop_buffers()
3261 if (bh->b_assoc_map) in drop_buffers()
3262 __remove_assoc_queue(bh); in drop_buffers()
3263 bh = next; in drop_buffers()
3264 } while (bh != head); in drop_buffers()
3309 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers() local
3312 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
3313 free_buffer_head(bh); in try_to_free_buffers()
3314 bh = next; in try_to_free_buffers()
3315 } while (bh != buffers_to_free); in try_to_free_buffers()
3395 void free_buffer_head(struct buffer_head *bh) in free_buffer_head() argument
3397 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3398 kmem_cache_free(bh_cachep, bh); in free_buffer_head()
3434 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock() argument
3436 if (!buffer_uptodate(bh)) { in bh_uptodate_or_lock()
3437 lock_buffer(bh); in bh_uptodate_or_lock()
3438 if (!buffer_uptodate(bh)) in bh_uptodate_or_lock()
3440 unlock_buffer(bh); in bh_uptodate_or_lock()
3452 int bh_submit_read(struct buffer_head *bh) in bh_submit_read() argument
3454 BUG_ON(!buffer_locked(bh)); in bh_submit_read()
3456 if (buffer_uptodate(bh)) { in bh_submit_read()
3457 unlock_buffer(bh); in bh_submit_read()
3461 get_bh(bh); in bh_submit_read()
3462 bh->b_end_io = end_buffer_read_sync; in bh_submit_read()
3463 submit_bh(REQ_OP_READ, 0, bh); in bh_submit_read()
3464 wait_on_buffer(bh); in bh_submit_read()
3465 if (buffer_uptodate(bh)) in bh_submit_read()
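
bh_uptodate_or_lock() and bh_submit_read() are designed as a pair: the former returns nonzero when the buffer is already uptodate and otherwise returns 0 with the buffer locked, which is exactly the state bh_submit_read() asserts with BUG_ON(!buffer_locked(bh)). Hedged sketch of the pairing (example_ensure_uptodate is hypothetical):

    #include <linux/buffer_head.h>

    static int example_ensure_uptodate(struct buffer_head *bh)
    {
            if (bh_uptodate_or_lock(bh))  /* already uptodate */
                    return 0;
            return bh_submit_read(bh);    /* reads, waits, unlocks */
    }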