Lines matching: ext2, buffer, low, power
1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/fs/buffer.c
9 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
12 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
15 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
17 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
19 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
36 #include <linux/backing-dev.h>
63 mark_page_accessed(bh->b_page); in touch_buffer()
69 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __lock_buffer()
75 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
77 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
110 bh = bh->b_this_page; in buffer_check_dirty_writeback()
116 * Block until a buffer comes unlocked. This doesn't stop it
117 * from becoming locked again - you have to lock it yourself
122 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
128 if (!test_bit(BH_Quiet, &bh->b_state)) in buffer_io_error()
130 "Buffer I/O error on dev %pg, logical block %llu%s\n", in buffer_io_error()
131 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); in buffer_io_error()
135 * End-of-IO handler helper function which does not touch the bh after
137 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
139 * hashing after unlocking the buffer, so it doesn't actually touch the bh
147 /* This happens, due to failed read-ahead attempts. */ in __end_buffer_read_notouch()
154 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
155 * unlock the buffer. This is what ll_rw_block uses too.
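For context, a minimal hedged sketch (not part of this file) of how end_buffer_read_sync() is consumed, following the same pattern as __bread_slow() further down; read_bh_sync() is a hypothetical name, and submit_bh(op, op_flags, bh) is the signature used by kernels of this vintage:

#include <linux/buffer_head.h>

/* Hypothetical helper: synchronously read one buffer head. */
static int read_bh_sync(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;				/* already valid in memory */

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {			/* lost a race with another reader */
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);					/* reference held across the I/O */
	bh->b_end_io = end_buffer_read_sync;		/* marks uptodate, drops ref, unlocks */
	submit_bh(REQ_OP_READ, 0, bh);
	wait_on_buffer(bh);				/* sleeps until unlock_buffer() */
	return buffer_uptodate(bh) ? 0 : -EIO;
}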
179 * Various filesystems appear to want __find_get_block to be non-blocking.
191 struct inode *bd_inode = bdev->bd_inode; in __find_get_block_slow()
192 struct address_space *bd_mapping = bd_inode->i_mapping; in __find_get_block_slow()
201 index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); in __find_get_block_slow()
206 spin_lock(&bd_mapping->private_lock); in __find_get_block_slow()
214 else if (bh->b_blocknr == block) { in __find_get_block_slow()
219 bh = bh->b_this_page; in __find_get_block_slow()
233 (unsigned long long)bh->b_blocknr, in __find_get_block_slow()
234 bh->b_state, bh->b_size, bdev, in __find_get_block_slow()
235 1 << bd_inode->i_blkbits); in __find_get_block_slow()
238 spin_unlock(&bd_mapping->private_lock); in __find_get_block_slow()
254 page = bh->b_page; in end_buffer_async_read()
265 * two buffer heads end IO at almost the same time and both in end_buffer_async_read()
269 spin_lock_irqsave(&first->b_uptodate_lock, flags); in end_buffer_async_read()
280 tmp = tmp->b_this_page; in end_buffer_async_read()
282 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); in end_buffer_async_read()
294 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); in end_buffer_async_read()
307 struct buffer_head *bh = ctx->bh; in decrypt_bh()
310 err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size, in decrypt_bh()
317 * I/O completion handler for block_read_full_page() - pages
324 fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) { in end_buffer_async_read_io()
328 INIT_WORK(&ctx->work, decrypt_bh); in end_buffer_async_read_io()
329 ctx->bh = bh; in end_buffer_async_read_io()
330 fscrypt_enqueue_decrypt_work(&ctx->work); in end_buffer_async_read_io()
339 * Completion handler for block_write_full_page() - pages which are unlocked
351 page = bh->b_page; in end_buffer_async_write()
362 spin_lock_irqsave(&first->b_uptodate_lock, flags); in end_buffer_async_write()
366 tmp = bh->b_this_page; in end_buffer_async_write()
372 tmp = tmp->b_this_page; in end_buffer_async_write()
374 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); in end_buffer_async_write()
379 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); in end_buffer_async_write()
389 * locked buffer would confuse end_buffer_async_read() into not unlocking
391 * that this buffer is not under async I/O.
407 bh->b_end_io = end_buffer_async_read_io; in mark_buffer_async_read()
414 bh->b_end_io = handler; in mark_buffer_async_write_endio()
426 * fs/buffer.c contains helper functions for buffer-backed address space's
427 * fsync functions. A common requirement for buffer-based filesystems is
429 * a successful fsync(). For example, ext2 indirect blocks need to be
434 * management of a list of dependent buffers at ->i_mapping->private_list.
443 * mapping->private_lock does *not* protect mapping->private_list! In fact,
444 * mapping->private_list will always be protected by the backing blockdev's
445 * ->private_lock.
448 * ->private_list must be from the same address_space: the blockdev's.
450 * address_spaces which do not place buffers at ->private_list via these
459 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
464 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
465 * list if it is already on a list. Because if the buffer is on a list,
475 * The buffer's backing address_space's private_lock must be held
479 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
480 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
481 bh->b_assoc_map = NULL; in __remove_assoc_queue()
486 return !list_empty(&inode->i_data.private_list); in inode_has_buffers()
491 * all already-submitted IO to complete, but does not queue any new
494 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
514 err = -EIO; in osync_buffers_list()
526 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb)) in emergency_thaw_bdev()
527 printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev); in emergency_thaw_bdev()
531 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
534 * Starts I/O against the buffers at mapping->private_list, and waits upon
543 struct address_space *buffer_mapping = mapping->private_data; in sync_mapping_buffers()
545 if (buffer_mapping == NULL || list_empty(&mapping->private_list)) in sync_mapping_buffers()
548 return fsync_buffers_list(&buffer_mapping->private_lock, in sync_mapping_buffers()
549 &mapping->private_list); in sync_mapping_buffers()
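Tying the pieces above together, a hedged sketch of how a simple buffer-based filesystem uses this machinery (hypothetical myfs_ names, not from this file): metadata buffers are attached to the data inode with mark_buffer_dirty_inode() (its body appears just below), and ->fsync later flushes them via sync_mapping_buffers():

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* While writing: dirty an indirect/metadata block and associate it with
 * the data inode, so a later fsync() of that inode will flush it too. */
static void myfs_dirty_indirect(struct inode *inode, struct buffer_head *bh)
{
	mark_buffer_dirty_inode(bh, inode);	/* queues bh on i_mapping->private_list */
}

/* ->fsync(): flush data pages, then write and wait on associated buffers. */
static int myfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	int err, err2;

	err = file_write_and_wait_range(file, start, end);	/* data pages */
	err2 = sync_mapping_buffers(inode->i_mapping);		/* associated metadata */
	return err ? err : err2;
}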
555 * `bblock' was for a buffer_boundary() buffer. This means that the block at
572 struct address_space *mapping = inode->i_mapping; in mark_buffer_dirty_inode()
573 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
576 if (!mapping->private_data) { in mark_buffer_dirty_inode()
577 mapping->private_data = buffer_mapping; in mark_buffer_dirty_inode()
579 BUG_ON(mapping->private_data != buffer_mapping); in mark_buffer_dirty_inode()
581 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
582 spin_lock(&buffer_mapping->private_lock); in mark_buffer_dirty_inode()
583 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
584 &mapping->private_list); in mark_buffer_dirty_inode()
585 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
586 spin_unlock(&buffer_mapping->private_lock); in mark_buffer_dirty_inode()
605 xa_lock_irqsave(&mapping->i_pages, flags); in __set_page_dirty()
606 if (page->mapping) { /* Race with truncate? */ in __set_page_dirty()
609 __xa_set_mark(&mapping->i_pages, page_index(page), in __set_page_dirty()
612 xa_unlock_irqrestore(&mapping->i_pages, flags); in __set_page_dirty()
623 * dirty-state coherency between the page and the buffers. If the page does
629 * buffer dirtiness. That's fine. If this code were to set the page dirty
635 * page's buffer list. Also use this to protect against clean buffers being
638 * FIXME: may need to call ->reservepage here as well. That's rather up to the
649 spin_lock(&mapping->private_lock); in __set_page_dirty_buffers()
656 bh = bh->b_this_page; in __set_page_dirty_buffers()
660 * Lock out page->mem_cgroup migration to keep PageDirty in __set_page_dirty_buffers()
661 * synchronized with per-memcg dirty page counters. in __set_page_dirty_buffers()
665 spin_unlock(&mapping->private_lock); in __set_page_dirty_buffers()
673 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in __set_page_dirty_buffers()
692 * up refiling the buffer on the original inode's dirty list again, so
693 * there is a chance we will end up with a buffer queued for write but
711 bh = BH_ENTRY(list->next); in fsync_buffers_list()
712 mapping = bh->b_assoc_map; in fsync_buffers_list()
718 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
719 bh->b_assoc_map = mapping; in fsync_buffers_list()
726 * current contents - it is a noop if I/O is in fsync_buffers_list()
751 mapping = bh->b_assoc_map; in fsync_buffers_list()
757 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
758 &mapping->private_list); in fsync_buffers_list()
759 bh->b_assoc_map = mapping; in fsync_buffers_list()
764 err = -EIO; in fsync_buffers_list()
789 struct address_space *mapping = &inode->i_data; in invalidate_inode_buffers()
790 struct list_head *list = &mapping->private_list; in invalidate_inode_buffers()
791 struct address_space *buffer_mapping = mapping->private_data; in invalidate_inode_buffers()
793 spin_lock(&buffer_mapping->private_lock); in invalidate_inode_buffers()
795 __remove_assoc_queue(BH_ENTRY(list->next)); in invalidate_inode_buffers()
796 spin_unlock(&buffer_mapping->private_lock); in invalidate_inode_buffers()
802 * Remove any clean buffers from the inode's buffer list. This is called
812 struct address_space *mapping = &inode->i_data; in remove_inode_buffers()
813 struct list_head *list = &mapping->private_list; in remove_inode_buffers()
814 struct address_space *buffer_mapping = mapping->private_data; in remove_inode_buffers()
816 spin_lock(&buffer_mapping->private_lock); in remove_inode_buffers()
818 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers()
825 spin_unlock(&buffer_mapping->private_lock); in remove_inode_buffers()
832 * the size of each buffer.. Use the bh->b_this_page linked list to
837 * which may not fail from ordinary buffer allocations.
855 while ((offset -= size) >= 0) { in alloc_page_buffers()
860 bh->b_this_page = head; in alloc_page_buffers()
861 bh->b_blocknr = -1; in alloc_page_buffers()
864 bh->b_size = size; in alloc_page_buffers()
866 /* Link the buffer to its page */ in alloc_page_buffers()
880 head = head->b_this_page; in alloc_page_buffers()
897 bh = bh->b_this_page; in link_dev_buffers()
899 tail->b_this_page = head; in link_dev_buffers()
906 loff_t sz = i_size_read(bdev->bd_inode); in blkdev_max_block()
925 sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size); in init_page_buffers()
929 bh->b_end_io = NULL; in init_page_buffers()
930 bh->b_private = NULL; in init_page_buffers()
931 bh->b_bdev = bdev; in init_page_buffers()
932 bh->b_blocknr = block; in init_page_buffers()
939 bh = bh->b_this_page; in init_page_buffers()
949 * Create the page-cache page that contains the requested block.
957 struct inode *inode = bdev->bd_inode; in grow_dev_page()
964 gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp; in grow_dev_page()
974 page = find_or_create_page(inode->i_mapping, index, gfp_mask); in grow_dev_page()
980 if (bh->b_size == size) { in grow_dev_page()
1000 spin_lock(&inode->i_mapping->private_lock); in grow_dev_page()
1004 spin_unlock(&inode->i_mapping->private_lock); in grow_dev_page()
1006 ret = (block < end_block) ? 1 : -ENXIO; in grow_dev_page()
1023 sizebits = -1; in grow_buffers()
1035 printk(KERN_ERR "%s: requested out-of-range block %llu for " in grow_buffers()
1039 return -EIO; in grow_buffers()
1051 if (unlikely(size & (bdev_logical_block_size(bdev)-1) || in __getblk_slow()
1089 * When a buffer is marked dirty, its page is dirtied, but the page's other
1094 * uptodate - even if all of its buffers are uptodate. A subsequent
1100 * mark_buffer_dirty - mark a buffer_head as needing writeout
1103 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1108 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1109 * i_pages lock and mapping->host->i_lock.
1118 * Very *carefully* optimize the it-is-already-dirty case. in mark_buffer_dirty()
1121 * perhaps modified the buffer. in mark_buffer_dirty()
1130 struct page *page = bh->b_page; in mark_buffer_dirty()
1141 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in mark_buffer_dirty()
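mark_buffer_dirty() is usually the last step of a read-modify cycle on a metadata block. A minimal hedged sketch (hypothetical myfs_ helper, error handling trimmed):

#include <linux/buffer_head.h>

static int myfs_update_u32(struct super_block *sb, sector_t block,
			   unsigned int offset, u32 val)
{
	struct buffer_head *bh = sb_bread(sb, block);	/* read (or find) the block */

	if (!bh)
		return -EIO;
	lock_buffer(bh);
	*(__le32 *)(bh->b_data + offset) = cpu_to_le32(val);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);	/* dirties the bh and its page, as described above */
	brelse(bh);		/* periodic writeback will pick the block up later */
	return 0;
}

If the update has to reach the disk before returning, the caller can follow mark_buffer_dirty() with sync_dirty_buffer(bh) (see __sync_dirty_buffer() further down) before the brelse().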
1152 if (bh->b_page && bh->b_page->mapping) in mark_buffer_write_io_error()
1153 mapping_set_error(bh->b_page->mapping, -EIO); in mark_buffer_write_io_error()
1154 if (bh->b_assoc_map) in mark_buffer_write_io_error()
1155 mapping_set_error(bh->b_assoc_map, -EIO); in mark_buffer_write_io_error()
1157 sb = READ_ONCE(bh->b_bdev->bd_super); in mark_buffer_write_io_error()
1159 errseq_set(&sb->s_wb_err, -EIO); in mark_buffer_write_io_error()
1173 if (atomic_read(&buf->b_count)) { in __brelse()
1177 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); in __brelse()
1188 if (bh->b_assoc_map) { in __bforget()
1189 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1191 spin_lock(&buffer_mapping->private_lock); in __bforget()
1192 list_del_init(&bh->b_assoc_buffers); in __bforget()
1193 bh->b_assoc_map = NULL; in __bforget()
1194 spin_unlock(&buffer_mapping->private_lock); in __bforget()
1208 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1219 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1220 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1221 * refcount elevated by one when they're in an LRU. A buffer can only appear
1222 * once in a particular CPU's LRU. A single buffer can be present in multiple
1225 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
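A hedged, simplified rendering of the install side of this cache, essentially bh_lru_install() below with the per-CPU plumbing and preemption protection stripped out:

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];	/* bhs[0] is the most recent entry */
};

static void lru_install(struct bh_lru *lru, struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, lru->bhs[i]);	/* shift older entries down one slot */
		if (evictee == bh)
			return;			/* bh was already in the array: no
						 * extra reference is needed */
	}
	get_bh(bh);				/* new entry: the LRU holds one reference */
	brelse(evictee);			/* oldest entry (may be NULL) drops its ref */
}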
1271 swap(evictee, b->bhs[i]); in bh_lru_install()
1297 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && in lookup_bh_lru()
1298 bh->b_size == size) { in lookup_bh_lru()
1302 __this_cpu_read(bh_lrus.bhs[i - 1])); in lookup_bh_lru()
1303 i--; in lookup_bh_lru()
1317 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1341 * returned buffer has its reference count incremented.
1360 * Do async read-ahead on a buffer..
1384 * __bread_gfp() - reads a specified block and returns the bh
1390 * Reads a specified block, and returns buffer head that contains it.
1391 * The page cache can be allocated from a non-movable area
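As a hedged usage note (hypothetical myfs_ function): the common pattern is to read with __bread()/sb_bread(), check for NULL, and drop the reference with brelse() when done; the block number and size here are illustrative:

#include <linux/buffer_head.h>

static int myfs_peek_block(struct block_device *bdev)
{
	struct buffer_head *bh = __bread(bdev, 1 /* block nr */, 1024 /* block size */);

	if (!bh)
		return -EIO;	/* I/O error or allocation failure */
	/* bh->b_data now points at 1024 bytes of uptodate block contents. */
	pr_info("first bytes: %*ph\n", 8, bh->b_data);
	brelse(bh);		/* drop the reference returned by __bread() */
	return 0;
}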
1408 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1418 brelse(b->bhs[i]); in invalidate_bh_lru()
1419 b->bhs[i] = NULL; in invalidate_bh_lru()
1430 if (b->bhs[i]) in has_bh_in_lru()
1446 bh->b_page = page; in set_bh_page()
1452 bh->b_data = (char *)(0 + offset); in set_bh_page()
1454 bh->b_data = page_address(page) + offset; in set_bh_page()
1459 * Called when truncating a buffer on a page completely.
1473 bh->b_bdev = NULL; in discard_buffer()
1474 b_state = bh->b_state; in discard_buffer()
1476 b_state_old = cmpxchg(&bh->b_state, b_state, in discard_buffer()
1486 * block_invalidatepage - invalidate part or all of a buffer-backed page
1496 * ensure that no dirty buffer is left outside @offset and that no I/O
1499 * blocks on-disk.
1520 unsigned int next_off = curr_off + bh->b_size; in block_invalidatepage()
1521 next = bh->b_this_page; in block_invalidatepage()
1564 bh->b_state |= b_state; in create_empty_buffers()
1566 bh = bh->b_this_page; in create_empty_buffers()
1568 tail->b_this_page = head; in create_empty_buffers()
1570 spin_lock(&page->mapping->private_lock); in create_empty_buffers()
1578 bh = bh->b_this_page; in create_empty_buffers()
1582 spin_unlock(&page->mapping->private_lock); in create_empty_buffers()
1593 * buffer-cache aliases starting from return from this function and until the
1594 * moment when something will explicitly mark the buffer dirty (hopefully that
1595 * will not happen until we will free that block ;-) We don't even need to mark
1596 * it not-uptodate - nobody can expect anything from a newly allocated buffer
1598 * wrong. We definitely don't want to mark the alias unmapped, for example - it
1601 * Also.. Note that bforget() doesn't lock the buffer. So there can be
1602 * writeout I/O going on against recently-freed buffers. We don't wait on that
1603 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1608 struct inode *bd_inode = bdev->bd_inode; in clean_bdev_aliases()
1609 struct address_space *bd_mapping = bd_inode->i_mapping; in clean_bdev_aliases()
1611 pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); in clean_bdev_aliases()
1617 end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits); in clean_bdev_aliases()
1627 * We use page lock instead of bd_mapping->private_lock in clean_bdev_aliases()
1638 if (!buffer_mapped(bh) || (bh->b_blocknr < block)) in clean_bdev_aliases()
1640 if (bh->b_blocknr >= block + len) in clean_bdev_aliases()
1646 bh = bh->b_this_page; in clean_bdev_aliases()
1661 * Size is a power-of-two in the range 512..PAGE_SIZE,
1666 * architecture has a slow bit-scan instruction)
1678 create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits), in create_page_buffers()
1688 * No No "unknown" - must do get_block()
1689 * No Yes "hole" - zero-filled
1690 * Yes No "allocated" - allocated on disk, not read in
1691 * Yes Yes "valid" - allocated and up-to-date in memory.
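The two columns above are the buffer's mapped and uptodate bits. All four states are produced by the filesystem's get_block_t callback; a hedged sketch with hypothetical myfs_lookup_block()/myfs_alloc_block() helpers:

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* Hypothetical per-filesystem block-mapping helpers (0 means "no block"). */
static sector_t myfs_lookup_block(struct inode *inode, sector_t iblock);
static sector_t myfs_alloc_block(struct inode *inode, sector_t iblock);

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	sector_t phys = myfs_lookup_block(inode, iblock);

	if (phys) {
		/* "allocated": mapped on disk, contents not necessarily read in */
		map_bh(bh_result, inode->i_sb, phys);
		return 0;
	}
	if (!create)
		return 0;		/* left unmapped: a "hole", zero-filled by callers */

	phys = myfs_alloc_block(inode, iblock);
	if (!phys)
		return -ENOSPC;
	map_bh(bh_result, inode->i_sb, phys);
	set_buffer_new(bh_result);	/* freshly allocated: callers must zero/clean aliases */
	return 0;
}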
1699 * again at any time. We handle that by only looking at the buffer
1703 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1704 * locked buffer. This only can happen if someone has written the buffer
1708 * If block_write_full_page() is called with wbc->sync_mode ==
1730 * any time. If a buffer becomes dirty here after we've inspected it in __block_write_full_page()
1738 blocksize = bh->b_size; in __block_write_full_page()
1741 block = (sector_t)page->index << (PAGE_SHIFT - bbits); in __block_write_full_page()
1742 last_block = (i_size_read(inode) - 1) >> bbits; in __block_write_full_page()
1756 * The buffer was zeroed by block_write_full_page() in __block_write_full_page()
1762 WARN_ON(bh->b_size != blocksize); in __block_write_full_page()
1773 bh = bh->b_this_page; in __block_write_full_page()
1781 * If it's a fully non-blocking write attempt and we cannot in __block_write_full_page()
1782 * lock the buffer then redirty the page. Note that this can in __block_write_full_page()
1783 * potentially cause a busy-wait loop from writeback threads in __block_write_full_page()
1785 * higher-level throttling. in __block_write_full_page()
1787 if (wbc->sync_mode != WB_SYNC_NONE) { in __block_write_full_page()
1798 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1808 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1811 inode->i_write_hint, wbc); in __block_write_full_page()
1851 * The buffer may have been set dirty during in __block_write_full_page()
1856 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1859 mapping_set_error(page->mapping, err); in __block_write_full_page()
1862 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1866 inode->i_write_hint, wbc); in __block_write_full_page()
1893 block_end = block_start + bh->b_size; in page_zero_new_buffers()
1901 size = min(to, block_end) - start; in page_zero_new_buffers()
1913 bh = bh->b_this_page; in page_zero_new_buffers()
1922 loff_t offset = block << inode->i_blkbits; in iomap_to_bh()
1924 bh->b_bdev = iomap->bdev; in iomap_to_bh()
1929 * current block, then do not map the buffer and let the caller in iomap_to_bh()
1932 BUG_ON(offset >= iomap->offset + iomap->length); in iomap_to_bh()
1934 switch (iomap->type) { in iomap_to_bh()
1937 * If the buffer is not up to date or beyond the current EOF, in iomap_to_bh()
1938 * we need to mark it as new to ensure sub-block zeroing is in iomap_to_bh()
1957 * buffer as new to ensure this. in iomap_to_bh()
1963 if ((iomap->flags & IOMAP_F_NEW) || in iomap_to_bh()
1966 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> in iomap_to_bh()
1967 inode->i_blkbits; in iomap_to_bh()
1976 unsigned from = pos & (PAGE_SIZE - 1); in __block_write_begin_int()
1978 struct inode *inode = page->mapping->host; in __block_write_begin_int()
1991 blocksize = head->b_size; in __block_write_begin_int()
1994 block = (sector_t)page->index << (PAGE_SHIFT - bbits); in __block_write_begin_int()
1997 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin_int()
2009 WARN_ON(bh->b_size != blocksize); in __block_write_begin_int()
2046 * If we issued read requests - let them complete. in __block_write_begin_int()
2049 wait_on_buffer(*--wait_bh); in __block_write_begin_int()
2051 err = -EIO; in __block_write_begin_int()
2074 blocksize = bh->b_size; in __block_commit_write()
2089 bh = bh->b_this_page; in __block_commit_write()
2118 return -ENOMEM; in block_write_begin()
2136 struct inode *inode = mapping->host; in block_write_end()
2139 start = pos & (PAGE_SIZE - 1); in block_write_end()
2146 * a short write and only partially written into a buffer, it in block_write_end()
2151 * non uptodate page as a zero-length write, and force the in block_write_end()
2161 /* This could be a short (even 0-length) commit */ in block_write_end()
2172 struct inode *inode = mapping->host; in generic_write_end()
2173 loff_t old_size = inode->i_size; in generic_write_end()
2185 if (pos + copied > inode->i_size) { in generic_write_end()
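For context, a hedged sketch of how a filesystem wires its buffered-write path to these helpers on kernels of this vintage (where ->write_begin still takes a flags argument); myfs_get_block is the hypothetical callback from the earlier sketch:

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 myfs_get_block);	/* map/read the buffers touched */
}

static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,	/* commits buffers, updates i_size */
	/* .readpage / .writepage wrappers are sketched further down */
};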
2226 blocksize = head->b_size; in block_is_partially_uptodate()
2227 to = min_t(unsigned, PAGE_SIZE - from, count); in block_is_partially_uptodate()
2229 if (from < blocksize && to > PAGE_SIZE - blocksize) in block_is_partially_uptodate()
2245 bh = bh->b_this_page; in block_is_partially_uptodate()
2255 * Reads the page asynchronously --- the unlock_buffer() and
2256 * set/clear_buffer_uptodate() functions propagate buffer state into the
2261 struct inode *inode = page->mapping->host; in block_read_full_page()
2269 blocksize = head->b_size; in block_read_full_page()
2272 iblock = (sector_t)page->index << (PAGE_SHIFT - bbits); in block_read_full_page()
2273 lblock = (i_size_read(inode)+blocksize-1) >> bbits; in block_read_full_page()
2287 WARN_ON(bh->b_size != blocksize); in block_read_full_page()
2299 * get_block() might have updated the buffer in block_read_full_page()
2306 } while (i++, iblock++, (bh = bh->b_this_page) != head); in block_read_full_page()
2313 * All buffers are uptodate - we can set the page uptodate in block_read_full_page()
2331 * inside the buffer lock in case another process reading in block_read_full_page()
2351 struct address_space *mapping = inode->i_mapping; in generic_cont_expand_simple()
2376 struct inode *inode = mapping->host; in cont_expand_zero()
2390 if (zerofrom & (blocksize-1)) { in cont_expand_zero()
2391 *bytes |= (blocksize-1); in cont_expand_zero()
2394 len = PAGE_SIZE - zerofrom; in cont_expand_zero()
2411 err = -EINTR; in cont_expand_zero()
2423 if (zerofrom & (blocksize-1)) { in cont_expand_zero()
2424 *bytes |= (blocksize-1); in cont_expand_zero()
2427 len = offset - zerofrom; in cont_expand_zero()
2454 struct inode *inode = mapping->host; in cont_write_begin()
2464 if (pos+len > *bytes && zerofrom & (blocksize-1)) { in cont_write_begin()
2465 *bytes |= (blocksize-1); in cont_write_begin()
2475 struct inode *inode = page->mapping->host; in block_commit_write()
2497 * using sb_start_pagefault() - sb_end_pagefault() functions.
2502 struct page *page = vmf->page; in block_page_mkwrite()
2503 struct inode *inode = file_inode(vma->vm_file); in block_page_mkwrite()
2510 if ((page->mapping != inode->i_mapping) || in block_page_mkwrite()
2513 ret = -EFAULT; in block_page_mkwrite()
2518 if (((page->index + 1) << PAGE_SHIFT) > size) in block_page_mkwrite()
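A hedged sketch of a ->page_mkwrite handler built on block_page_mkwrite(), with the sb_start_pagefault()/sb_end_pagefault() freeze protection mentioned above; myfs_get_block is hypothetical, and block_page_mkwrite_return() maps the errno to a VM_FAULT_* code:

#include <linux/buffer_head.h>
#include <linux/mm.h>

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	sb_start_pagefault(inode->i_sb);	/* keep the fs from freezing mid-fault */
	err = block_page_mkwrite(vmf, myfs_get_block);
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);	/* 0 -> VM_FAULT_LOCKED, etc. */
}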
2549 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2559 spin_lock(&page->mapping->private_lock); in attach_nobh_buffers()
2564 if (!bh->b_this_page) in attach_nobh_buffers()
2565 bh->b_this_page = head; in attach_nobh_buffers()
2566 bh = bh->b_this_page; in attach_nobh_buffers()
2569 spin_unlock(&page->mapping->private_lock); in attach_nobh_buffers()
2582 struct inode *inode = mapping->host; in nobh_write_begin()
2583 const unsigned blkbits = inode->i_blkbits; in nobh_write_begin()
2597 from = pos & (PAGE_SIZE - 1); in nobh_write_begin()
2602 return -ENOMEM; in nobh_write_begin()
2622 * Be careful: the buffer linked list is a NULL terminated one, rather in nobh_write_begin()
2627 ret = -ENOMEM; in nobh_write_begin()
2631 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); in nobh_write_begin()
2636 * page is fully mapped-to-disk. in nobh_write_begin()
2640 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { in nobh_write_begin()
2644 bh->b_state = 0; in nobh_write_begin()
2669 bh->b_end_io = end_buffer_read_nobh; in nobh_write_begin()
2681 for (bh = head; bh; bh = bh->b_this_page) { in nobh_write_begin()
2684 ret = -EIO; in nobh_write_begin()
2722 struct inode *inode = page->mapping->host; in nobh_write_end()
2735 if (pos+copied > inode->i_size) { in nobh_write_end()
2745 head = head->b_this_page; in nobh_write_end()
2754 * nobh_writepage() - based on block_write_full_page() except
2761 struct inode * const inode = page->mapping->host; in nobh_writepage()
2768 if (page->index < end_index) in nobh_writepage()
2772 offset = i_size & (PAGE_SIZE-1); in nobh_writepage()
2773 if (page->index >= end_index+1 || !offset) { in nobh_writepage()
2788 if (ret == -EAGAIN) in nobh_writepage()
2799 unsigned offset = from & (PAGE_SIZE-1); in nobh_truncate_page()
2803 struct inode *inode = mapping->host; in nobh_truncate_page()
2809 length = offset & (blocksize - 1); in nobh_truncate_page()
2815 length = blocksize - length; in nobh_truncate_page()
2816 iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); in nobh_truncate_page()
2819 err = -ENOMEM; in nobh_truncate_page()
2830 /* Find the buffer that contains "offset" */ in nobh_truncate_page()
2842 /* unmapped? It's a hole - nothing to do */ in nobh_truncate_page()
2846 /* Ok, it's mapped. Make sure it's up-to-date */ in nobh_truncate_page()
2848 err = mapping->a_ops->readpage(NULL, page); in nobh_truncate_page()
2855 err = -EIO; in nobh_truncate_page()
2877 unsigned offset = from & (PAGE_SIZE-1); in block_truncate_page()
2881 struct inode *inode = mapping->host; in block_truncate_page()
2887 length = offset & (blocksize - 1); in block_truncate_page()
2893 length = blocksize - length; in block_truncate_page()
2894 iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); in block_truncate_page()
2897 err = -ENOMEM; in block_truncate_page()
2904 /* Find the buffer that contains "offset" */ in block_truncate_page()
2908 bh = bh->b_this_page; in block_truncate_page()
2915 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2919 /* unmapped? It's a hole - nothing to do */ in block_truncate_page()
2924 /* Ok, it's mapped. Make sure it's up-to-date */ in block_truncate_page()
2929 err = -EIO; in block_truncate_page()
2950 * The generic ->writepage function for buffer-backed address_spaces
2955 struct inode * const inode = page->mapping->host; in block_write_full_page()
2961 if (page->index < end_index) in block_write_full_page()
2966 offset = i_size & (PAGE_SIZE-1); in block_write_full_page()
2967 if (page->index >= end_index+1 || !offset) { in block_write_full_page()
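Continuing the hypothetical myfs_ sketch, the usual one-line address_space_operations wrappers around block_write_full_page() and block_read_full_page():

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}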
2988 struct inode *inode = mapping->host; in generic_block_bmap()
3000 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync()
3003 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
3005 bh->b_end_io(bh, !bio->bi_status); in end_bio_bh_io_sync()
3016 BUG_ON(!bh->b_end_io); in submit_bh_wbc()
3030 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh_wbc()
3031 bio_set_dev(bio, bh->b_bdev); in submit_bh_wbc()
3032 bio->bi_write_hint = write_hint; in submit_bh_wbc()
3034 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in submit_bh_wbc()
3035 BUG_ON(bio->bi_iter.bi_size != bh->b_size); in submit_bh_wbc()
3037 bio->bi_end_io = end_bio_bh_io_sync; in submit_bh_wbc()
3038 bio->bi_private = bh; in submit_bh_wbc()
3051 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); in submit_bh_wbc()
3065 * ll_rw_block: low-level access to block devices (DEPRECATED)
3076 * This function drops any buffer that it cannot get a lock on (with the
3077 * BH_Lock state bit), any buffer that appears to be clean when doing a write
3078 * request, and any buffer that appears to be up-to-date when doing read
3080 * writing (the buffer cache won't assume that they are actually clean
3081 * until the buffer gets unlocked).
3084 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3101 bh->b_end_io = end_buffer_write_sync; in ll_rw_block()
3108 bh->b_end_io = end_buffer_read_sync; in ll_rw_block()
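A hedged sketch of the legacy batched-read pattern described in the comment above: because ll_rw_block() silently skips buffers it cannot lock or that already look clean/uptodate, the caller must wait and re-check buffer_uptodate() itself (myfs_read_blocks is hypothetical; the signature matches kernels where ll_rw_block() still exists):

#include <linux/buffer_head.h>

static int myfs_read_blocks(struct super_block *sb, sector_t start, int nr)
{
	struct buffer_head *bhs[16];
	int i, err = 0;

	if (nr > ARRAY_SIZE(bhs))
		return -EINVAL;
	for (i = 0; i < nr; i++) {
		bhs[i] = sb_getblk(sb, start + i);
		if (!bhs[i]) {
			err = -ENOMEM;
			goto out;
		}
	}
	ll_rw_block(REQ_OP_READ, 0, nr, bhs);	/* submits only the buffers it could lock */
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))	/* skipped, or the I/O failed */
			err = -EIO;
	}
out:
	while (i--)
		brelse(bhs[i]);
	return err;
}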
3126 bh->b_end_io = end_buffer_write_sync; in write_dirty_buffer()
3133 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3141 WARN_ON(atomic_read(&bh->b_count) < 1); in __sync_dirty_buffer()
3146 * device was hot-removed. Not much we can do but fail the I/O. in __sync_dirty_buffer()
3150 return -EIO; in __sync_dirty_buffer()
3154 bh->b_end_io = end_buffer_write_sync; in __sync_dirty_buffer()
3158 ret = -EIO; in __sync_dirty_buffer()
3190 * try_to_free_buffers() is non-blocking.
3194 return atomic_read(&bh->b_count) | in buffer_busy()
3195 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
3208 bh = bh->b_this_page; in drop_buffers()
3212 struct buffer_head *next = bh->b_this_page; in drop_buffers()
3214 if (bh->b_assoc_map) in drop_buffers()
3227 struct address_space * const mapping = page->mapping; in try_to_free_buffers()
3240 spin_lock(&mapping->private_lock); in try_to_free_buffers()
3259 spin_unlock(&mapping->private_lock); in try_to_free_buffers()
3265 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
3279 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3286 return -EPERM; in SYSCALL_DEFINE2()
3292 " system call\n", current->comm); in SYSCALL_DEFINE2()
3302 * Buffer-head allocation
3326 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) in recalc_bh_state()
3338 INIT_LIST_HEAD(&ret->b_assoc_buffers); in alloc_buffer_head()
3339 spin_lock_init(&ret->b_uptodate_lock); in alloc_buffer_head()
3351 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3366 brelse(b->bhs[i]); in buffer_exit_cpu_dead()
3367 b->bhs[i] = NULL; in buffer_exit_cpu_dead()
3375 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3378 * Return true if the buffer is up-to-date and false,
3379 * with the buffer locked, if not.
3394 * bh_submit_read - Submit a locked buffer for reading
3397 * Returns zero on success and -EIO on error.
3409 bh->b_end_io = end_buffer_read_sync; in bh_submit_read()
3414 return -EIO; in bh_submit_read()
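A hedged sketch combining the two helpers documented above into the usual "make sure this buffer is valid" idiom (hypothetical wrapper name):

static int myfs_read_bh_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))	/* already uptodate: nothing to do */
		return 0;
	/* here the buffer is locked and not uptodate: read it in and wait */
	return bh_submit_read(bh);	/* returns 0 on success, -EIO on error */
}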
3434 ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead", in buffer_init()