// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations. Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */
21
22 #include <linux/kernel.h>
23 #include <linux/sched/signal.h>
24 #include <linux/syscalls.h>
25 #include <linux/fs.h>
26 #include <linux/iomap.h>
27 #include <linux/mm.h>
28 #include <linux/percpu.h>
29 #include <linux/slab.h>
30 #include <linux/capability.h>
31 #include <linux/blkdev.h>
32 #include <linux/file.h>
33 #include <linux/quotaops.h>
34 #include <linux/highmem.h>
35 #include <linux/export.h>
36 #include <linux/backing-dev.h>
37 #include <linux/writeback.h>
38 #include <linux/hash.h>
39 #include <linux/suspend.h>
40 #include <linux/buffer_head.h>
41 #include <linux/task_io_accounting_ops.h>
42 #include <linux/bio.h>
43 #include <linux/cpu.h>
44 #include <linux/bitops.h>
45 #include <linux/mpage.h>
46 #include <linux/bit_spinlock.h>
47 #include <linux/pagevec.h>
48 #include <linux/sched/mm.h>
49 #include <trace/events/block.h>
50 #include <linux/fscrypt.h>
51 #include <linux/fsverity.h>
52 #include <linux/sched/isolation.h>
53
54 #include "internal.h"
55
56 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
57 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
58 enum rw_hint hint, struct writeback_control *wbc);
59
60 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
61
inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers. If all the
 * buffers are unlocked and clean then the folio_test_dirty information
 * is stale. If any of the buffers are locked, it is assumed they are
 * locked for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked. This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
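
/*
 * Usage sketch (editorial illustration, not kernel code): to inspect a
 * buffer's contents safely, take the lock rather than merely waiting,
 * since a waited-on buffer can be relocked by someone else at any time:
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh)) {
 *		// ...safe to look at bh->b_data here...
 *	}
 *	unlock_buffer(bh);
 */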

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler. Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
{
	struct address_space *bd_mapping = bdev->bd_mapping;
	const int blkbits = bd_mapping->host->i_blkbits;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = ((loff_t)block << blkbits) / PAGE_SIZE;
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	/*
	 * The folio lock protects the buffers. Callers that cannot block
	 * will fall back to serializing vs try_to_free_buffers() via
	 * the i_private_lock.
	 */
	if (atomic)
		spin_lock(&bd_mapping->i_private_lock);
	else
		folio_lock(folio);

	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We might be here because some of the buffers on this folio are
	 * not mapped. This is due to various races between file I/O on
	 * the block device and getblk. It gets dealt with elsewhere;
	 * don't warn if we had some unmapped buffers.
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << blkbits);
	}
out_unlock:
	if (atomic)
		spin_unlock(&bd_mapping->i_private_lock);
	else
		folio_unlock(folio);
	folio_put(folio);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	folio_end_read(folio, folio_uptodate);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed. This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions. A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync(). For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed. But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the
 * address_space which backs the buffers. Which is different from the
 * address_space against which the buffers are listed. So for a particular
 * address_space, mapping->i_private_lock does *not* protect
 * mapping->i_private_list! In fact, mapping->i_private_list will always be
 * protected by the backing blockdev's ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want. The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
 * filesystems should do that. invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
 * take an address_space, not an inode. And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list. Because if the buffer is on a list,
 * it *must* already be on the right one. If not, the filesystem is being
 * silly. This will save a ton of locking. But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate). That requires careful auditing of all
 * filesystems (do it inside bforget()). It could also be done by bringing
 * b_inode back.
 */
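
/*
 * Usage sketch (editorial illustration; "myfs" would be a hypothetical
 * filesystem): dependent metadata buffers are queued on i_private_list
 * with mark_buffer_dirty_inode() as they are dirtied, then written and
 * waited on from the fsync path:
 *
 *	mark_buffer_dirty_inode(bh, inode);	// queue on i_private_list
 *	...
 *	err = sync_mapping_buffers(inode->i_mapping);	// at fsync time
 */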

/*
 * The buffer's backing address_space's i_private_lock must be held.
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.i_private_list);
}

/*
 * osync is designed to support O_SYNC I/O. It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion. Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->i_private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->i_private_lock,
					&mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure. This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);
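
/*
 * Usage sketch (editorial illustration, not part of this file): a simple
 * filesystem can point its ->fsync at a thin wrapper; "myfs" is a
 * hypothetical name:
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}
 *
 *	const struct file_operations myfs_file_operations = {
 *		.fsync	= myfs_fsync,
 *		...
 *	};
 */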

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer. This means that the block at
 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
 * dirty, schedule it for IO. So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh;

	bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->i_private_data) {
		mapping->i_private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->i_private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->i_private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->i_private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation. Some filesystems need to do a little
 * work before calling this function. Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied. There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness. That's fine. If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list. This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context. Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->i_private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
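
/*
 * Usage sketch (editorial illustration): buffer_head-based filesystems
 * typically wire this up in their address_space_operations alongside
 * block_invalidate_folio(), defined later in this file; "myfs" is a
 * hypothetical name:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		...
 *	};
 */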

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't. After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go. Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list. So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;
	LIST_HEAD(tmp);

	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/*
		 * Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit.
		 */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/*
		 * Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit.
		 */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->i_private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode. We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync(). Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock. Which
 * assumes that all the buffers are against the blockdev. Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list. This is called
 * when we're trying to free the inode itself. Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);

			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a folio for the data area and
 * the size of each buffer. Use the bh->b_this_page linked list to
 * follow the buffers created. Return NULL if unable to create more
 * buffers.
 *
 * The gfp flags are used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp)
{
	struct buffer_head *bh, *head;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
{
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;

	return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
				    struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);

		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, unsigned size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t block = div_u64(folio_pos(folio), size);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping. Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
		pgoff_t index, unsigned size, gfp_t gfp)
{
	struct address_space *mapping = bdev->bd_mapping;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block = 0;

	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return false;

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev, size);
			goto unlock;
		}

		/*
		 * Retrying may succeed; for example the folio may finish
		 * writeback, or buffers may be cleaned. This should not
		 * happen very often; maybe we have old buffers attached to
		 * this blockdev's page cache and we're trying to change
		 * the block size?
		 */
		if (!try_to_free_buffers(folio)) {
			end_block = ~0ULL;
			goto unlock;
		}
	}

	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
	if (!bh)
		goto unlock;

	/*
	 * Link the folio to the buffers and initialise them. Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the folio lock.
	 */
	spin_lock(&mapping->i_private_lock);
	link_dev_buffers(folio, bh);
	end_block = folio_init_buffers(folio, bdev, size);
	spin_unlock(&mapping->i_private_lock);
unlock:
	folio_unlock(folio);
	folio_put(folio);
	return block < end_block;
}

/*
 * Create buffers for the specified block device block's folio. If
 * that folio was dirty, the buffers are set dirty also. Returns false
 * if we've hit a permanent error.
 */
static bool grow_buffers(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	loff_t pos;

	/*
	 * Check for a block which lies outside our maximum possible
	 * pagecache index.
	 */
	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return false;
	}

	/* Create a folio with the proper size buffers */
	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	      unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		if (!grow_buffers(bdev, block, size, gfp))
			return NULL;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page. If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also. When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate. But their backing page remains not
 * uptodate - even if all of its buffers are uptodate. A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->i_private_lock,
 * the i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		folio_memcg_lock(folio);
		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		folio_memcg_unlock(folio);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
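
/*
 * Usage sketch (editorial illustration): the canonical sequence for
 * modifying a metadata block through the buffer cache is read, modify,
 * dirty, release; writeback then happens asynchronously:
 *
 *	bh = sb_bread(sb, block);
 *	if (bh) {
 *		lock_buffer(bh);
 *		memset(bh->b_data, 0, bh->b_size);	// example change
 *		mark_buffer_dirty(bh);
 *		unlock_buffer(bh);
 *		brelse(bh);
 *	}
 */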

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map) {
		mapping_set_error(bh->b_assoc_map, -EIO);
		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
	}
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/**
 * __brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
 */
void __brelse(struct buffer_head *bh)
{
	if (atomic_read(&bh->b_count)) {
		put_bh(bh);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/**
 * __bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * This variant of bforget() can be called if @bh is guaranteed to not
 * be NULL.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_folio->mapping;

		spin_lock(&buffer_mapping->i_private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
 * refcount elevated by one when they're in an LRU. A buffer can only appear
 * once in a particular CPU's LRU. A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
 * a local interrupt disable for that.
 */
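
/*
 * Usage sketch (editorial illustration): callers never touch the LRU
 * directly; it is filled and consulted transparently on the lookup path:
 *
 *	bh = sb_find_get_block(sb, block);	// may hit this cpu's LRU
 *	if (bh)
 *		brelse(bh);			// drop the extra reference
 */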

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * The refcount of a buffer_head in the bh_lru prevents dropping the
	 * attached page (i.e., try_to_free_buffers), so it can cause page
	 * migration to fail. Skip putting an upcoming bh into the bh_lru
	 * until migration is done.
	 */
	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	if (cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return NULL;
	}
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
 * it in the LRU and mark it as accessed. If it is not present then return
 * NULL.
 */
static struct buffer_head *
find_get_block_common(struct block_device *bdev, sector_t block,
			unsigned size, bool atomic)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block, atomic);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}

struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	return find_get_block_common(bdev, block, size, true);
}
EXPORT_SYMBOL(__find_get_block);

/* Same as __find_get_block() but allows sleeping contexts. */
struct buffer_head *
__find_get_block_nonatomic(struct block_device *bdev, sector_t block,
			   unsigned size)
{
	return find_get_block_common(bdev, block, size, false);
}
EXPORT_SYMBOL(__find_get_block_nonatomic);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * The returned buffer head has its reference count incremented, but is
 * not locked. The caller should call brelse() when it has finished
 * with the buffer. The buffer may not be uptodate. If needed, the
 * caller can bring it uptodate either by reading it or overwriting it.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	if (gfpflags_allow_blocking(gfp))
		bh = __find_get_block_nonatomic(bdev, block, size);
	else
		bh = __find_get_block(bdev, block, size);
	if (bh)
		return bh;

	/* Only assert the allocation context once we may actually allocate. */
	might_alloc(gfp);
	return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);
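
/*
 * Usage sketch (editorial illustration): the gfp flags select the lookup
 * flavour above as well as the allocation behaviour, so atomic callers
 * should pass a non-blocking mask:
 *
 *	bh = bdev_getblk(bdev, block, size, GFP_NOWAIT | __GFP_MOVABLE);
 *	if (bh)
 *		brelse(bh);
 */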

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = bdev_getblk(bdev, block, size,
			GFP_NOWAIT | __GFP_MOVABLE);

	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread_gfp() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 * @gfp: Not page allocation flags; see below.
 *
 * You are not expected to call this function. You should use one of
 * sb_bread(), sb_bread_unmovable() or __bread().
 *
 * Read a specified block, and return the buffer head that refers to it.
 * If @gfp is 0, the memory will be allocated using the block device's
 * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be
 * allocated from a movable area. Do not pass in a complete set of
 * GFP flags.
 *
 * The returned buffer head has its refcount increased. The caller should
 * call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);

	/*
	 * Prefer looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp |= __GFP_NOFAIL;

	bh = bdev_getblk(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);
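
/*
 * Usage sketch (editorial illustration): filesystems normally reach this
 * through the sb_bread() wrapper, reading a block of the superblock's
 * block size:
 *
 *	bh = sb_bread(sb, block);
 *	if (!bh)
 *		return -EIO;		// block was unreadable
 *	// ...use bh->b_data...
 *	brelse(bh);
 */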

static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}

bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head *bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg(&bh->b_state, &b_state,
			      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}

/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point. Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * Is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidate_folio);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via i_private_lock. try_to_free_buffers
 * is already excluded via the folio lock.
 */
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;

	head = folio_alloc_buffers(folio, blocksize, gfp);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->i_private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->i_private_lock);

	return head;
}
EXPORT_SYMBOL(create_empty_buffers);
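
/*
 * Usage sketch (editorial illustration; "myfs_get_block" is hypothetical):
 * a filesystem preparing a locked folio for block I/O attaches unmapped
 * buffers first, then maps each one via its get_block routine:
 *
 *	head = create_empty_buffers(folio, 1 << inode->i_blkbits, 0);
 *	bh = head;
 *	do {
 *		myfs_get_block(inode, block++, bh, 0);
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */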
1718
1719 /**
1720 * clean_bdev_aliases: clean a range of buffers in block device
1721 * @bdev: Block device to clean buffers in
1722 * @block: Start of a range of blocks to clean
1723 * @len: Number of blocks to clean
1724 *
1725 * We are taking a range of blocks for data and we don't want writeback of any
1726 * buffer-cache aliases starting from return from this function and until the
1727 * moment when something will explicitly mark the buffer dirty (hopefully that
1728 * will not happen until we will free that block ;-) We don't even need to mark
1729 * it not-uptodate - nobody can expect anything from a newly allocated buffer
1730 * anyway. We used to use unmap_buffer() for such invalidation, but that was
1731 * wrong. We definitely don't want to mark the alias unmapped, for example - it
1732 * would confuse anyone who might pick it with bread() afterwards...
1733 *
1734 * Also.. Note that bforget() doesn't lock the buffer. So there can be
1735 * writeout I/O going on against recently-freed buffers. We don't wait on that
1736 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1737 * need to. That happens here.
1738 */
1739 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1740 {
1741 struct address_space *bd_mapping = bdev->bd_mapping;
1742 const int blkbits = bd_mapping->host->i_blkbits;
1743 struct folio_batch fbatch;
1744 pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
1745 pgoff_t end;
1746 int i, count;
1747 struct buffer_head *bh;
1748 struct buffer_head *head;
1749
1750 end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
1751 folio_batch_init(&fbatch);
1752 while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1753 count = folio_batch_count(&fbatch);
1754 for (i = 0; i < count; i++) {
1755 struct folio *folio = fbatch.folios[i];
1756
1757 if (!folio_buffers(folio))
1758 continue;
1759 /*
1760 * We use folio lock instead of bd_mapping->i_private_lock
1761 * to pin buffers here since we can afford to sleep and
1762 * it scales better than a global spinlock.
1763 */
1764 folio_lock(folio);
1765 /* Recheck when the folio is locked which pins bhs */
1766 head = folio_buffers(folio);
1767 if (!head)
1768 goto unlock_page;
1769 bh = head;
1770 do {
1771 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1772 goto next;
1773 if (bh->b_blocknr >= block + len)
1774 break;
1775 clear_buffer_dirty(bh);
1776 wait_on_buffer(bh);
1777 clear_buffer_req(bh);
1778 next:
1779 bh = bh->b_this_page;
1780 } while (bh != head);
1781 unlock_page:
1782 folio_unlock(folio);
1783 }
1784 folio_batch_release(&fbatch);
1785 cond_resched();
1786 /* End of range already reached? */
1787 if (index > end || !index)
1788 break;
1789 }
1790 }
1791 EXPORT_SYMBOL(clean_bdev_aliases);
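/*
 * Worked example of the index arithmetic above, with assumed sizes: for
 * 1k blocks (i_blkbits == 10) and 4k pages, block 8 starts at byte
 * offset 8 << 10 == 8192, i.e. page index 8192 / 4096 == 2 of the block
 * device's mapping; a range of len == 8 such blocks ends in page index
 * ((8 + 8 - 1) << 10) / 4096 == 3.
 */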
1792
1793 static struct buffer_head *folio_create_buffers(struct folio *folio,
1794 struct inode *inode,
1795 unsigned int b_state)
1796 {
1797 struct buffer_head *bh;
1798
1799 BUG_ON(!folio_test_locked(folio));
1800
1801 bh = folio_buffers(folio);
1802 if (!bh)
1803 bh = create_empty_buffers(folio,
1804 1 << READ_ONCE(inode->i_blkbits), b_state);
1805 return bh;
1806 }
1807
1808 /*
1809 * NOTE! All mapped/uptodate combinations are valid:
1810 *
1811 * Mapped Uptodate Meaning
1812 *
1813 * No No "unknown" - must do get_block()
1814 * No Yes "hole" - zero-filled
1815 * Yes No "allocated" - allocated on disk, not read in
1816 * Yes Yes "valid" - allocated and up-to-date in memory.
1817 *
1818 * "Dirty" is valid only with the last case (mapped+uptodate).
1819 */
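/*
 * Expressed as buffer-state predicates, a sketch of how a reader acts on
 * the table above: an uptodate buffer needs nothing; an unmapped,
 * non-uptodate buffer needs get_block() to find the mapping (or a hole);
 * a mapped but non-uptodate buffer needs the read itself:
 *
 *	if (buffer_uptodate(bh))
 *		return;
 *	if (!buffer_mapped(bh))
 *		get_block(inode, block, bh, 0);
 *	if (buffer_mapped(bh) && !buffer_uptodate(bh))
 *		bh_read_nowait(bh, 0);
 */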
1820
1821 /*
1822 * While block_write_full_folio is writing back the dirty buffers under
1823 * the page lock, whoever dirtied the buffers may decide to clean them
1824 * again at any time. We handle that by only looking at the buffer
1825 * state inside lock_buffer().
1826 *
1827 * If block_write_full_folio() is called for regular writeback
1828 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1829 * locked buffer. This can only happen if someone has written the buffer
1830 * directly, with submit_bh(). At the address_space level PageWriteback
1831 * prevents this contention from occurring.
1832 *
1833 * If block_write_full_folio() is called with wbc->sync_mode ==
1834 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1835 * causes the writes to be flagged as synchronous writes.
1836 */
1837 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1838 get_block_t *get_block, struct writeback_control *wbc)
1839 {
1840 int err;
1841 sector_t block;
1842 sector_t last_block;
1843 struct buffer_head *bh, *head;
1844 size_t blocksize;
1845 int nr_underway = 0;
1846 blk_opf_t write_flags = wbc_to_write_flags(wbc);
1847
1848 head = folio_create_buffers(folio, inode,
1849 (1 << BH_Dirty) | (1 << BH_Uptodate));
1850
1851 /*
1852 * Be very careful. We have no exclusion from block_dirty_folio
1853 * here, and the (potentially unmapped) buffers may become dirty at
1854 * any time. If a buffer becomes dirty here after we've inspected it
1855 * then we just miss that fact, and the folio stays dirty.
1856 *
1857 * Buffers outside i_size may be dirtied by block_dirty_folio;
1858 * handle that here by just cleaning them.
1859 */
1860
1861 bh = head;
1862 blocksize = bh->b_size;
1863
1864 block = div_u64(folio_pos(folio), blocksize);
1865 last_block = div_u64(i_size_read(inode) - 1, blocksize);
1866
1867 /*
1868 * Get all the dirty buffers mapped to disk addresses and
1869 * handle any aliases from the underlying blockdev's mapping.
1870 */
1871 do {
1872 if (block > last_block) {
1873 /*
1874 * mapped buffers outside i_size will occur, because
1875 * this folio can be outside i_size when there is a
1876 * truncate in progress.
1877 */
1878 /*
1879 * The buffer was zeroed by block_write_full_folio()
1880 */
1881 clear_buffer_dirty(bh);
1882 set_buffer_uptodate(bh);
1883 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1884 buffer_dirty(bh)) {
1885 WARN_ON(bh->b_size != blocksize);
1886 err = get_block(inode, block, bh, 1);
1887 if (err)
1888 goto recover;
1889 clear_buffer_delay(bh);
1890 if (buffer_new(bh)) {
1891 /* blockdev mappings never come here */
1892 clear_buffer_new(bh);
1893 clean_bdev_bh_alias(bh);
1894 }
1895 }
1896 bh = bh->b_this_page;
1897 block++;
1898 } while (bh != head);
1899
1900 do {
1901 if (!buffer_mapped(bh))
1902 continue;
1903 /*
1904 * If it's a fully non-blocking write attempt and we cannot
1905 * lock the buffer then redirty the folio. Note that this can
1906 * potentially cause a busy-wait loop from writeback threads
1907 * and kswapd activity, but those code paths have their own
1908 * higher-level throttling.
1909 */
1910 if (wbc->sync_mode != WB_SYNC_NONE) {
1911 lock_buffer(bh);
1912 } else if (!trylock_buffer(bh)) {
1913 folio_redirty_for_writepage(wbc, folio);
1914 continue;
1915 }
1916 if (test_clear_buffer_dirty(bh)) {
1917 mark_buffer_async_write_endio(bh,
1918 end_buffer_async_write);
1919 } else {
1920 unlock_buffer(bh);
1921 }
1922 } while ((bh = bh->b_this_page) != head);
1923
1924 /*
1925 * The folio and its buffers are protected by the writeback flag,
1926 * so we can drop the bh refcounts early.
1927 */
1928 BUG_ON(folio_test_writeback(folio));
1929 folio_start_writeback(folio);
1930
1931 do {
1932 struct buffer_head *next = bh->b_this_page;
1933 if (buffer_async_write(bh)) {
1934 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1935 inode->i_write_hint, wbc);
1936 nr_underway++;
1937 }
1938 bh = next;
1939 } while (bh != head);
1940 folio_unlock(folio);
1941
1942 err = 0;
1943 done:
1944 if (nr_underway == 0) {
1945 /*
1946 * The folio was marked dirty, but the buffers were
1947 * clean. Someone wrote them back by hand with
1948 * write_dirty_buffer/submit_bh. A rare case.
1949 */
1950 folio_end_writeback(folio);
1951
1952 /*
1953 * The folio and buffer_heads can be released at any time from
1954 * here on.
1955 */
1956 }
1957 return err;
1958
1959 recover:
1960 /*
1961 * ENOSPC, or some other error. We may already have added some
1962 * blocks to the file, so we need to write these out to avoid
1963 * exposing stale data.
1964 * The folio is currently locked and not marked for writeback
1965 */
1966 bh = head;
1967 /* Recovery: lock and submit the mapped buffers */
1968 do {
1969 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1970 !buffer_delay(bh)) {
1971 lock_buffer(bh);
1972 mark_buffer_async_write_endio(bh,
1973 end_buffer_async_write);
1974 } else {
1975 /*
1976 * The buffer may have been set dirty during
1977 * attachment to a dirty folio.
1978 */
1979 clear_buffer_dirty(bh);
1980 }
1981 } while ((bh = bh->b_this_page) != head);
1982 BUG_ON(folio_test_writeback(folio));
1983 mapping_set_error(folio->mapping, err);
1984 folio_start_writeback(folio);
1985 do {
1986 struct buffer_head *next = bh->b_this_page;
1987 if (buffer_async_write(bh)) {
1988 clear_buffer_dirty(bh);
1989 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1990 inode->i_write_hint, wbc);
1991 nr_underway++;
1992 }
1993 bh = next;
1994 } while (bh != head);
1995 folio_unlock(folio);
1996 goto done;
1997 }
1998 EXPORT_SYMBOL(__block_write_full_folio);
1999
2000 /*
2001 * If a folio has any new buffers, zero them out here, and mark them uptodate
2002 * and dirty so they'll be written out (in order to prevent uninitialised
2003 * block data from leaking). And clear the new bit.
2004 */
2005 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
2006 {
2007 size_t block_start, block_end;
2008 struct buffer_head *head, *bh;
2009
2010 BUG_ON(!folio_test_locked(folio));
2011 head = folio_buffers(folio);
2012 if (!head)
2013 return;
2014
2015 bh = head;
2016 block_start = 0;
2017 do {
2018 block_end = block_start + bh->b_size;
2019
2020 if (buffer_new(bh)) {
2021 if (block_end > from && block_start < to) {
2022 if (!folio_test_uptodate(folio)) {
2023 size_t start, xend;
2024
2025 start = max(from, block_start);
2026 xend = min(to, block_end);
2027
2028 folio_zero_segment(folio, start, xend);
2029 set_buffer_uptodate(bh);
2030 }
2031
2032 clear_buffer_new(bh);
2033 mark_buffer_dirty(bh);
2034 }
2035 }
2036
2037 block_start = block_end;
2038 bh = bh->b_this_page;
2039 } while (bh != head);
2040 }
2041 EXPORT_SYMBOL(folio_zero_new_buffers);
2042
2043 static int
2044 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2045 const struct iomap *iomap)
2046 {
2047 loff_t offset = (loff_t)block << inode->i_blkbits;
2048
2049 bh->b_bdev = iomap->bdev;
2050
2051 /*
2052 * Block points to offset in file we need to map, iomap contains
2053 * the offset at which the map starts. If the map ends before the
2054 * current block, then do not map the buffer and let the caller
2055 * handle it.
2056 */
2057 if (offset >= iomap->offset + iomap->length)
2058 return -EIO;
2059
2060 switch (iomap->type) {
2061 case IOMAP_HOLE:
2062 /*
2063 * If the buffer is not up to date or is beyond the current EOF,
2064 * we need to mark it as new to ensure sub-block zeroing is
2065 * executed if necessary.
2066 */
2067 if (!buffer_uptodate(bh) ||
2068 (offset >= i_size_read(inode)))
2069 set_buffer_new(bh);
2070 return 0;
2071 case IOMAP_DELALLOC:
2072 if (!buffer_uptodate(bh) ||
2073 (offset >= i_size_read(inode)))
2074 set_buffer_new(bh);
2075 set_buffer_uptodate(bh);
2076 set_buffer_mapped(bh);
2077 set_buffer_delay(bh);
2078 return 0;
2079 case IOMAP_UNWRITTEN:
2080 /*
2081 * For unwritten regions, we always need to ensure that regions
2082 * in the block we are not writing to are zeroed. Mark the
2083 * buffer as new to ensure this.
2084 */
2085 set_buffer_new(bh);
2086 set_buffer_unwritten(bh);
2087 fallthrough;
2088 case IOMAP_MAPPED:
2089 if ((iomap->flags & IOMAP_F_NEW) ||
2090 offset >= i_size_read(inode)) {
2091 /*
2092 * This can happen if truncating the block device races
2093 * with the check in the caller, as i_size updates on
2094 * block devices aren't synchronized by i_rwsem the way
2095 * they are for regular files.
2096 */
2097 if (S_ISBLK(inode->i_mode))
2098 return -EIO;
2099 set_buffer_new(bh);
2100 }
2101 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2102 inode->i_blkbits;
2103 set_buffer_mapped(bh);
2104 return 0;
2105 default:
2106 WARN_ON_ONCE(1);
2107 return -EIO;
2108 }
2109 }
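/*
 * Worked example of the IOMAP_MAPPED arithmetic above, with assumed
 * numbers: for 4k blocks (i_blkbits == 12), an extent reported as
 * iomap->offset == 1 MiB and iomap->addr == 8 MiB maps a write at file
 * offset 1 MiB + 8 KiB to
 *
 *	b_blocknr = (8 MiB + (1 MiB + 8 KiB) - 1 MiB) >> 12 == 2050,
 *
 * i.e. two 4k blocks past the start of the extent on disk.
 */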
2110
2111 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2112 get_block_t *get_block, const struct iomap *iomap)
2113 {
2114 size_t from = offset_in_folio(folio, pos);
2115 size_t to = from + len;
2116 struct inode *inode = folio->mapping->host;
2117 size_t block_start, block_end;
2118 sector_t block;
2119 int err = 0;
2120 size_t blocksize;
2121 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2122
2123 BUG_ON(!folio_test_locked(folio));
2124 BUG_ON(to > folio_size(folio));
2125 BUG_ON(from > to);
2126
2127 head = folio_create_buffers(folio, inode, 0);
2128 blocksize = head->b_size;
2129 block = div_u64(folio_pos(folio), blocksize);
2130
2131 for (bh = head, block_start = 0; bh != head || !block_start;
2132 block++, block_start = block_end, bh = bh->b_this_page) {
2133 block_end = block_start + blocksize;
2134 if (block_end <= from || block_start >= to) {
2135 if (folio_test_uptodate(folio)) {
2136 if (!buffer_uptodate(bh))
2137 set_buffer_uptodate(bh);
2138 }
2139 continue;
2140 }
2141 if (buffer_new(bh))
2142 clear_buffer_new(bh);
2143 if (!buffer_mapped(bh)) {
2144 WARN_ON(bh->b_size != blocksize);
2145 if (get_block)
2146 err = get_block(inode, block, bh, 1);
2147 else
2148 err = iomap_to_bh(inode, block, bh, iomap);
2149 if (err)
2150 break;
2151
2152 if (buffer_new(bh)) {
2153 clean_bdev_bh_alias(bh);
2154 if (folio_test_uptodate(folio)) {
2155 clear_buffer_new(bh);
2156 set_buffer_uptodate(bh);
2157 mark_buffer_dirty(bh);
2158 continue;
2159 }
2160 if (block_end > to || block_start < from)
2161 folio_zero_segments(folio,
2162 to, block_end,
2163 block_start, from);
2164 continue;
2165 }
2166 }
2167 if (folio_test_uptodate(folio)) {
2168 if (!buffer_uptodate(bh))
2169 set_buffer_uptodate(bh);
2170 continue;
2171 }
2172 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2173 !buffer_unwritten(bh) &&
2174 (block_start < from || block_end > to)) {
2175 bh_read_nowait(bh, 0);
2176 *wait_bh++ = bh;
2177 }
2178 }
2179 /*
2180 * If we issued read requests - let them complete.
2181 */
2182 while (wait_bh > wait) {
2183 wait_on_buffer(*--wait_bh);
2184 if (!buffer_uptodate(*wait_bh))
2185 err = -EIO;
2186 }
2187 if (unlikely(err))
2188 folio_zero_new_buffers(folio, from, to);
2189 return err;
2190 }
2191
2192 int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
2193 get_block_t *get_block)
2194 {
2195 return __block_write_begin_int(folio, pos, len, get_block, NULL);
2196 }
2197 EXPORT_SYMBOL(__block_write_begin);
2198
2199 static void __block_commit_write(struct folio *folio, size_t from, size_t to)
2200 {
2201 size_t block_start, block_end;
2202 bool partial = false;
2203 unsigned blocksize;
2204 struct buffer_head *bh, *head;
2205
2206 bh = head = folio_buffers(folio);
2207 if (!bh)
2208 return;
2209 blocksize = bh->b_size;
2210
2211 block_start = 0;
2212 do {
2213 block_end = block_start + blocksize;
2214 if (block_end <= from || block_start >= to) {
2215 if (!buffer_uptodate(bh))
2216 partial = true;
2217 } else {
2218 set_buffer_uptodate(bh);
2219 mark_buffer_dirty(bh);
2220 }
2221 if (buffer_new(bh))
2222 clear_buffer_new(bh);
2223
2224 block_start = block_end;
2225 bh = bh->b_this_page;
2226 } while (bh != head);
2227
2228 /*
2229 * If this is a partial write which happened to make all buffers
2230 * uptodate then we can optimize away a bogus read_folio() for
2231 * the next read(). Here we 'discover' whether the folio went
2232 * uptodate as a result of this (potentially partial) write.
2233 */
2234 if (!partial)
2235 folio_mark_uptodate(folio);
2236 }
2237
2238 /*
2239 * block_write_begin takes care of the basic task of block allocation and
2240 * bringing partial write blocks uptodate first.
2241 *
2242 * The filesystem needs to handle block truncation upon failure.
2243 */
2244 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2245 struct folio **foliop, get_block_t *get_block)
2246 {
2247 pgoff_t index = pos >> PAGE_SHIFT;
2248 struct folio *folio;
2249 int status;
2250
2251 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2252 mapping_gfp_mask(mapping));
2253 if (IS_ERR(folio))
2254 return PTR_ERR(folio);
2255
2256 status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2257 if (unlikely(status)) {
2258 folio_unlock(folio);
2259 folio_put(folio);
2260 folio = NULL;
2261 }
2262
2263 *foliop = folio;
2264 return status;
2265 }
2266 EXPORT_SYMBOL(block_write_begin);
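/*
 * Illustrative sketch: a typical ->write_begin() built on this helper,
 * where myfs_get_block is a hypothetical get_block_t supplied by the
 * filesystem (which must also truncate blocks instantiated past i_size
 * on failure, as noted above):
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, foliop,
 *					 myfs_get_block);
 *	}
 */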
2267
2268 int block_write_end(struct file *file, struct address_space *mapping,
2269 loff_t pos, unsigned len, unsigned copied,
2270 struct folio *folio, void *fsdata)
2271 {
2272 size_t start = pos - folio_pos(folio);
2273
2274 if (unlikely(copied < len)) {
2275 /*
2276 * The buffers that were written will now be uptodate, so
2277 * we don't have to worry about a read_folio reading them
2278 * and overwriting a partial write. However if we have
2279 * encountered a short write and only partially written
2280 * into a buffer, it will not be marked uptodate, so a
2281 * read_folio might come in and destroy our partial write.
2282 *
2283 * Do the simplest thing, and just treat any short write to a
2284 * non uptodate folio as a zero-length write, and force the
2285 * caller to redo the whole thing.
2286 */
2287 if (!folio_test_uptodate(folio))
2288 copied = 0;
2289
2290 folio_zero_new_buffers(folio, start+copied, start+len);
2291 }
2292 flush_dcache_folio(folio);
2293
2294 /* This could be a short (even 0-length) commit */
2295 __block_commit_write(folio, start, start + copied);
2296
2297 return copied;
2298 }
2299 EXPORT_SYMBOL(block_write_end);
2300
2301 int generic_write_end(struct file *file, struct address_space *mapping,
2302 loff_t pos, unsigned len, unsigned copied,
2303 struct folio *folio, void *fsdata)
2304 {
2305 struct inode *inode = mapping->host;
2306 loff_t old_size = inode->i_size;
2307 bool i_size_changed = false;
2308
2309 copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
2310
2311 /*
2312 * No need to use i_size_read() here, the i_size cannot change under us
2313 * because we hold i_rwsem.
2314 *
2315 * But it's important to update i_size while still holding folio lock:
2316 * page writeout could otherwise come in and zero beyond i_size.
2317 */
2318 if (pos + copied > inode->i_size) {
2319 i_size_write(inode, pos + copied);
2320 i_size_changed = true;
2321 }
2322
2323 folio_unlock(folio);
2324 folio_put(folio);
2325
2326 if (old_size < pos)
2327 pagecache_isize_extended(inode, old_size, pos);
2328 /*
2329 * Don't mark the inode dirty under page lock. First, it unnecessarily
2330 * makes the holding time of page lock longer. Second, it forces lock
2331 * ordering of page lock and transaction start for journaling
2332 * filesystems.
2333 */
2334 if (i_size_changed)
2335 mark_inode_dirty(inode);
2336 return copied;
2337 }
2338 EXPORT_SYMBOL(generic_write_end);
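/*
 * Illustrative sketch: pairing the two halves in a filesystem's
 * address_space_operations (myfs_write_begin is the hypothetical helper
 * sketched after block_write_begin() above):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		...
 *		.write_begin	= myfs_write_begin,
 *		.write_end	= generic_write_end,
 *	};
 */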
2339
2340 /*
2341 * block_is_partially_uptodate checks whether buffers within a folio are
2342 * uptodate or not.
2343 *
2344 * Returns true if all buffers which correspond to the specified part
2345 * of the folio are uptodate.
2346 */
2347 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2348 {
2349 unsigned block_start, block_end, blocksize;
2350 unsigned to;
2351 struct buffer_head *bh, *head;
2352 bool ret = true;
2353
2354 head = folio_buffers(folio);
2355 if (!head)
2356 return false;
2357 blocksize = head->b_size;
2358 to = min_t(unsigned, folio_size(folio) - from, count);
2359 to = from + to;
2360 if (from < blocksize && to > folio_size(folio) - blocksize)
2361 return false;
2362
2363 bh = head;
2364 block_start = 0;
2365 do {
2366 block_end = block_start + blocksize;
2367 if (block_end > from && block_start < to) {
2368 if (!buffer_uptodate(bh)) {
2369 ret = false;
2370 break;
2371 }
2372 if (block_end >= to)
2373 break;
2374 }
2375 block_start = block_end;
2376 bh = bh->b_this_page;
2377 } while (bh != head);
2378
2379 return ret;
2380 }
2381 EXPORT_SYMBOL(block_is_partially_uptodate);
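/*
 * Illustrative sketch: this is normally wired directly into the aops so
 * that sub-folio reads can skip ->read_folio() when the blocks they need
 * are already uptodate:
 *
 *	.is_partially_uptodate	= block_is_partially_uptodate,
 */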
2382
2383 /*
2384 * Generic "read_folio" function for block devices that have the normal
2385 * get_block functionality. This is most of the block device filesystems.
2386 * Reads the folio asynchronously --- the unlock_buffer() and
2387 * set/clear_buffer_uptodate() functions propagate buffer state into the
2388 * folio once IO has completed.
2389 */
2390 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2391 {
2392 struct inode *inode = folio->mapping->host;
2393 sector_t iblock, lblock;
2394 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2395 size_t blocksize;
2396 int nr, i;
2397 int fully_mapped = 1;
2398 bool page_error = false;
2399 loff_t limit = i_size_read(inode);
2400
2401 /* This is needed for ext4. */
2402 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2403 limit = inode->i_sb->s_maxbytes;
2404
2405 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2406
2407 head = folio_create_buffers(folio, inode, 0);
2408 blocksize = head->b_size;
2409
2410 iblock = div_u64(folio_pos(folio), blocksize);
2411 lblock = div_u64(limit + blocksize - 1, blocksize);
2412 bh = head;
2413 nr = 0;
2414 i = 0;
2415
2416 do {
2417 if (buffer_uptodate(bh))
2418 continue;
2419
2420 if (!buffer_mapped(bh)) {
2421 int err = 0;
2422
2423 fully_mapped = 0;
2424 if (iblock < lblock) {
2425 WARN_ON(bh->b_size != blocksize);
2426 err = get_block(inode, iblock, bh, 0);
2427 if (err)
2428 page_error = true;
2429 }
2430 if (!buffer_mapped(bh)) {
2431 folio_zero_range(folio, i * blocksize,
2432 blocksize);
2433 if (!err)
2434 set_buffer_uptodate(bh);
2435 continue;
2436 }
2437 /*
2438 * get_block() might have updated the buffer
2439 * synchronously
2440 */
2441 if (buffer_uptodate(bh))
2442 continue;
2443 }
2444 arr[nr++] = bh;
2445 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2446
2447 if (fully_mapped)
2448 folio_set_mappedtodisk(folio);
2449
2450 if (!nr) {
2451 /*
2452 * All buffers are uptodate or get_block() returned an
2453 * error when trying to map them - we can finish the read.
2454 */
2455 folio_end_read(folio, !page_error);
2456 return 0;
2457 }
2458
2459 /* Stage two: lock the buffers */
2460 for (i = 0; i < nr; i++) {
2461 bh = arr[i];
2462 lock_buffer(bh);
2463 mark_buffer_async_read(bh);
2464 }
2465
2466 /*
2467 * Stage 3: start the IO. Check for uptodateness
2468 * inside the buffer lock in case another process reading
2469 * the underlying blockdev brought it uptodate (the sct fix).
2470 */
2471 for (i = 0; i < nr; i++) {
2472 bh = arr[i];
2473 if (buffer_uptodate(bh))
2474 end_buffer_async_read(bh, 1);
2475 else
2476 submit_bh(REQ_OP_READ, bh);
2477 }
2478 return 0;
2479 }
2480 EXPORT_SYMBOL(block_read_full_folio);
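/*
 * Illustrative sketch: a minimal ->read_folio() for a get_block-based
 * filesystem (myfs_get_block is hypothetical):
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */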
2481
2482 /* Utility function for filesystems that need to do work on expanding
2483 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2484 * deal with the hole.
2485 */
2486 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2487 {
2488 struct address_space *mapping = inode->i_mapping;
2489 const struct address_space_operations *aops = mapping->a_ops;
2490 struct folio *folio;
2491 void *fsdata = NULL;
2492 int err;
2493
2494 err = inode_newsize_ok(inode, size);
2495 if (err)
2496 goto out;
2497
2498 err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
2499 if (err)
2500 goto out;
2501
2502 err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
2503 BUG_ON(err > 0);
2504
2505 out:
2506 return err;
2507 }
2508 EXPORT_SYMBOL(generic_cont_expand_simple);
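/*
 * Illustrative sketch: an expanding truncate in a hypothetical
 * filesystem's ->setattr(), run before i_size is updated:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > i_size_read(inode)) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */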
2509
2510 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2511 loff_t pos, loff_t *bytes)
2512 {
2513 struct inode *inode = mapping->host;
2514 const struct address_space_operations *aops = mapping->a_ops;
2515 unsigned int blocksize = i_blocksize(inode);
2516 struct folio *folio;
2517 void *fsdata = NULL;
2518 pgoff_t index, curidx;
2519 loff_t curpos;
2520 unsigned zerofrom, offset, len;
2521 int err = 0;
2522
2523 index = pos >> PAGE_SHIFT;
2524 offset = pos & ~PAGE_MASK;
2525
2526 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2527 zerofrom = curpos & ~PAGE_MASK;
2528 if (zerofrom & (blocksize-1)) {
2529 *bytes |= (blocksize-1);
2530 (*bytes)++;
2531 }
2532 len = PAGE_SIZE - zerofrom;
2533
2534 err = aops->write_begin(file, mapping, curpos, len,
2535 &folio, &fsdata);
2536 if (err)
2537 goto out;
2538 folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2539 err = aops->write_end(file, mapping, curpos, len, len,
2540 folio, fsdata);
2541 if (err < 0)
2542 goto out;
2543 BUG_ON(err != len);
2544 err = 0;
2545
2546 balance_dirty_pages_ratelimited(mapping);
2547
2548 if (fatal_signal_pending(current)) {
2549 err = -EINTR;
2550 goto out;
2551 }
2552 }
2553
2554 /* page covers the boundary, find the boundary offset */
2555 if (index == curidx) {
2556 zerofrom = curpos & ~PAGE_MASK;
2557 /* if we expand the file, the last block will be filled */
2558 if (offset <= zerofrom) {
2559 goto out;
2560 }
2561 if (zerofrom & (blocksize-1)) {
2562 *bytes |= (blocksize-1);
2563 (*bytes)++;
2564 }
2565 len = offset - zerofrom;
2566
2567 err = aops->write_begin(file, mapping, curpos, len,
2568 &folio, &fsdata);
2569 if (err)
2570 goto out;
2571 folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2572 err = aops->write_end(file, mapping, curpos, len, len,
2573 folio, fsdata);
2574 if (err < 0)
2575 goto out;
2576 BUG_ON(err != len);
2577 err = 0;
2578 }
2579 out:
2580 return err;
2581 }
2582
2583 /*
2584 * For moronic filesystems that do not allow holes in files.
2585 * We may have to extend the file.
2586 */
2587 int cont_write_begin(struct file *file, struct address_space *mapping,
2588 loff_t pos, unsigned len,
2589 struct folio **foliop, void **fsdata,
2590 get_block_t *get_block, loff_t *bytes)
2591 {
2592 struct inode *inode = mapping->host;
2593 unsigned int blocksize = i_blocksize(inode);
2594 unsigned int zerofrom;
2595 int err;
2596
2597 err = cont_expand_zero(file, mapping, pos, bytes);
2598 if (err)
2599 return err;
2600
2601 zerofrom = *bytes & ~PAGE_MASK;
2602 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2603 *bytes |= (blocksize-1);
2604 (*bytes)++;
2605 }
2606
2607 return block_write_begin(mapping, pos, len, foliop, get_block);
2608 }
2609 EXPORT_SYMBOL(cont_write_begin);
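/*
 * Illustrative sketch: a hole-less filesystem keeps a per-inode marker
 * of how far valid data extends and passes it as @bytes, so the gap is
 * zero-filled whenever a write lands past it (MYFS_I, mmu_private and
 * myfs_get_block are hypothetical):
 *
 *	static int myfs_cont_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, foliop,
 *				fsdata, myfs_get_block,
 *				&MYFS_I(mapping->host)->mmu_private);
 *	}
 */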
2610
2611 void block_commit_write(struct page *page, unsigned from, unsigned to)
2612 {
2613 struct folio *folio = page_folio(page);
2614 __block_commit_write(folio, from, to);
2615 }
2616 EXPORT_SYMBOL(block_commit_write);
2617
2618 /*
2619 * block_page_mkwrite() is not allowed to change the file size as it gets
2620 * called from a page fault handler when a page is first dirtied. Hence we must
2621 * be careful to check for EOF conditions here. We set the page up correctly
2622 * for a written page which means we get ENOSPC checking when writing into
2623 * holes and correct delalloc and unwritten extent mapping on filesystems that
2624 * support these features.
2625 *
2626 * We are not allowed to take the i_rwsem here so we have to play games to
2627 * protect against truncate races as the page could now be beyond EOF. Because
2628 * truncate writes the inode size before removing pages, once we have the
2629 * page lock we can determine safely if the page is beyond EOF. If it is not
2630 * beyond EOF, then the page is guaranteed safe against truncation until we
2631 * unlock the page.
2632 *
2633 * Direct callers of this function should protect against filesystem freezing
2634 * using sb_start_pagefault() - sb_end_pagefault() functions.
2635 */
2636 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2637 get_block_t get_block)
2638 {
2639 struct folio *folio = page_folio(vmf->page);
2640 struct inode *inode = file_inode(vma->vm_file);
2641 unsigned long end;
2642 loff_t size;
2643 int ret;
2644
2645 folio_lock(folio);
2646 size = i_size_read(inode);
2647 if ((folio->mapping != inode->i_mapping) ||
2648 (folio_pos(folio) >= size)) {
2649 /* We overload EFAULT to mean page got truncated */
2650 ret = -EFAULT;
2651 goto out_unlock;
2652 }
2653
2654 end = folio_size(folio);
2655 /* folio is wholly or partially inside EOF */
2656 if (folio_pos(folio) + end > size)
2657 end = size - folio_pos(folio);
2658
2659 ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2660 if (unlikely(ret))
2661 goto out_unlock;
2662
2663 __block_commit_write(folio, 0, end);
2664
2665 folio_mark_dirty(folio);
2666 folio_wait_stable(folio);
2667 return 0;
2668 out_unlock:
2669 folio_unlock(folio);
2670 return ret;
2671 }
2672 EXPORT_SYMBOL(block_page_mkwrite);
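/*
 * Illustrative sketch of a caller honouring the freeze protection noted
 * above; myfs_get_block is hypothetical, and block_page_mkwrite_return()
 * (from buffer_head.h) converts the error to a vm_fault_t:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int err;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(inode->i_sb);
 *		return block_page_mkwrite_return(err);
 *	}
 */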
2673
2674 int block_truncate_page(struct address_space *mapping,
2675 loff_t from, get_block_t *get_block)
2676 {
2677 pgoff_t index = from >> PAGE_SHIFT;
2678 unsigned blocksize;
2679 sector_t iblock;
2680 size_t offset, length, pos;
2681 struct inode *inode = mapping->host;
2682 struct folio *folio;
2683 struct buffer_head *bh;
2684 int err = 0;
2685
2686 blocksize = i_blocksize(inode);
2687 length = from & (blocksize - 1);
2688
2689 /* Block boundary? Nothing to do */
2690 if (!length)
2691 return 0;
2692
2693 length = blocksize - length;
2694 iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2695
2696 folio = filemap_grab_folio(mapping, index);
2697 if (IS_ERR(folio))
2698 return PTR_ERR(folio);
2699
2700 bh = folio_buffers(folio);
2701 if (!bh)
2702 bh = create_empty_buffers(folio, blocksize, 0);
2703
2704 /* Find the buffer that contains "offset" */
2705 offset = offset_in_folio(folio, from);
2706 pos = blocksize;
2707 while (offset >= pos) {
2708 bh = bh->b_this_page;
2709 iblock++;
2710 pos += blocksize;
2711 }
2712
2713 if (!buffer_mapped(bh)) {
2714 WARN_ON(bh->b_size != blocksize);
2715 err = get_block(inode, iblock, bh, 0);
2716 if (err)
2717 goto unlock;
2718 /* unmapped? It's a hole - nothing to do */
2719 if (!buffer_mapped(bh))
2720 goto unlock;
2721 }
2722
2723 /* Ok, it's mapped. Make sure it's up-to-date */
2724 if (folio_test_uptodate(folio))
2725 set_buffer_uptodate(bh);
2726
2727 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2728 err = bh_read(bh, 0);
2729 /* Uhhuh. Read error. Complain and punt. */
2730 if (err < 0)
2731 goto unlock;
2732 }
2733
2734 folio_zero_range(folio, offset, length);
2735 mark_buffer_dirty(bh);
2736
2737 unlock:
2738 folio_unlock(folio);
2739 folio_put(folio);
2740
2741 return err;
2742 }
2743 EXPORT_SYMBOL(block_truncate_page);
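/*
 * Illustrative sketch: a shrinking truncate typically zeroes the tail of
 * the new last block before freeing what follows (myfs_get_block and
 * myfs_free_blocks are hypothetical):
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *				  myfs_get_block);
 *	if (!err)
 *		myfs_free_blocks(inode);
 */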
2744
2745 /*
2746 * The generic ->writepage function for buffer-backed address_spaces
2747 */
2748 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2749 void *get_block)
2750 {
2751 struct inode * const inode = folio->mapping->host;
2752 loff_t i_size = i_size_read(inode);
2753
2754 /* Is the folio fully inside i_size? */
2755 if (folio_pos(folio) + folio_size(folio) <= i_size)
2756 return __block_write_full_folio(inode, folio, get_block, wbc);
2757
2758 /* Is the folio fully outside i_size? (truncate in progress) */
2759 if (folio_pos(folio) >= i_size) {
2760 folio_unlock(folio);
2761 return 0; /* don't care */
2762 }
2763
2764 /*
2765 * The folio straddles i_size. It must be zeroed out on each and every
2766 * writepage invocation because it may be mmapped. "A file is mapped
2767 * in multiples of the page size. For a file that is not a multiple of
2768 * the page size, the remaining memory is zeroed when mapped, and
2769 * writes to that region are not written out to the file."
2770 */
2771 folio_zero_segment(folio, offset_in_folio(folio, i_size),
2772 folio_size(folio));
2773 return __block_write_full_folio(inode, folio, get_block, wbc);
2774 }
2775
2776 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2777 get_block_t *get_block)
2778 {
2779 struct inode *inode = mapping->host;
2780 struct buffer_head tmp = {
2781 .b_size = i_blocksize(inode),
2782 };
2783
2784 get_block(inode, block, &tmp, 0);
2785 return tmp.b_blocknr;
2786 }
2787 EXPORT_SYMBOL(generic_block_bmap);
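/*
 * Illustrative sketch: the usual ->bmap() implementation is a one-line
 * wrapper (myfs_get_block is hypothetical):
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */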
2788
2789 static void end_bio_bh_io_sync(struct bio *bio)
2790 {
2791 struct buffer_head *bh = bio->bi_private;
2792
2793 if (unlikely(bio_flagged(bio, BIO_QUIET)))
2794 set_bit(BH_Quiet, &bh->b_state);
2795
2796 bh->b_end_io(bh, !bio->bi_status);
2797 bio_put(bio);
2798 }
2799
2800 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2801 enum rw_hint write_hint,
2802 struct writeback_control *wbc)
2803 {
2804 const enum req_op op = opf & REQ_OP_MASK;
2805 struct bio *bio;
2806
2807 BUG_ON(!buffer_locked(bh));
2808 BUG_ON(!buffer_mapped(bh));
2809 BUG_ON(!bh->b_end_io);
2810 BUG_ON(buffer_delay(bh));
2811 BUG_ON(buffer_unwritten(bh));
2812
2813 /*
2814 * Only clear out a write error when rewriting
2815 */
2816 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2817 clear_buffer_write_io_error(bh);
2818
2819 if (buffer_meta(bh))
2820 opf |= REQ_META;
2821 if (buffer_prio(bh))
2822 opf |= REQ_PRIO;
2823
2824 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2825
2826 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2827
2828 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2829 bio->bi_write_hint = write_hint;
2830
2831 bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
2832
2833 bio->bi_end_io = end_bio_bh_io_sync;
2834 bio->bi_private = bh;
2835
2836 /* Take care of bh's that straddle the end of the device */
2837 guard_bio_eod(bio);
2838
2839 if (wbc) {
2840 wbc_init_bio(wbc, bio);
2841 wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
2842 }
2843
2844 submit_bio(bio);
2845 }
2846
2847 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2848 {
2849 submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
2850 }
2851 EXPORT_SYMBOL(submit_bh);
2852
2853 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2854 {
2855 lock_buffer(bh);
2856 if (!test_clear_buffer_dirty(bh)) {
2857 unlock_buffer(bh);
2858 return;
2859 }
2860 bh->b_end_io = end_buffer_write_sync;
2861 get_bh(bh);
2862 submit_bh(REQ_OP_WRITE | op_flags, bh);
2863 }
2864 EXPORT_SYMBOL(write_dirty_buffer);
2865
2866 /*
2867 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2868 * and then start new I/O and then wait upon it. The caller must have a ref on
2869 * the buffer_head.
2870 */
2871 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2872 {
2873 WARN_ON(atomic_read(&bh->b_count) < 1);
2874 lock_buffer(bh);
2875 if (test_clear_buffer_dirty(bh)) {
2876 /*
2877 * The bh should be mapped, but it might not be if the
2878 * device was hot-removed. Not much we can do but fail the I/O.
2879 */
2880 if (!buffer_mapped(bh)) {
2881 unlock_buffer(bh);
2882 return -EIO;
2883 }
2884
2885 get_bh(bh);
2886 bh->b_end_io = end_buffer_write_sync;
2887 submit_bh(REQ_OP_WRITE | op_flags, bh);
2888 wait_on_buffer(bh);
2889 if (!buffer_uptodate(bh))
2890 return -EIO;
2891 } else {
2892 unlock_buffer(bh);
2893 }
2894 return 0;
2895 }
2896 EXPORT_SYMBOL(__sync_dirty_buffer);
2897
2898 int sync_dirty_buffer(struct buffer_head *bh)
2899 {
2900 return __sync_dirty_buffer(bh, REQ_SYNC);
2901 }
2902 EXPORT_SYMBOL(sync_dirty_buffer);
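/*
 * Illustrative sketch: the classic use is writing one critical metadata
 * block, e.g. a superblock, synchronously and checking the result
 * (sb_bh and myfs_handle_write_error are hypothetical):
 *
 *	mark_buffer_dirty(sb_bh);
 *	err = sync_dirty_buffer(sb_bh);
 *	if (err)
 *		myfs_handle_write_error(sb_bh);
 */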
2903
2904 static inline int buffer_busy(struct buffer_head *bh)
2905 {
2906 return atomic_read(&bh->b_count) |
2907 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2908 }
2909
2910 static bool
2911 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2912 {
2913 struct buffer_head *head = folio_buffers(folio);
2914 struct buffer_head *bh;
2915
2916 bh = head;
2917 do {
2918 if (buffer_busy(bh))
2919 goto failed;
2920 bh = bh->b_this_page;
2921 } while (bh != head);
2922
2923 do {
2924 struct buffer_head *next = bh->b_this_page;
2925
2926 if (bh->b_assoc_map)
2927 __remove_assoc_queue(bh);
2928 bh = next;
2929 } while (bh != head);
2930 *buffers_to_free = head;
2931 folio_detach_private(folio);
2932 return true;
2933 failed:
2934 return false;
2935 }
2936
2937 /**
2938 * try_to_free_buffers - Release buffers attached to this folio.
2939 * @folio: The folio.
2940 *
2941 * If any buffers are in use (dirty, under writeback, elevated refcount),
2942 * no buffers will be freed.
2943 *
2944 * If the folio is dirty but all the buffers are clean then we need to
2945 * be sure to mark the folio clean as well. This is because the folio
2946 * may be against a block device, and a later reattachment of buffers
2947 * to a dirty folio will set *all* buffers dirty. Which would corrupt
2948 * filesystem data on the same device.
2949 *
2950 * The same applies to regular filesystem folios: if all the buffers are
2951 * clean then we set the folio clean and proceed. To do that, we require
2952 * total exclusion from block_dirty_folio(). That is obtained with
2953 * i_private_lock.
2954 *
2955 * Exclusion against try_to_free_buffers may be obtained by either
2956 * locking the folio or by holding its mapping's i_private_lock.
2957 *
2958 * Context: Process context. @folio must be locked. Will not sleep.
2959 * Return: true if all buffers attached to this folio were freed.
2960 */
2961 bool try_to_free_buffers(struct folio *folio)
2962 {
2963 struct address_space * const mapping = folio->mapping;
2964 struct buffer_head *buffers_to_free = NULL;
2965 bool ret = false;
2966
2967 BUG_ON(!folio_test_locked(folio));
2968 if (folio_test_writeback(folio))
2969 return false;
2970
2971 if (mapping == NULL) { /* can this still happen? */
2972 ret = drop_buffers(folio, &buffers_to_free);
2973 goto out;
2974 }
2975
2976 spin_lock(&mapping->i_private_lock);
2977 ret = drop_buffers(folio, &buffers_to_free);
2978
2979 /*
2980 * If the filesystem writes its buffers by hand (eg ext3)
2981 * then we can have clean buffers against a dirty folio. We
2982 * clean the folio here; otherwise the VM will never notice
2983 * that the filesystem did any IO at all.
2984 *
2985 * Also, during truncate, discard_buffer will have marked all
2986 * the folio's buffers clean. We discover that here and clean
2987 * the folio also.
2988 *
2989 * i_private_lock must be held over this entire operation in order
2990 * to synchronise against block_dirty_folio and prevent the
2991 * dirty bit from being lost.
2992 */
2993 if (ret)
2994 folio_cancel_dirty(folio);
2995 spin_unlock(&mapping->i_private_lock);
2996 out:
2997 if (buffers_to_free) {
2998 struct buffer_head *bh = buffers_to_free;
2999
3000 do {
3001 struct buffer_head *next = bh->b_this_page;
3002 free_buffer_head(bh);
3003 bh = next;
3004 } while (bh != buffers_to_free);
3005 }
3006 return ret;
3007 }
3008 EXPORT_SYMBOL(try_to_free_buffers);
3009
3010 /*
3011 * Buffer-head allocation
3012 */
3013 static struct kmem_cache *bh_cachep __ro_after_init;
3014
3015 /*
3016 * Once the number of bh's in the machine exceeds this level, we start
3017 * stripping them in writeback.
3018 */
3019 static unsigned long max_buffer_heads __ro_after_init;
3020
3021 int buffer_heads_over_limit;
3022
3023 struct bh_accounting {
3024 int nr; /* Number of live bh's */
3025 int ratelimit; /* Limit cacheline bouncing */
3026 };
3027
3028 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3029
3030 static void recalc_bh_state(void)
3031 {
3032 int i;
3033 int tot = 0;
3034
3035 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3036 return;
3037 __this_cpu_write(bh_accounting.ratelimit, 0);
3038 for_each_online_cpu(i)
3039 tot += per_cpu(bh_accounting, i).nr;
3040 buffer_heads_over_limit = (tot > max_buffer_heads);
3041 }
3042
3043 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3044 {
3045 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3046 if (ret) {
3047 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3048 spin_lock_init(&ret->b_uptodate_lock);
3049 preempt_disable();
3050 __this_cpu_inc(bh_accounting.nr);
3051 recalc_bh_state();
3052 preempt_enable();
3053 }
3054 return ret;
3055 }
3056 EXPORT_SYMBOL(alloc_buffer_head);
3057
3058 void free_buffer_head(struct buffer_head *bh)
3059 {
3060 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3061 kmem_cache_free(bh_cachep, bh);
3062 preempt_disable();
3063 __this_cpu_dec(bh_accounting.nr);
3064 recalc_bh_state();
3065 preempt_enable();
3066 }
3067 EXPORT_SYMBOL(free_buffer_head);
3068
3069 static int buffer_exit_cpu_dead(unsigned int cpu)
3070 {
3071 int i;
3072 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3073
3074 for (i = 0; i < BH_LRU_SIZE; i++) {
3075 brelse(b->bhs[i]);
3076 b->bhs[i] = NULL;
3077 }
3078 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3079 per_cpu(bh_accounting, cpu).nr = 0;
3080 return 0;
3081 }
3082
3083 /**
3084 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3085 * @bh: struct buffer_head
3086 *
3087 * Return true if the buffer is up-to-date and false,
3088 * with the buffer locked, if not.
3089 */
3090 int bh_uptodate_or_lock(struct buffer_head *bh)
3091 {
3092 if (!buffer_uptodate(bh)) {
3093 lock_buffer(bh);
3094 if (!buffer_uptodate(bh))
3095 return 0;
3096 unlock_buffer(bh);
3097 }
3098 return 1;
3099 }
3100 EXPORT_SYMBOL(bh_uptodate_or_lock);
3101
3102 /**
3103 * __bh_read - Submit read for a locked buffer
3104 * @bh: struct buffer_head
3105 * @op_flags: additional REQ_* flags to append to REQ_OP_READ
3106 * @wait: wait until reading finishes
3107 *
3108 * Returns zero on success or when not waiting, and -EIO on a read error.
3109 */
3110 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3111 {
3112 int ret = 0;
3113
3114 BUG_ON(!buffer_locked(bh));
3115
3116 get_bh(bh);
3117 bh->b_end_io = end_buffer_read_sync;
3118 submit_bh(REQ_OP_READ | op_flags, bh);
3119 if (wait) {
3120 wait_on_buffer(bh);
3121 if (!buffer_uptodate(bh))
3122 ret = -EIO;
3123 }
3124 return ret;
3125 }
3126 EXPORT_SYMBOL(__bh_read);
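/*
 * Illustrative sketch: bh_uptodate_or_lock() above pairs with __bh_read()
 * to give the common "read if needed" idiom - roughly the bh_read()
 * wrapper in buffer_head.h:
 *
 *	if (bh_uptodate_or_lock(bh))
 *		return 1;
 *	return __bh_read(bh, 0, true);
 */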
3127
3128 /**
3129 * __bh_read_batch - Submit read for a batch of unlocked buffers
3130 * @nr: number of entries in the buffer batch
3131 * @bhs: a batch of struct buffer_head
3132 * @op_flags: additional REQ_* flags to append to REQ_OP_READ
3133 * @force_lock: if set, wait to lock each buffer; otherwise skip any
3134 * buffer that cannot be locked immediately.
3135 *
3136 * No return value; reads complete asynchronously via end_buffer_read_sync().
3137 */
3138 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3139 blk_opf_t op_flags, bool force_lock)
3140 {
3141 int i;
3142
3143 for (i = 0; i < nr; i++) {
3144 struct buffer_head *bh = bhs[i];
3145
3146 if (buffer_uptodate(bh))
3147 continue;
3148
3149 if (force_lock)
3150 lock_buffer(bh);
3151 else
3152 if (!trylock_buffer(bh))
3153 continue;
3154
3155 if (buffer_uptodate(bh)) {
3156 unlock_buffer(bh);
3157 continue;
3158 }
3159
3160 bh->b_end_io = end_buffer_read_sync;
3161 get_bh(bh);
3162 submit_bh(REQ_OP_READ | op_flags, bh);
3163 }
3164 }
3165 EXPORT_SYMBOL(__bh_read_batch);
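/*
 * Illustrative sketch: with @force_lock false this acts as a readahead
 * primitive - busy buffers are simply skipped, as the bh_readahead_batch()
 * wrapper in buffer_head.h does:
 *
 *	struct buffer_head *bhs[8];
 *
 *	... fill bhs[0..n-1] via __getblk() ...
 *	__bh_read_batch(n, bhs, REQ_RAHEAD, false);
 */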
3166
3167 void __init buffer_init(void)
3168 {
3169 unsigned long nrpages;
3170 int ret;
3171
3172 bh_cachep = KMEM_CACHE(buffer_head,
3173 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3174 /*
3175 * Limit the bh occupancy to 10% of ZONE_NORMAL
3176 */
3177 nrpages = (nr_free_buffer_pages() * 10) / 100;
3178 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3179 ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3180 NULL, buffer_exit_cpu_dead);
3181 WARN_ON(ret < 0);
3182 }
3183