1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/sched/mm.h>
10 #include <linux/spinlock.h>
11 #include <linux/blkdev.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include <linux/prefetch.h>
16 #include <linux/cleancache.h>
17 #include <linux/fsverity.h>
18 #include "misc.h"
19 #include "extent_io.h"
20 #include "extent-io-tree.h"
21 #include "extent_map.h"
22 #include "ctree.h"
23 #include "btrfs_inode.h"
24 #include "bio.h"
25 #include "check-integrity.h"
26 #include "locking.h"
27 #include "rcu-string.h"
28 #include "backref.h"
29 #include "disk-io.h"
30 #include "subpage.h"
31 #include "zoned.h"
32 #include "block-group.h"
33 #include "compression.h"
34 #include "fs.h"
35 #include "accessors.h"
36 #include "file-item.h"
37 #include "file.h"
38 #include "dev-replace.h"
39 #include "super.h"
40 #include "transaction.h"
41
42 static struct kmem_cache *extent_buffer_cache;
43
44 #ifdef CONFIG_BTRFS_DEBUG
45 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
46 {
47 struct btrfs_fs_info *fs_info = eb->fs_info;
48 unsigned long flags;
49
50 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
51 list_add(&eb->leak_list, &fs_info->allocated_ebs);
52 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
53 }
54
55 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
56 {
57 struct btrfs_fs_info *fs_info = eb->fs_info;
58 unsigned long flags;
59
60 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
61 list_del(&eb->leak_list);
62 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
63 }
64
65 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
66 {
67 struct extent_buffer *eb;
68 unsigned long flags;
69
70 /*
71 * If we didn't get into open_ctree our allocated_ebs will not be
72 * initialized, so just skip this.
73 */
74 if (!fs_info->allocated_ebs.next)
75 return;
76
77 WARN_ON(!list_empty(&fs_info->allocated_ebs));
78 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
79 while (!list_empty(&fs_info->allocated_ebs)) {
80 eb = list_first_entry(&fs_info->allocated_ebs,
81 struct extent_buffer, leak_list);
82 pr_err(
83 "BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
84 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
85 btrfs_header_owner(eb));
86 list_del(&eb->leak_list);
87 kmem_cache_free(extent_buffer_cache, eb);
88 }
89 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
90 }
91 #else
92 #define btrfs_leak_debug_add_eb(eb) do {} while (0)
93 #define btrfs_leak_debug_del_eb(eb) do {} while (0)
94 #endif
95
96 /*
97 * Structure to record info about the bio being assembled, and other info like
98  * how many bytes remain before the stripe/ordered extent boundary.
99 */
100 struct btrfs_bio_ctrl {
101 struct btrfs_bio *bbio;
102 enum btrfs_compression_type compress_type;
103 u32 len_to_oe_boundary;
104 blk_opf_t opf;
105 btrfs_bio_end_io_t end_io_func;
106 struct writeback_control *wbc;
107 };
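/*
 * A minimal read-side usage sketch of btrfs_bio_ctrl, mirroring what
 * btrfs_read_folio() below does (all names taken from this file):
 *
 *	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
 *
 *	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
 *	submit_one_bio(&bio_ctrl);
 */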
108
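/*
 * Submit the bio currently assembled in @bio_ctrl, if any. Compressed
 * reads are routed through btrfs_submit_compressed_read(), everything
 * else through btrfs_submit_bio(). Ownership of the bbio passes to the
 * end_io handler.
 */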
109 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
110 {
111 struct btrfs_bio *bbio = bio_ctrl->bbio;
112
113 if (!bbio)
114 return;
115
116 /* Caller should ensure the bio has at least some range added */
117 ASSERT(bbio->bio.bi_iter.bi_size);
118
119 if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
120 bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
121 btrfs_submit_compressed_read(bbio);
122 else
123 btrfs_submit_bio(bbio, 0);
124
125 /* The bbio is owned by the end_io handler now */
126 bio_ctrl->bbio = NULL;
127 }
128
129 /*
130 * Submit or fail the current bio in the bio_ctrl structure.
131 */
132 static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
133 {
134 struct btrfs_bio *bbio = bio_ctrl->bbio;
135
136 if (!bbio)
137 return;
138
139 if (ret) {
140 ASSERT(ret < 0);
141 btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
142 /* The bio is owned by the end_io handler now */
143 bio_ctrl->bbio = NULL;
144 } else {
145 submit_one_bio(bio_ctrl);
146 }
147 }
148
149 int __init extent_buffer_init_cachep(void)
150 {
151 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
152 sizeof(struct extent_buffer), 0,
153 SLAB_MEM_SPREAD, NULL);
154 if (!extent_buffer_cache)
155 return -ENOMEM;
156
157 return 0;
158 }
159
160 void __cold extent_buffer_free_cachep(void)
161 {
162 /*
163 	 * Make sure all delayed rcu frees are flushed before we
164 * destroy caches.
165 */
166 rcu_barrier();
167 kmem_cache_destroy(extent_buffer_cache);
168 }
169
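/*
 * Clear the dirty bit of every page backing the byte range [start, end];
 * the pages are expected to already be present in the page cache.
 */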
170 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
171 {
172 unsigned long index = start >> PAGE_SHIFT;
173 unsigned long end_index = end >> PAGE_SHIFT;
174 struct page *page;
175
176 while (index <= end_index) {
177 page = find_get_page(inode->i_mapping, index);
178 BUG_ON(!page); /* Pages should be in the extent_io_tree */
179 clear_page_dirty_for_io(page);
180 put_page(page);
181 index++;
182 }
183 }
184
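/*
 * Apply @page_ops (set ordered, start/end writeback, unlock) to the part
 * of @page overlapping [start, end]. @locked_page is skipped for
 * PAGE_UNLOCK as the caller still owns its lock.
 */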
185 static void process_one_page(struct btrfs_fs_info *fs_info,
186 struct page *page, struct page *locked_page,
187 unsigned long page_ops, u64 start, u64 end)
188 {
189 u32 len;
190
191 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
192 len = end + 1 - start;
193
194 if (page_ops & PAGE_SET_ORDERED)
195 btrfs_page_clamp_set_ordered(fs_info, page, start, len);
196 if (page_ops & PAGE_START_WRITEBACK) {
197 btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
198 btrfs_page_clamp_set_writeback(fs_info, page, start, len);
199 }
200 if (page_ops & PAGE_END_WRITEBACK)
201 btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
202
203 if (page != locked_page && (page_ops & PAGE_UNLOCK))
204 btrfs_page_end_writer_lock(fs_info, page, start, len);
205 }
206
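/*
 * Walk every page of @mapping covering [start, end] and run
 * process_one_page() on it with the given @page_ops.
 */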
207 static void __process_pages_contig(struct address_space *mapping,
208 struct page *locked_page, u64 start, u64 end,
209 unsigned long page_ops)
210 {
211 struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
212 pgoff_t start_index = start >> PAGE_SHIFT;
213 pgoff_t end_index = end >> PAGE_SHIFT;
214 pgoff_t index = start_index;
215 struct folio_batch fbatch;
216 int i;
217
218 folio_batch_init(&fbatch);
219 while (index <= end_index) {
220 int found_folios;
221
222 found_folios = filemap_get_folios_contig(mapping, &index,
223 end_index, &fbatch);
224 for (i = 0; i < found_folios; i++) {
225 struct folio *folio = fbatch.folios[i];
226
227 process_one_page(fs_info, &folio->page, locked_page,
228 page_ops, start, end);
229 }
230 folio_batch_release(&fbatch);
231 cond_resched();
232 }
233 }
234
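/*
 * Unlock all pages in [start, end] except @locked_page, used when a
 * delalloc range could not be fully locked or turned out not to be
 * delalloc anymore.
 */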
235 static noinline void __unlock_for_delalloc(struct inode *inode,
236 struct page *locked_page,
237 u64 start, u64 end)
238 {
239 unsigned long index = start >> PAGE_SHIFT;
240 unsigned long end_index = end >> PAGE_SHIFT;
241
242 ASSERT(locked_page);
243 if (index == locked_page->index && end_index == index)
244 return;
245
246 __process_pages_contig(inode->i_mapping, locked_page, start, end,
247 PAGE_UNLOCK);
248 }
249
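/*
 * Lock every page in [start, end] except @locked_page. Returns 0 on
 * success, or -EAGAIN if a page is missing, no longer dirty or no longer
 * belongs to the inode's mapping; in that case all pages locked so far
 * are unlocked again so the caller can retry with a smaller range.
 */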
250 static noinline int lock_delalloc_pages(struct inode *inode,
251 struct page *locked_page,
252 u64 start,
253 u64 end)
254 {
255 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
256 struct address_space *mapping = inode->i_mapping;
257 pgoff_t start_index = start >> PAGE_SHIFT;
258 pgoff_t end_index = end >> PAGE_SHIFT;
259 pgoff_t index = start_index;
260 u64 processed_end = start;
261 struct folio_batch fbatch;
262
263 if (index == locked_page->index && index == end_index)
264 return 0;
265
266 folio_batch_init(&fbatch);
267 while (index <= end_index) {
268 unsigned int found_folios, i;
269
270 found_folios = filemap_get_folios_contig(mapping, &index,
271 end_index, &fbatch);
272 if (found_folios == 0)
273 goto out;
274
275 for (i = 0; i < found_folios; i++) {
276 struct page *page = &fbatch.folios[i]->page;
277 u32 len = end + 1 - start;
278
279 if (page == locked_page)
280 continue;
281
282 if (btrfs_page_start_writer_lock(fs_info, page, start,
283 len))
284 goto out;
285
286 if (!PageDirty(page) || page->mapping != mapping) {
287 btrfs_page_end_writer_lock(fs_info, page, start,
288 len);
289 goto out;
290 }
291
292 processed_end = page_offset(page) + PAGE_SIZE - 1;
293 }
294 folio_batch_release(&fbatch);
295 cond_resched();
296 }
297
298 return 0;
299 out:
300 folio_batch_release(&fbatch);
301 if (processed_end > start)
302 __unlock_for_delalloc(inode, locked_page, start, processed_end);
303 return -EAGAIN;
304 }
305
306 /*
307 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
308 * more than @max_bytes.
309 *
310 * @start: The original start bytenr to search.
311 * Will store the extent range start bytenr.
312 * @end: The original end bytenr of the search range
313 * Will store the extent range end bytenr.
314 *
315 * Return true if we find a delalloc range which starts inside the original
316 * range, and @start/@end will store the delalloc range start/end.
317 *
318 * Return false if we can't find any delalloc range which starts inside the
319 * original range, and @start/@end will be the non-delalloc range start/end.
320 */
321 EXPORT_FOR_TESTS
322 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
323 struct page *locked_page, u64 *start,
324 u64 *end)
325 {
326 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
327 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
328 const u64 orig_start = *start;
329 const u64 orig_end = *end;
330 /* The sanity tests may not set a valid fs_info. */
331 u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
332 u64 delalloc_start;
333 u64 delalloc_end;
334 bool found;
335 struct extent_state *cached_state = NULL;
336 int ret;
337 int loops = 0;
338
339 /* Caller should pass a valid @end to indicate the search range end */
340 ASSERT(orig_end > orig_start);
341
342 /* The range should at least cover part of the page */
343 ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
344 orig_end <= page_offset(locked_page)));
345 again:
346 /* step one, find a bunch of delalloc bytes starting at start */
347 delalloc_start = *start;
348 delalloc_end = 0;
349 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
350 max_bytes, &cached_state);
351 if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
352 *start = delalloc_start;
353
354 /* @delalloc_end can be -1, never go beyond @orig_end */
355 *end = min(delalloc_end, orig_end);
356 free_extent_state(cached_state);
357 return false;
358 }
359
360 /*
361 * start comes from the offset of locked_page. We have to lock
362 * pages in order, so we can't process delalloc bytes before
363 * locked_page
364 */
365 if (delalloc_start < *start)
366 delalloc_start = *start;
367
368 /*
369 * make sure to limit the number of pages we try to lock down
370 */
371 if (delalloc_end + 1 - delalloc_start > max_bytes)
372 delalloc_end = delalloc_start + max_bytes - 1;
373
374 /* step two, lock all the pages after the page that has start */
375 ret = lock_delalloc_pages(inode, locked_page,
376 delalloc_start, delalloc_end);
377 ASSERT(!ret || ret == -EAGAIN);
378 if (ret == -EAGAIN) {
379 		/* some of the pages are gone, let's avoid looping by
380 * shortening the size of the delalloc range we're searching
381 */
382 free_extent_state(cached_state);
383 cached_state = NULL;
384 if (!loops) {
385 max_bytes = PAGE_SIZE;
386 loops = 1;
387 goto again;
388 } else {
389 found = false;
390 goto out_failed;
391 }
392 }
393
394 /* step three, lock the state bits for the whole range */
395 lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
396
397 /* then test to make sure it is all still delalloc */
398 ret = test_range_bit(tree, delalloc_start, delalloc_end,
399 EXTENT_DELALLOC, 1, cached_state);
400 if (!ret) {
401 unlock_extent(tree, delalloc_start, delalloc_end,
402 &cached_state);
403 __unlock_for_delalloc(inode, locked_page,
404 delalloc_start, delalloc_end);
405 cond_resched();
406 goto again;
407 }
408 free_extent_state(cached_state);
409 *start = delalloc_start;
410 *end = delalloc_end;
411 out_failed:
412 return found;
413 }
414
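/*
 * Clear @clear_bits from the inode's io_tree for [start, end] and apply
 * @page_ops to every page in that range.
 */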
415 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
416 struct page *locked_page,
417 u32 clear_bits, unsigned long page_ops)
418 {
419 clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
420
421 __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
422 start, end, page_ops);
423 }
424
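/*
 * Return true if the page does not need fsverity verification (fsverity
 * not enabled, page already uptodate, or range beyond i_size) or if the
 * verification succeeds.
 */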
425 static bool btrfs_verify_page(struct page *page, u64 start)
426 {
427 if (!fsverity_active(page->mapping->host) ||
428 PageUptodate(page) ||
429 start >= i_size_read(page->mapping->host))
430 return true;
431 return fsverity_verify_page(page);
432 }
433
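/*
 * Finish the read of [start, start + len) inside @page: update the
 * (subpage) uptodate status, taking fsverity verification into account
 * via btrfs_verify_page(), and unlock the page once no readers remain.
 */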
434 static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
435 {
436 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
437
438 ASSERT(page_offset(page) <= start &&
439 start + len <= page_offset(page) + PAGE_SIZE);
440
441 if (uptodate && btrfs_verify_page(page, start))
442 btrfs_page_set_uptodate(fs_info, page, start, len);
443 else
444 btrfs_page_clear_uptodate(fs_info, page, start, len);
445
446 if (!btrfs_is_subpage(fs_info, page))
447 unlock_page(page);
448 else
449 btrfs_subpage_end_reader(fs_info, page, start, len);
450 }
451
452 /*
453 * after a writepage IO is done, we need to:
454 * clear the uptodate bits on error
455 * clear the writeback bits in the extent tree for this IO
456 * end_page_writeback if the page has no more pending IO
457 *
458 * Scheduling is not allowed, so the extent state tree is expected
459 * to have one and only one object corresponding to this IO.
460 */
461 static void end_bio_extent_writepage(struct btrfs_bio *bbio)
462 {
463 struct bio *bio = &bbio->bio;
464 int error = blk_status_to_errno(bio->bi_status);
465 struct bio_vec *bvec;
466 struct bvec_iter_all iter_all;
467
468 ASSERT(!bio_flagged(bio, BIO_CLONED));
469 bio_for_each_segment_all(bvec, bio, iter_all) {
470 struct page *page = bvec->bv_page;
471 struct inode *inode = page->mapping->host;
472 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
473 const u32 sectorsize = fs_info->sectorsize;
474 u64 start = page_offset(page) + bvec->bv_offset;
475 u32 len = bvec->bv_len;
476
477 /* Our read/write should always be sector aligned. */
478 if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
479 btrfs_err(fs_info,
480 "partial page write in btrfs with offset %u and length %u",
481 bvec->bv_offset, bvec->bv_len);
482 else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
483 btrfs_info(fs_info,
484 "incomplete page write with offset %u and length %u",
485 bvec->bv_offset, bvec->bv_len);
486
487 btrfs_finish_ordered_extent(bbio->ordered, page, start, len, !error);
488 if (error)
489 mapping_set_error(page->mapping, error);
490 btrfs_page_clear_writeback(fs_info, page, start, len);
491 }
492
493 bio_put(bio);
494 }
495
496 /*
497 * Record previously processed extent range
498 *
499 * For endio_readpage_release_extent() to handle a full extent range, reducing
500 * the extent io operations.
501 */
502 struct processed_extent {
503 struct btrfs_inode *inode;
504 /* Start of the range in @inode */
505 u64 start;
506 /* End of the range in @inode */
507 u64 end;
508 bool uptodate;
509 };
510
511 /*
512 * Try to release processed extent range
513 *
514 * May not release the extent range right now if the current range is
515 * contiguous to processed extent.
516 *
517  * Will release the processed extent when the @inode or @uptodate status
518  * differs, or when the range is no longer contiguous to the processed range.
519 *
520 * Passing @inode == NULL will force processed extent to be released.
521 */
522 static void endio_readpage_release_extent(struct processed_extent *processed,
523 struct btrfs_inode *inode, u64 start, u64 end,
524 bool uptodate)
525 {
526 struct extent_state *cached = NULL;
527 struct extent_io_tree *tree;
528
529 /* The first extent, initialize @processed */
530 if (!processed->inode)
531 goto update;
532
533 /*
534 	 * Contiguous to the processed extent, just update its end.
535 *
536 * Several things to notice:
537 *
538 * - bio can be merged as long as on-disk bytenr is contiguous
539 	 *   This means we can have pages belonging to other inodes, thus we need to
540 * check if the inode still matches.
541 * - bvec can contain range beyond current page for multi-page bvec
542 * Thus we need to do processed->end + 1 >= start check
543 */
544 if (processed->inode == inode && processed->uptodate == uptodate &&
545 processed->end + 1 >= start && end >= processed->end) {
546 processed->end = end;
547 return;
548 }
549
550 tree = &processed->inode->io_tree;
551 /*
552 * Now we don't have range contiguous to the processed range, release
553 * the processed range now.
554 */
555 unlock_extent(tree, processed->start, processed->end, &cached);
556
557 update:
558 /* Update processed to current range */
559 processed->inode = inode;
560 processed->start = start;
561 processed->end = end;
562 processed->uptodate = uptodate;
563 }
564
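/*
 * For subpage cases, mark the whole page as under read so that
 * end_page_read() can tell when the last sector finished and the page
 * can be unlocked; a no-op for regular sectorsize == PAGE_SIZE.
 */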
565 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
566 {
567 ASSERT(PageLocked(page));
568 if (!btrfs_is_subpage(fs_info, page))
569 return;
570
571 ASSERT(PagePrivate(page));
572 btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
573 }
574
575 /*
576 * after a readpage IO is done, we need to:
577 * clear the uptodate bits on error
578 * set the uptodate bits if things worked
579 * set the page up to date if all extents in the tree are uptodate
580 * clear the lock bit in the extent tree
581 * unlock the page if there are no other extents locked for it
582 *
583 * Scheduling is not allowed, so the extent state tree is expected
584 * to have one and only one object corresponding to this IO.
585 */
586 static void end_bio_extent_readpage(struct btrfs_bio *bbio)
587 {
588 struct bio *bio = &bbio->bio;
589 struct bio_vec *bvec;
590 struct processed_extent processed = { 0 };
591 /*
592 * The offset to the beginning of a bio, since one bio can never be
593 * larger than UINT_MAX, u32 here is enough.
594 */
595 u32 bio_offset = 0;
596 struct bvec_iter_all iter_all;
597
598 ASSERT(!bio_flagged(bio, BIO_CLONED));
599 bio_for_each_segment_all(bvec, bio, iter_all) {
600 bool uptodate = !bio->bi_status;
601 struct page *page = bvec->bv_page;
602 struct inode *inode = page->mapping->host;
603 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
604 const u32 sectorsize = fs_info->sectorsize;
605 u64 start;
606 u64 end;
607 u32 len;
608
609 btrfs_debug(fs_info,
610 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
611 bio->bi_iter.bi_sector, bio->bi_status,
612 bbio->mirror_num);
613
614 /*
615 * We always issue full-sector reads, but if some block in a
616 * page fails to read, blk_update_request() will advance
617 * bv_offset and adjust bv_len to compensate. Print a warning
618 * for unaligned offsets, and an error if they don't add up to
619 * a full sector.
620 */
621 if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
622 btrfs_err(fs_info,
623 "partial page read in btrfs with offset %u and length %u",
624 bvec->bv_offset, bvec->bv_len);
625 else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
626 sectorsize))
627 btrfs_info(fs_info,
628 "incomplete page read with offset %u and length %u",
629 bvec->bv_offset, bvec->bv_len);
630
631 start = page_offset(page) + bvec->bv_offset;
632 end = start + bvec->bv_len - 1;
633 len = bvec->bv_len;
634
635 if (likely(uptodate)) {
636 loff_t i_size = i_size_read(inode);
637 pgoff_t end_index = i_size >> PAGE_SHIFT;
638
639 /*
640 * Zero out the remaining part if this range straddles
641 * i_size.
642 *
643 * Here we should only zero the range inside the bvec,
644 * not touch anything else.
645 *
646 * NOTE: i_size is exclusive while end is inclusive.
647 */
648 if (page->index == end_index && i_size <= end) {
649 u32 zero_start = max(offset_in_page(i_size),
650 offset_in_page(start));
651
652 zero_user_segment(page, zero_start,
653 offset_in_page(end) + 1);
654 }
655 }
656
657 /* Update page status and unlock. */
658 end_page_read(page, uptodate, start, len);
659 endio_readpage_release_extent(&processed, BTRFS_I(inode),
660 start, end, uptodate);
661
662 ASSERT(bio_offset + len > bio_offset);
663 bio_offset += len;
664
665 }
666 /* Release the last extent */
667 endio_readpage_release_extent(&processed, NULL, 0, 0, false);
668 bio_put(bio);
669 }
670
671 /*
672 * Populate every free slot in a provided array with pages.
673 *
674 * @nr_pages: number of pages to allocate
675 * @page_array: the array to fill with pages; any existing non-null entries in
676 * the array will be skipped
677 *
678 * Return: 0 if all pages were able to be allocated;
679 * -ENOMEM otherwise, the partially allocated pages would be freed and
680 * the array slots zeroed
681 */
682 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
683 {
684 unsigned int allocated;
685
686 for (allocated = 0; allocated < nr_pages;) {
687 unsigned int last = allocated;
688
689 allocated = alloc_pages_bulk_array(GFP_NOFS, nr_pages, page_array);
690 if (unlikely(allocated == last)) {
691 /* No progress, fail and do cleanup. */
692 for (int i = 0; i < allocated; i++) {
693 __free_page(page_array[i]);
694 page_array[i] = NULL;
695 }
696 return -ENOMEM;
697 }
698 }
699 return 0;
700 }
701
702 static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
703 struct page *page, u64 disk_bytenr,
704 unsigned int pg_offset)
705 {
706 struct bio *bio = &bio_ctrl->bbio->bio;
707 struct bio_vec *bvec = bio_last_bvec_all(bio);
708 const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
709
710 if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
711 /*
712 * For compression, all IO should have its logical bytenr set
713 * to the starting bytenr of the compressed extent.
714 */
715 return bio->bi_iter.bi_sector == sector;
716 }
717
718 /*
719 * The contig check requires the following conditions to be met:
720 *
721 	 * 1) The pages belong to the same inode
722 * This is implied by the call chain.
723 *
724 * 2) The range has adjacent logical bytenr
725 *
726 * 3) The range has adjacent file offset
727 * This is required for the usage of btrfs_bio->file_offset.
728 */
729 return bio_end_sector(bio) == sector &&
730 page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
731 page_offset(page) + pg_offset;
732 }
733
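/*
 * Allocate a fresh bbio in @bio_ctrl starting at @disk_bytenr for
 * @file_offset. For data writes (bio_ctrl->wbc set) also cap
 * len_to_oe_boundary to the ordered extent and wire the bio up for
 * cgroup writeback accounting.
 */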
734 static void alloc_new_bio(struct btrfs_inode *inode,
735 struct btrfs_bio_ctrl *bio_ctrl,
736 u64 disk_bytenr, u64 file_offset)
737 {
738 struct btrfs_fs_info *fs_info = inode->root->fs_info;
739 struct btrfs_bio *bbio;
740
741 bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
742 bio_ctrl->end_io_func, NULL);
743 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
744 bbio->inode = inode;
745 bbio->file_offset = file_offset;
746 bio_ctrl->bbio = bbio;
747 bio_ctrl->len_to_oe_boundary = U32_MAX;
748
749 /* Limit data write bios to the ordered boundary. */
750 if (bio_ctrl->wbc) {
751 struct btrfs_ordered_extent *ordered;
752
753 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
754 if (ordered) {
755 bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
756 ordered->file_offset +
757 ordered->disk_num_bytes - file_offset);
758 bbio->ordered = ordered;
759 }
760
761 /*
762 * Pick the last added device to support cgroup writeback. For
763 * multi-device file systems this means blk-cgroup policies have
764 * to always be set on the last added/replaced device.
765 * This is a bit odd but has been like that for a long time.
766 */
767 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
768 wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
769 }
770 }
771
772 /*
773 * @disk_bytenr: logical bytenr where the write will be
774 * @page: page to add to the bio
775 * @size: portion of page that we want to write to
776 * @pg_offset: offset of the new bio or to check whether we are adding
777 * a contiguous page to the previous one
778 *
779  * This will either add the page to the existing @bio_ctrl->bbio, or allocate a
780  * new one in @bio_ctrl->bbio.
781  * The mirror number for this IO should already be initialized in
782 * @bio_ctrl->mirror_num.
783 */
784 static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
785 u64 disk_bytenr, struct page *page,
786 size_t size, unsigned long pg_offset)
787 {
788 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
789
790 ASSERT(pg_offset + size <= PAGE_SIZE);
791 ASSERT(bio_ctrl->end_io_func);
792
793 if (bio_ctrl->bbio &&
794 !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
795 submit_one_bio(bio_ctrl);
796
797 do {
798 u32 len = size;
799
800 /* Allocate new bio if needed */
801 if (!bio_ctrl->bbio) {
802 alloc_new_bio(inode, bio_ctrl, disk_bytenr,
803 page_offset(page) + pg_offset);
804 }
805
806 /* Cap to the current ordered extent boundary if there is one. */
807 if (len > bio_ctrl->len_to_oe_boundary) {
808 ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
809 ASSERT(is_data_inode(&inode->vfs_inode));
810 len = bio_ctrl->len_to_oe_boundary;
811 }
812
813 if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
814 /* bio full: move on to a new one */
815 submit_one_bio(bio_ctrl);
816 continue;
817 }
818
819 if (bio_ctrl->wbc)
820 wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
821
822 size -= len;
823 pg_offset += len;
824 disk_bytenr += len;
825
826 /*
827 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
828 * sector aligned. alloc_new_bio() then sets it to the end of
829 * our ordered extent for writes into zoned devices.
830 *
831 * When len_to_oe_boundary is tracking an ordered extent, we
832 * trust the ordered extent code to align things properly, and
833 * the check above to cap our write to the ordered extent
834 * boundary is correct.
835 *
836 * When len_to_oe_boundary is U32_MAX, the cap above would
837 * result in a 4095 byte IO for the last page right before
838 * we hit the bio limit of UINT_MAX. bio_add_page() has all
839 * the checks required to make sure we don't overflow the bio,
840 * and we should just ignore len_to_oe_boundary completely
841 * unless we're using it to track an ordered extent.
842 *
843 * It's pretty hard to make a bio sized U32_MAX, but it can
844 * happen when the page cache is able to feed us contiguous
845 * pages for large extents.
846 */
847 if (bio_ctrl->len_to_oe_boundary != U32_MAX)
848 bio_ctrl->len_to_oe_boundary -= len;
849
850 /* Ordered extent boundary: move on to a new bio. */
851 if (bio_ctrl->len_to_oe_boundary == 0)
852 submit_one_bio(bio_ctrl);
853 } while (size);
854 }
855
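/*
 * Attach the proper private data to @page for @eb: the eb itself for
 * nodesize >= PAGE_SIZE, otherwise a btrfs_subpage structure (using
 * @prealloc when provided).
 */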
856 static int attach_extent_buffer_page(struct extent_buffer *eb,
857 struct page *page,
858 struct btrfs_subpage *prealloc)
859 {
860 struct btrfs_fs_info *fs_info = eb->fs_info;
861 int ret = 0;
862
863 /*
864 * If the page is mapped to btree inode, we should hold the private
865 * lock to prevent race.
866 * For cloned or dummy extent buffers, their pages are not mapped and
867 * will not race with any other ebs.
868 */
869 if (page->mapping)
870 lockdep_assert_held(&page->mapping->private_lock);
871
872 if (fs_info->nodesize >= PAGE_SIZE) {
873 if (!PagePrivate(page))
874 attach_page_private(page, eb);
875 else
876 WARN_ON(page->private != (unsigned long)eb);
877 return 0;
878 }
879
880 /* Already mapped, just free prealloc */
881 if (PagePrivate(page)) {
882 btrfs_free_subpage(prealloc);
883 return 0;
884 }
885
886 if (prealloc)
887 /* Has preallocated memory for subpage */
888 attach_page_private(page, prealloc);
889 else
890 /* Do new allocation to attach subpage */
891 ret = btrfs_attach_subpage(fs_info, page,
892 BTRFS_SUBPAGE_METADATA);
893 return ret;
894 }
895
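/*
 * Ensure @page has the btrfs private data attached: a btrfs_subpage
 * structure for sectorsize < PAGE_SIZE, otherwise the EXTENT_PAGE_PRIVATE
 * marker. Safe to call multiple times.
 */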
896 int set_page_extent_mapped(struct page *page)
897 {
898 struct btrfs_fs_info *fs_info;
899
900 ASSERT(page->mapping);
901
902 if (PagePrivate(page))
903 return 0;
904
905 fs_info = btrfs_sb(page->mapping->host->i_sb);
906
907 if (btrfs_is_subpage(fs_info, page))
908 return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
909
910 attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
911 return 0;
912 }
913
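/*
 * Counterpart of set_page_extent_mapped(): detach the private data from
 * @page, releasing the subpage structure if one was attached.
 */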
914 void clear_page_extent_mapped(struct page *page)
915 {
916 struct btrfs_fs_info *fs_info;
917
918 ASSERT(page->mapping);
919
920 if (!PagePrivate(page))
921 return;
922
923 fs_info = btrfs_sb(page->mapping->host->i_sb);
924 if (btrfs_is_subpage(fs_info, page))
925 return btrfs_detach_subpage(fs_info, page);
926
927 detach_page_private(page);
928 }
929
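/*
 * Return the extent map covering @start, reusing *@em_cached when it is
 * still in the tree and covers the range, otherwise doing a fresh
 * btrfs_get_extent() lookup and caching the result in *@em_cached.
 */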
930 static struct extent_map *
931 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
932 u64 start, u64 len, struct extent_map **em_cached)
933 {
934 struct extent_map *em;
935
936 if (em_cached && *em_cached) {
937 em = *em_cached;
938 if (extent_map_in_tree(em) && start >= em->start &&
939 start < extent_map_end(em)) {
940 refcount_inc(&em->refs);
941 return em;
942 }
943
944 free_extent_map(em);
945 *em_cached = NULL;
946 }
947
948 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
949 if (em_cached && !IS_ERR(em)) {
950 BUG_ON(*em_cached);
951 refcount_inc(&em->refs);
952 *em_cached = em;
953 }
954 return em;
955 }
956 /*
957 * basic readpage implementation. Locked extent state structs are inserted
958 * into the tree that are removed when the IO is done (by the end_io
959 * handlers)
960 * XXX JDM: This needs looking at to ensure proper page locking
961 * return 0 on success, otherwise return error
962 */
963 static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
964 struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
965 {
966 struct inode *inode = page->mapping->host;
967 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
968 u64 start = page_offset(page);
969 const u64 end = start + PAGE_SIZE - 1;
970 u64 cur = start;
971 u64 extent_offset;
972 u64 last_byte = i_size_read(inode);
973 u64 block_start;
974 struct extent_map *em;
975 int ret = 0;
976 size_t pg_offset = 0;
977 size_t iosize;
978 size_t blocksize = fs_info->sectorsize;
979 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
980
981 ret = set_page_extent_mapped(page);
982 if (ret < 0) {
983 unlock_extent(tree, start, end, NULL);
984 unlock_page(page);
985 return ret;
986 }
987
988 if (!PageUptodate(page)) {
989 if (cleancache_get_page(page) == 0) {
990 BUG_ON(blocksize != PAGE_SIZE);
991 unlock_extent(tree, start, end, NULL);
992 unlock_page(page);
993 return ret;
994 }
995 }
996
997 if (page->index == last_byte >> PAGE_SHIFT) {
998 size_t zero_offset = offset_in_page(last_byte);
999
1000 if (zero_offset) {
1001 iosize = PAGE_SIZE - zero_offset;
1002 memzero_page(page, zero_offset, iosize);
1003 }
1004 }
1005 bio_ctrl->end_io_func = end_bio_extent_readpage;
1006 begin_page_read(fs_info, page);
1007 while (cur <= end) {
1008 enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
1009 bool force_bio_submit = false;
1010 u64 disk_bytenr;
1011
1012 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1013 if (cur >= last_byte) {
1014 iosize = PAGE_SIZE - pg_offset;
1015 memzero_page(page, pg_offset, iosize);
1016 unlock_extent(tree, cur, cur + iosize - 1, NULL);
1017 end_page_read(page, true, cur, iosize);
1018 break;
1019 }
1020 em = __get_extent_map(inode, page, pg_offset, cur,
1021 end - cur + 1, em_cached);
1022 if (IS_ERR(em)) {
1023 unlock_extent(tree, cur, end, NULL);
1024 end_page_read(page, false, cur, end + 1 - cur);
1025 return PTR_ERR(em);
1026 }
1027 extent_offset = cur - em->start;
1028 BUG_ON(extent_map_end(em) <= cur);
1029 BUG_ON(end < cur);
1030
1031 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
1032 compress_type = em->compress_type;
1033
1034 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1035 iosize = ALIGN(iosize, blocksize);
1036 if (compress_type != BTRFS_COMPRESS_NONE)
1037 disk_bytenr = em->block_start;
1038 else
1039 disk_bytenr = em->block_start + extent_offset;
1040 block_start = em->block_start;
1041 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
1042 block_start = EXTENT_MAP_HOLE;
1043
1044 /*
1045 * If we have a file range that points to a compressed extent
1046 * and it's followed by a consecutive file range that points
1047 * to the same compressed extent (possibly with a different
1048 * offset and/or length, so it either points to the whole extent
1049 * or only part of it), we must make sure we do not submit a
1050 * single bio to populate the pages for the 2 ranges because
1051 * this makes the compressed extent read zero out the pages
1052 * belonging to the 2nd range. Imagine the following scenario:
1053 *
1054 * File layout
1055 * [0 - 8K] [8K - 24K]
1056 * | |
1057 * | |
1058 * points to extent X, points to extent X,
1059 * offset 4K, length of 8K offset 0, length 16K
1060 *
1061 * [extent X, compressed length = 4K uncompressed length = 16K]
1062 *
1063 * If the bio to read the compressed extent covers both ranges,
1064 * it will decompress extent X into the pages belonging to the
1065 * first range and then it will stop, zeroing out the remaining
1066 * pages that belong to the other range that points to extent X.
1067 * So here we make sure we submit 2 bios, one for the first
1068 	 * range and another one for the second range. Both will target
1069 * the same physical extent from disk, but we can't currently
1070 * make the compressed bio endio callback populate the pages
1071 * for both ranges because each compressed bio is tightly
1072 * coupled with a single extent map, and each range can have
1073 * an extent map with a different offset value relative to the
1074 * uncompressed data of our extent and different lengths. This
1075 * is a corner case so we prioritize correctness over
1076 * non-optimal behavior (submitting 2 bios for the same extent).
1077 */
1078 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
1079 prev_em_start && *prev_em_start != (u64)-1 &&
1080 *prev_em_start != em->start)
1081 force_bio_submit = true;
1082
1083 if (prev_em_start)
1084 *prev_em_start = em->start;
1085
1086 free_extent_map(em);
1087 em = NULL;
1088
1089 /* we've found a hole, just zero and go on */
1090 if (block_start == EXTENT_MAP_HOLE) {
1091 memzero_page(page, pg_offset, iosize);
1092
1093 unlock_extent(tree, cur, cur + iosize - 1, NULL);
1094 end_page_read(page, true, cur, iosize);
1095 cur = cur + iosize;
1096 pg_offset += iosize;
1097 continue;
1098 }
1099 /* the get_extent function already copied into the page */
1100 if (block_start == EXTENT_MAP_INLINE) {
1101 unlock_extent(tree, cur, cur + iosize - 1, NULL);
1102 end_page_read(page, true, cur, iosize);
1103 cur = cur + iosize;
1104 pg_offset += iosize;
1105 continue;
1106 }
1107
1108 if (bio_ctrl->compress_type != compress_type) {
1109 submit_one_bio(bio_ctrl);
1110 bio_ctrl->compress_type = compress_type;
1111 }
1112
1113 if (force_bio_submit)
1114 submit_one_bio(bio_ctrl);
1115 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1116 pg_offset);
1117 cur = cur + iosize;
1118 pg_offset += iosize;
1119 }
1120
1121 return 0;
1122 }
1123
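/*
 * Read a single folio: wait for and flush any ordered extents overlapping
 * it, run btrfs_do_readpage() and submit whatever bio was assembled.
 */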
1124 int btrfs_read_folio(struct file *file, struct folio *folio)
1125 {
1126 struct page *page = &folio->page;
1127 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
1128 u64 start = page_offset(page);
1129 u64 end = start + PAGE_SIZE - 1;
1130 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1131 int ret;
1132
1133 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1134
1135 ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
1136 /*
1137 * If btrfs_do_readpage() failed we will want to submit the assembled
1138 * bio to do the cleanup.
1139 */
1140 submit_one_bio(&bio_ctrl);
1141 return ret;
1142 }
1143
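/*
 * Readahead helper: issue reads for a batch of contiguous pages of one
 * inode, sharing the cached extent map and bio_ctrl across all of them.
 */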
1144 static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1145 u64 start, u64 end,
1146 struct extent_map **em_cached,
1147 struct btrfs_bio_ctrl *bio_ctrl,
1148 u64 *prev_em_start)
1149 {
1150 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
1151 int index;
1152
1153 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1154
1155 for (index = 0; index < nr_pages; index++) {
1156 btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1157 prev_em_start);
1158 put_page(pages[index]);
1159 }
1160 }
1161
1162 /*
1163 * helper for __extent_writepage, doing all of the delayed allocation setup.
1164 *
1165  * This returns 1 if btrfs_run_delalloc_range() did all the work required
1166 * to write the page (copy into inline extent). In this case the IO has
1167 * been started and the page is already unlocked.
1168 *
1169 * This returns 0 if all went well (page still locked)
1170 * This returns < 0 if there were errors (page still locked)
1171 */
1172 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1173 struct page *page, struct writeback_control *wbc)
1174 {
1175 const u64 page_start = page_offset(page);
1176 const u64 page_end = page_start + PAGE_SIZE - 1;
1177 u64 delalloc_start = page_start;
1178 u64 delalloc_end = page_end;
1179 u64 delalloc_to_write = 0;
1180 int ret = 0;
1181
1182 while (delalloc_start < page_end) {
1183 delalloc_end = page_end;
1184 if (!find_lock_delalloc_range(&inode->vfs_inode, page,
1185 &delalloc_start, &delalloc_end)) {
1186 delalloc_start = delalloc_end + 1;
1187 continue;
1188 }
1189
1190 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
1191 delalloc_end, wbc);
1192 if (ret < 0)
1193 return ret;
1194
1195 delalloc_start = delalloc_end + 1;
1196 }
1197
1198 /*
1199 * delalloc_end is already one less than the total length, so
1200 * we don't subtract one from PAGE_SIZE
1201 */
1202 delalloc_to_write +=
1203 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1204
1205 /*
1206 	 * If btrfs_run_delalloc_range() already started I/O and unlocked
1207 * the pages, we just need to account for them here.
1208 */
1209 if (ret == 1) {
1210 wbc->nr_to_write -= delalloc_to_write;
1211 return 1;
1212 }
1213
1214 if (wbc->nr_to_write < delalloc_to_write) {
1215 int thresh = 8192;
1216
1217 if (delalloc_to_write < thresh * 2)
1218 thresh = delalloc_to_write;
1219 wbc->nr_to_write = min_t(u64, delalloc_to_write,
1220 thresh);
1221 }
1222
1223 return 0;
1224 }
1225
1226 /*
1227 * Find the first byte we need to write.
1228 *
1229 * For subpage, one page can contain several sectors, and
1230 * __extent_writepage_io() will just grab all extent maps in the page
1231 * range and try to submit all non-inline/non-compressed extents.
1232 *
1233  * This is a big problem for subpage; we shouldn't re-submit already written
1234  * data at all.
1235  * This function looks up the subpage dirty bitmap to find which range we
1236  * really need to submit.
1237 *
1238 * Return the next dirty range in [@start, @end).
1239 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1240 */
1241 static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
1242 struct page *page, u64 *start, u64 *end)
1243 {
1244 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
1245 struct btrfs_subpage_info *spi = fs_info->subpage_info;
1246 u64 orig_start = *start;
1247 /* Declare as unsigned long so we can use bitmap ops */
1248 unsigned long flags;
1249 int range_start_bit;
1250 int range_end_bit;
1251
1252 /*
1253 * For regular sector size == page size case, since one page only
1254 * contains one sector, we return the page offset directly.
1255 */
1256 if (!btrfs_is_subpage(fs_info, page)) {
1257 *start = page_offset(page);
1258 *end = page_offset(page) + PAGE_SIZE;
1259 return;
1260 }
1261
1262 range_start_bit = spi->dirty_offset +
1263 (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1264
1265 /* We should have the page locked, but just in case */
1266 spin_lock_irqsave(&subpage->lock, flags);
1267 bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1268 spi->dirty_offset + spi->bitmap_nr_bits);
1269 spin_unlock_irqrestore(&subpage->lock, flags);
1270
1271 range_start_bit -= spi->dirty_offset;
1272 range_end_bit -= spi->dirty_offset;
1273
1274 *start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1275 *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1276 }
1277
1278 /*
1279 * helper for __extent_writepage. This calls the writepage start hooks,
1280 * and does the loop to map the page into extents and bios.
1281 *
1282 * We return 1 if the IO is started and the page is unlocked,
1283 * 0 if all went well (page still locked)
1284 * < 0 if there were errors (page still locked)
1285 */
1286 static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1287 struct page *page,
1288 struct btrfs_bio_ctrl *bio_ctrl,
1289 loff_t i_size,
1290 int *nr_ret)
1291 {
1292 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1293 u64 cur = page_offset(page);
1294 u64 end = cur + PAGE_SIZE - 1;
1295 u64 extent_offset;
1296 u64 block_start;
1297 struct extent_map *em;
1298 int ret = 0;
1299 int nr = 0;
1300
1301 ret = btrfs_writepage_cow_fixup(page);
1302 if (ret) {
1303 /* Fixup worker will requeue */
1304 redirty_page_for_writepage(bio_ctrl->wbc, page);
1305 unlock_page(page);
1306 return 1;
1307 }
1308
1309 bio_ctrl->end_io_func = end_bio_extent_writepage;
1310 while (cur <= end) {
1311 u32 len = end - cur + 1;
1312 u64 disk_bytenr;
1313 u64 em_end;
1314 u64 dirty_range_start = cur;
1315 u64 dirty_range_end;
1316 u32 iosize;
1317
1318 if (cur >= i_size) {
1319 btrfs_mark_ordered_io_finished(inode, page, cur, len,
1320 true);
1321 /*
1322 * This range is beyond i_size, thus we don't need to
1323 * bother writing back.
1324 * But we still need to clear the dirty subpage bit, or
1325 * the next time the page gets dirtied, we will try to
1326 * writeback the sectors with subpage dirty bits,
1327 * causing writeback without ordered extent.
1328 */
1329 btrfs_page_clear_dirty(fs_info, page, cur, len);
1330 break;
1331 }
1332
1333 find_next_dirty_byte(fs_info, page, &dirty_range_start,
1334 &dirty_range_end);
1335 if (cur < dirty_range_start) {
1336 cur = dirty_range_start;
1337 continue;
1338 }
1339
1340 em = btrfs_get_extent(inode, NULL, 0, cur, len);
1341 if (IS_ERR(em)) {
1342 ret = PTR_ERR_OR_ZERO(em);
1343 goto out_error;
1344 }
1345
1346 extent_offset = cur - em->start;
1347 em_end = extent_map_end(em);
1348 ASSERT(cur <= em_end);
1349 ASSERT(cur < end);
1350 ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
1351 ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
1352
1353 block_start = em->block_start;
1354 disk_bytenr = em->block_start + extent_offset;
1355
1356 ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
1357 ASSERT(block_start != EXTENT_MAP_HOLE);
1358 ASSERT(block_start != EXTENT_MAP_INLINE);
1359
1360 /*
1361 * Note that em_end from extent_map_end() and dirty_range_end from
1362 * find_next_dirty_byte() are all exclusive
1363 */
1364 iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1365 free_extent_map(em);
1366 em = NULL;
1367
1368 btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
1369 if (!PageWriteback(page)) {
1370 btrfs_err(inode->root->fs_info,
1371 "page %lu not writeback, cur %llu end %llu",
1372 page->index, cur, end);
1373 }
1374
1375 /*
1376 * Although the PageDirty bit is cleared before entering this
1377 * function, subpage dirty bit is not cleared.
1378 * So clear subpage dirty bit here so next time we won't submit
1379 * page for range already written to disk.
1380 */
1381 btrfs_page_clear_dirty(fs_info, page, cur, iosize);
1382
1383 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1384 cur - page_offset(page));
1385 cur += iosize;
1386 nr++;
1387 }
1388
1389 btrfs_page_assert_not_dirty(fs_info, page);
1390 *nr_ret = nr;
1391 return 0;
1392
1393 out_error:
1394 /*
1395 * If we finish without problem, we should not only clear page dirty,
1396 * but also empty subpage dirty bits
1397 */
1398 *nr_ret = nr;
1399 return ret;
1400 }
1401
1402 /*
1403 * the writepage semantics are similar to regular writepage. extent
1404 * records are inserted to lock ranges in the tree, and as dirty areas
1405 * are found, they are marked writeback. Then the lock bits are removed
1406 * and the end_io handler clears the writeback ranges
1407 *
1408 * Return 0 if everything goes well.
1409 * Return <0 for error.
1410 */
1411 static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
1412 {
1413 struct folio *folio = page_folio(page);
1414 struct inode *inode = page->mapping->host;
1415 const u64 page_start = page_offset(page);
1416 int ret;
1417 int nr = 0;
1418 size_t pg_offset;
1419 loff_t i_size = i_size_read(inode);
1420 unsigned long end_index = i_size >> PAGE_SHIFT;
1421
1422 trace___extent_writepage(page, inode, bio_ctrl->wbc);
1423
1424 WARN_ON(!PageLocked(page));
1425
1426 pg_offset = offset_in_page(i_size);
1427 if (page->index > end_index ||
1428 (page->index == end_index && !pg_offset)) {
1429 folio_invalidate(folio, 0, folio_size(folio));
1430 folio_unlock(folio);
1431 return 0;
1432 }
1433
1434 if (page->index == end_index)
1435 memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
1436
1437 ret = set_page_extent_mapped(page);
1438 if (ret < 0)
1439 goto done;
1440
1441 ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
1442 if (ret == 1)
1443 return 0;
1444 if (ret)
1445 goto done;
1446
1447 ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
1448 if (ret == 1)
1449 return 0;
1450
1451 bio_ctrl->wbc->nr_to_write--;
1452
1453 done:
1454 if (nr == 0) {
1455 /* make sure the mapping tag for page dirty gets cleared */
1456 set_page_writeback(page);
1457 end_page_writeback(page);
1458 }
1459 if (ret) {
1460 btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
1461 PAGE_SIZE, !ret);
1462 mapping_set_error(page->mapping, ret);
1463 }
1464 unlock_page(page);
1465 ASSERT(ret <= 0);
1466 return ret;
1467 }
1468
1469 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1470 {
1471 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1472 TASK_UNINTERRUPTIBLE);
1473 }
1474
1475 /*
1476 * Lock extent buffer status and pages for writeback.
1477 *
1478 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1479 * extent buffer is not dirty)
1480  * Return %true if the extent buffer is submitted to bio.
1481 */
1482 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1483 struct writeback_control *wbc)
1484 {
1485 struct btrfs_fs_info *fs_info = eb->fs_info;
1486 bool ret = false;
1487
1488 btrfs_tree_lock(eb);
1489 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1490 btrfs_tree_unlock(eb);
1491 if (wbc->sync_mode != WB_SYNC_ALL)
1492 return false;
1493 wait_on_extent_buffer_writeback(eb);
1494 btrfs_tree_lock(eb);
1495 }
1496
1497 /*
1498 * We need to do this to prevent races in people who check if the eb is
1499 * under IO since we can end up having no IO bits set for a short period
1500 * of time.
1501 */
1502 spin_lock(&eb->refs_lock);
1503 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1504 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1505 spin_unlock(&eb->refs_lock);
1506 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1507 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1508 -eb->len,
1509 fs_info->dirty_metadata_batch);
1510 ret = true;
1511 } else {
1512 spin_unlock(&eb->refs_lock);
1513 }
1514 btrfs_tree_unlock(eb);
1515 return ret;
1516 }
1517
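/*
 * Record a metadata write error for @eb: mark the buffer and the btree
 * inode mapping with the error and set the matching fs_info flag (log
 * tree 0/1 or the main btree) so a later transaction commit or log sync
 * can detect it.
 */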
1518 static void set_btree_ioerr(struct extent_buffer *eb)
1519 {
1520 struct btrfs_fs_info *fs_info = eb->fs_info;
1521
1522 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1523
1524 /*
1525 * A read may stumble upon this buffer later, make sure that it gets an
1526 * error and knows there was an error.
1527 */
1528 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1529
1530 /*
1531 * We need to set the mapping with the io error as well because a write
1532 * error will flip the file system readonly, and then syncfs() will
1533 * return a 0 because we are readonly if we don't modify the err seq for
1534 * the superblock.
1535 */
1536 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1537
1538 /*
1539 * If writeback for a btree extent that doesn't belong to a log tree
1540 * failed, increment the counter transaction->eb_write_errors.
1541 * We do this because while the transaction is running and before it's
1542 * committing (when we call filemap_fdata[write|wait]_range against
1543 * the btree inode), we might have
1544 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1545 * returns an error or an error happens during writeback, when we're
1546 * committing the transaction we wouldn't know about it, since the pages
1547 * can be no longer dirty nor marked anymore for writeback (if a
1548 * subsequent modification to the extent buffer didn't happen before the
1549 * transaction commit), which makes filemap_fdata[write|wait]_range not
1550 * able to find the pages tagged with SetPageError at transaction
1551 * commit time. So if this happens we must abort the transaction,
1552 * otherwise we commit a super block with btree roots that point to
1553 * btree nodes/leafs whose content on disk is invalid - either garbage
1554 * or the content of some node/leaf from a past generation that got
1555 * cowed or deleted and is no longer valid.
1556 *
1557 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1558 * not be enough - we need to distinguish between log tree extents vs
1559 * non-log tree extents, and the next filemap_fdatawait_range() call
1560 * will catch and clear such errors in the mapping - and that call might
1561 * be from a log sync and not from a transaction commit. Also, checking
1562 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1563 * not done and would not be reliable - the eb might have been released
1564 * from memory and reading it back again means that flag would not be
1565 * set (since it's a runtime flag, not persisted on disk).
1566 *
1567 * Using the flags below in the btree inode also makes us achieve the
1568 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
1569 * writeback for all dirty pages and before filemap_fdatawait_range()
1570 * is called, the writeback for all dirty pages had already finished
1571 * with errors - because we were not using AS_EIO/AS_ENOSPC,
1572 * filemap_fdatawait_range() would return success, as it could not know
1573 * that writeback errors happened (the pages were no longer tagged for
1574 * writeback).
1575 */
1576 switch (eb->log_index) {
1577 case -1:
1578 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1579 break;
1580 case 0:
1581 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1582 break;
1583 case 1:
1584 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1585 break;
1586 default:
1587 BUG(); /* unexpected, logic error */
1588 }
1589 }
1590
1591 /*
1592 * The endio specific version which won't touch any unsafe spinlock in endio
1593 * context.
1594 */
1595 static struct extent_buffer *find_extent_buffer_nolock(
1596 struct btrfs_fs_info *fs_info, u64 start)
1597 {
1598 struct extent_buffer *eb;
1599
1600 rcu_read_lock();
1601 eb = radix_tree_lookup(&fs_info->buffer_radix,
1602 start >> fs_info->sectorsize_bits);
1603 if (eb && atomic_inc_not_zero(&eb->refs)) {
1604 rcu_read_unlock();
1605 return eb;
1606 }
1607 rcu_read_unlock();
1608 return NULL;
1609 }
1610
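/*
 * Endio handler for extent buffer writes: clear the per-range writeback
 * bits, record an error with set_btree_ioerr() if needed and wake up
 * waiters of EXTENT_BUFFER_WRITEBACK.
 */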
1611 static void extent_buffer_write_end_io(struct btrfs_bio *bbio)
1612 {
1613 struct extent_buffer *eb = bbio->private;
1614 struct btrfs_fs_info *fs_info = eb->fs_info;
1615 bool uptodate = !bbio->bio.bi_status;
1616 struct bvec_iter_all iter_all;
1617 struct bio_vec *bvec;
1618 u32 bio_offset = 0;
1619
1620 if (!uptodate)
1621 set_btree_ioerr(eb);
1622
1623 bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
1624 u64 start = eb->start + bio_offset;
1625 struct page *page = bvec->bv_page;
1626 u32 len = bvec->bv_len;
1627
1628 btrfs_page_clear_writeback(fs_info, page, start, len);
1629 bio_offset += len;
1630 }
1631
1632 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1633 smp_mb__after_atomic();
1634 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1635
1636 bio_put(&bbio->bio);
1637 }
1638
1639 static void prepare_eb_write(struct extent_buffer *eb)
1640 {
1641 u32 nritems;
1642 unsigned long start;
1643 unsigned long end;
1644
1645 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1646
1647 /* Set btree blocks beyond nritems with 0 to avoid stale content */
1648 nritems = btrfs_header_nritems(eb);
1649 if (btrfs_header_level(eb) > 0) {
1650 end = btrfs_node_key_ptr_offset(eb, nritems);
1651 memzero_extent_buffer(eb, end, eb->len - end);
1652 } else {
1653 /*
1654 * Leaf:
1655 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1656 */
1657 start = btrfs_item_nr_offset(eb, nritems);
1658 end = btrfs_item_nr_offset(eb, 0);
1659 if (nritems == 0)
1660 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1661 else
1662 end += btrfs_item_offset(eb, nritems - 1);
1663 memzero_extent_buffer(eb, start, end - start);
1664 }
1665 }
1666
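/*
 * Write out a single extent buffer: zero the unused areas, build a
 * REQ_META write bbio covering its page(s) (or the subpage range) and
 * submit it; completion is handled by extent_buffer_write_end_io().
 */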
1667 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1668 struct writeback_control *wbc)
1669 {
1670 struct btrfs_fs_info *fs_info = eb->fs_info;
1671 struct btrfs_bio *bbio;
1672
1673 prepare_eb_write(eb);
1674
1675 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1676 REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1677 eb->fs_info, extent_buffer_write_end_io, eb);
1678 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1679 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1680 wbc_init_bio(wbc, &bbio->bio);
1681 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1682 bbio->file_offset = eb->start;
1683 if (fs_info->nodesize < PAGE_SIZE) {
1684 struct page *p = eb->pages[0];
1685
1686 lock_page(p);
1687 btrfs_subpage_set_writeback(fs_info, p, eb->start, eb->len);
1688 if (btrfs_subpage_clear_and_test_dirty(fs_info, p, eb->start,
1689 eb->len)) {
1690 clear_page_dirty_for_io(p);
1691 wbc->nr_to_write--;
1692 }
1693 __bio_add_page(&bbio->bio, p, eb->len, eb->start - page_offset(p));
1694 wbc_account_cgroup_owner(wbc, p, eb->len);
1695 unlock_page(p);
1696 } else {
1697 for (int i = 0; i < num_extent_pages(eb); i++) {
1698 struct page *p = eb->pages[i];
1699
1700 lock_page(p);
1701 clear_page_dirty_for_io(p);
1702 set_page_writeback(p);
1703 __bio_add_page(&bbio->bio, p, PAGE_SIZE, 0);
1704 wbc_account_cgroup_owner(wbc, p, PAGE_SIZE);
1705 wbc->nr_to_write--;
1706 unlock_page(p);
1707 }
1708 }
1709 btrfs_submit_bio(bbio, 0);
1710 }
1711
1712 /*
1713 * Submit one subpage btree page.
1714 *
1715 * The main difference to submit_eb_page() is:
1716 * - Page locking
1717 * For subpage, we don't rely on page locking at all.
1718 *
1719 * - Flush write bio
1720  *   We only flush the bio if we may be unable to fit the current extent
1721  *   buffer into the current bio.
1722 *
1723 * Return >=0 for the number of submitted extent buffers.
1724 * Return <0 for fatal error.
1725 */
1726 static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
1727 {
1728 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
1729 int submitted = 0;
1730 u64 page_start = page_offset(page);
1731 int bit_start = 0;
1732 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1733
1734 	/* Lock and write each dirty extent buffer in the range */
1735 while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
1736 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
1737 struct extent_buffer *eb;
1738 unsigned long flags;
1739 u64 start;
1740
1741 /*
1742 * Take private lock to ensure the subpage won't be detached
1743 * in the meantime.
1744 */
1745 spin_lock(&page->mapping->private_lock);
1746 if (!PagePrivate(page)) {
1747 spin_unlock(&page->mapping->private_lock);
1748 break;
1749 }
1750 spin_lock_irqsave(&subpage->lock, flags);
1751 if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1752 subpage->bitmaps)) {
1753 spin_unlock_irqrestore(&subpage->lock, flags);
1754 spin_unlock(&page->mapping->private_lock);
1755 bit_start++;
1756 continue;
1757 }
1758
1759 start = page_start + bit_start * fs_info->sectorsize;
1760 bit_start += sectors_per_node;
1761
1762 /*
1763 * Here we just want to grab the eb without touching extra
1764 * spin locks, so call find_extent_buffer_nolock().
1765 */
1766 eb = find_extent_buffer_nolock(fs_info, start);
1767 spin_unlock_irqrestore(&subpage->lock, flags);
1768 spin_unlock(&page->mapping->private_lock);
1769
1770 /*
1771 		 * The eb has already reached 0 refs thus find_extent_buffer_nolock()
1772 * doesn't return it. We don't need to write back such eb
1773 * anyway.
1774 */
1775 if (!eb)
1776 continue;
1777
1778 if (lock_extent_buffer_for_io(eb, wbc)) {
1779 write_one_eb(eb, wbc);
1780 submitted++;
1781 }
1782 free_extent_buffer(eb);
1783 }
1784 return submitted;
1785 }
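/*
 * Illustrative example (editor's addition, figures are assumptions and not
 * taken from the original code): with a 64K page, 4K sectorsize and 16K
 * nodesize, sectors_per_node is 4 and the dirty bitmap covers 16 bits.  The
 * loop above advances bit_start by 1 while it sees clean sectors and jumps
 * ahead by 4 once a dirty bit is found, since that bit marks the first sector
 * of a whole dirty extent buffer.
 */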
1786
1787 /*
1788 * Submit all page(s) of one extent buffer.
1789 *
1790 * @page: the page of one extent buffer
1791  * @ctx:	the btrfs_eb_write_context tracking the current extent buffer;
1792  *              if the current page belongs to that eb, we don't need to submit it
1793  *
1794  * The caller should pass each page in bytenr order, and here we use
1795  * @ctx->eb to determine if we have already submitted the pages of one extent buffer.
1796  *
1797  * If we have, we just skip until we hit a new page that doesn't belong to
1798  * the current @ctx->eb.
1799 *
1800 * If not, we submit all the page(s) of the extent buffer.
1801 *
1802 * Return >0 if we have submitted the extent buffer successfully.
1803 * Return 0 if we don't need to submit the page, as it's already submitted by
1804 * previous call.
1805 * Return <0 for fatal error.
1806 */
1807 static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
1808 {
1809 struct writeback_control *wbc = ctx->wbc;
1810 struct address_space *mapping = page->mapping;
1811 struct extent_buffer *eb;
1812 int ret;
1813
1814 if (!PagePrivate(page))
1815 return 0;
1816
1817 if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
1818 return submit_eb_subpage(page, wbc);
1819
1820 spin_lock(&mapping->private_lock);
1821 if (!PagePrivate(page)) {
1822 spin_unlock(&mapping->private_lock);
1823 return 0;
1824 }
1825
1826 eb = (struct extent_buffer *)page->private;
1827
1828 /*
1829 * Shouldn't happen and normally this would be a BUG_ON but no point
1830 * crashing the machine for something we can survive anyway.
1831 */
1832 if (WARN_ON(!eb)) {
1833 spin_unlock(&mapping->private_lock);
1834 return 0;
1835 }
1836
1837 if (eb == ctx->eb) {
1838 spin_unlock(&mapping->private_lock);
1839 return 0;
1840 }
1841 ret = atomic_inc_not_zero(&eb->refs);
1842 spin_unlock(&mapping->private_lock);
1843 if (!ret)
1844 return 0;
1845
1846 ctx->eb = eb;
1847
1848 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1849 if (ret) {
1850 if (ret == -EBUSY)
1851 ret = 0;
1852 free_extent_buffer(eb);
1853 return ret;
1854 }
1855
1856 if (!lock_extent_buffer_for_io(eb, wbc)) {
1857 free_extent_buffer(eb);
1858 return 0;
1859 }
1860 /* Implies write in zoned mode. */
1861 if (ctx->zoned_bg) {
1862 /* Mark the last eb in the block group. */
1863 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1864 ctx->zoned_bg->meta_write_pointer += eb->len;
1865 }
1866 write_one_eb(eb, wbc);
1867 free_extent_buffer(eb);
1868 return 1;
1869 }
1870
1871 int btree_write_cache_pages(struct address_space *mapping,
1872 struct writeback_control *wbc)
1873 {
1874 struct btrfs_eb_write_context ctx = { .wbc = wbc };
1875 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
1876 int ret = 0;
1877 int done = 0;
1878 int nr_to_write_done = 0;
1879 struct folio_batch fbatch;
1880 unsigned int nr_folios;
1881 pgoff_t index;
1882 pgoff_t end; /* Inclusive */
1883 int scanned = 0;
1884 xa_mark_t tag;
1885
1886 folio_batch_init(&fbatch);
1887 if (wbc->range_cyclic) {
1888 index = mapping->writeback_index; /* Start from prev offset */
1889 end = -1;
1890 /*
1891 		 * Starting from the beginning does not need to cycle over the
1892 		 * range, so mark it as scanned.
1893 */
1894 scanned = (index == 0);
1895 } else {
1896 index = wbc->range_start >> PAGE_SHIFT;
1897 end = wbc->range_end >> PAGE_SHIFT;
1898 scanned = 1;
1899 }
1900 if (wbc->sync_mode == WB_SYNC_ALL)
1901 tag = PAGECACHE_TAG_TOWRITE;
1902 else
1903 tag = PAGECACHE_TAG_DIRTY;
1904 btrfs_zoned_meta_io_lock(fs_info);
1905 retry:
1906 if (wbc->sync_mode == WB_SYNC_ALL)
1907 tag_pages_for_writeback(mapping, index, end);
1908 while (!done && !nr_to_write_done && (index <= end) &&
1909 (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1910 tag, &fbatch))) {
1911 unsigned i;
1912
1913 for (i = 0; i < nr_folios; i++) {
1914 struct folio *folio = fbatch.folios[i];
1915
1916 ret = submit_eb_page(&folio->page, &ctx);
1917 if (ret == 0)
1918 continue;
1919 if (ret < 0) {
1920 done = 1;
1921 break;
1922 }
1923
1924 /*
1925 			 * The filesystem may choose to bump up nr_to_write.
1926 			 * We have to make sure to honor the new nr_to_write
1927 			 * at any time.
1928 */
1929 nr_to_write_done = wbc->nr_to_write <= 0;
1930 }
1931 folio_batch_release(&fbatch);
1932 cond_resched();
1933 }
1934 if (!scanned && !done) {
1935 /*
1936 * We hit the last page and there is more work to be done: wrap
1937 * back to the start of the file
1938 */
1939 scanned = 1;
1940 index = 0;
1941 goto retry;
1942 }
1943 /*
1944 * If something went wrong, don't allow any metadata write bio to be
1945 * submitted.
1946 *
1947 	 * This would prevent use-after-free if we had dirty pages not
1948 	 * cleaned up, which can still happen with fuzzed images.
1949 	 *
1950 	 * - Bad extent tree
1951 	 *   Allowing an existing tree block to be allocated for other trees.
1952 	 *
1953 	 * - Log tree operations
1954 	 *   Existing tree blocks get allocated to the log tree, which bumps
1955 	 *   their generation, then they get cleaned in tree re-balance.
1956 	 *   Such a tree block will not be written back, since it's clean,
1957 	 *   thus no WRITTEN flag is set.
1958 	 *   And after the log writes back, this tree block is not tracked by
1959 	 *   any dirty extent_io_tree.
1960 	 *
1961 	 * - Offending tree block gets re-dirtied from its original owner
1962 	 *   Since it has a bumped generation and no WRITTEN flag, it can be
1963 	 *   reused without COWing. This tree block will not be tracked
1964 	 *   by btrfs_transaction::dirty_pages.
1965 	 *
1966 	 * Now such a dirty tree block will not be cleaned by any dirty
1967 	 * extent io tree. Thus we don't want to submit such a wild eb
1968 	 * if the fs already has an error.
1969 	 *
1970 	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
1971 	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
1972 */
1973 if (ret > 0)
1974 ret = 0;
1975 if (!ret && BTRFS_FS_ERROR(fs_info))
1976 ret = -EROFS;
1977
1978 if (ctx.zoned_bg)
1979 btrfs_put_block_group(ctx.zoned_bg);
1980 btrfs_zoned_meta_io_unlock(fs_info);
1981 return ret;
1982 }
1983
1984 /*
1985 * Walk the list of dirty pages of the given address space and write all of them.
1986 *
1987 * @mapping: address space structure to write
1988 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1989 * @bio_ctrl: holds context for the write, namely the bio
1990 *
1991 * If a page is already under I/O, write_cache_pages() skips it, even
1992 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
1993 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
1994 * and msync() need to guarantee that all the data which was dirty at the time
1995 * the call was made get new I/O started against them. If wbc->sync_mode is
1996 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1997 * existing IO to complete.
1998 */
1999 static int extent_write_cache_pages(struct address_space *mapping,
2000 struct btrfs_bio_ctrl *bio_ctrl)
2001 {
2002 struct writeback_control *wbc = bio_ctrl->wbc;
2003 struct inode *inode = mapping->host;
2004 int ret = 0;
2005 int done = 0;
2006 int nr_to_write_done = 0;
2007 struct folio_batch fbatch;
2008 unsigned int nr_folios;
2009 pgoff_t index;
2010 pgoff_t end; /* Inclusive */
2011 pgoff_t done_index;
2012 int range_whole = 0;
2013 int scanned = 0;
2014 xa_mark_t tag;
2015
2016 /*
2017 * We have to hold onto the inode so that ordered extents can do their
2018 * work when the IO finishes. The alternative to this is failing to add
2019 * an ordered extent if the igrab() fails there and that is a huge pain
2020 * to deal with, so instead just hold onto the inode throughout the
2021 * writepages operation. If it fails here we are freeing up the inode
2022 * anyway and we'd rather not waste our time writing out stuff that is
2023 * going to be truncated anyway.
2024 */
2025 if (!igrab(inode))
2026 return 0;
2027
2028 folio_batch_init(&fbatch);
2029 if (wbc->range_cyclic) {
2030 index = mapping->writeback_index; /* Start from prev offset */
2031 end = -1;
2032 /*
2033 		 * Starting from the beginning does not need to cycle over the
2034 		 * range, so mark it as scanned.
2035 */
2036 scanned = (index == 0);
2037 } else {
2038 index = wbc->range_start >> PAGE_SHIFT;
2039 end = wbc->range_end >> PAGE_SHIFT;
2040 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2041 range_whole = 1;
2042 scanned = 1;
2043 }
2044
2045 /*
2046 * We do the tagged writepage as long as the snapshot flush bit is set
2047 	 * and we are the first one to do the filemap_flush() on this inode.
2048 *
2049 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2050 * not race in and drop the bit.
2051 */
2052 if (range_whole && wbc->nr_to_write == LONG_MAX &&
2053 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2054 &BTRFS_I(inode)->runtime_flags))
2055 wbc->tagged_writepages = 1;
2056
2057 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2058 tag = PAGECACHE_TAG_TOWRITE;
2059 else
2060 tag = PAGECACHE_TAG_DIRTY;
2061 retry:
2062 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2063 tag_pages_for_writeback(mapping, index, end);
2064 done_index = index;
2065 while (!done && !nr_to_write_done && (index <= end) &&
2066 (nr_folios = filemap_get_folios_tag(mapping, &index,
2067 end, tag, &fbatch))) {
2068 unsigned i;
2069
2070 for (i = 0; i < nr_folios; i++) {
2071 struct folio *folio = fbatch.folios[i];
2072
2073 done_index = folio_next_index(folio);
2074 /*
2075 * At this point we hold neither the i_pages lock nor
2076 * the page lock: the page may be truncated or
2077 * invalidated (changing page->mapping to NULL),
2078 * or even swizzled back from swapper_space to
2079 * tmpfs file mapping
2080 */
2081 if (!folio_trylock(folio)) {
2082 submit_write_bio(bio_ctrl, 0);
2083 folio_lock(folio);
2084 }
2085
2086 if (unlikely(folio->mapping != mapping)) {
2087 folio_unlock(folio);
2088 continue;
2089 }
2090
2091 if (!folio_test_dirty(folio)) {
2092 /* Someone wrote it for us. */
2093 folio_unlock(folio);
2094 continue;
2095 }
2096
2097 if (wbc->sync_mode != WB_SYNC_NONE) {
2098 if (folio_test_writeback(folio))
2099 submit_write_bio(bio_ctrl, 0);
2100 folio_wait_writeback(folio);
2101 }
2102
2103 if (folio_test_writeback(folio) ||
2104 !folio_clear_dirty_for_io(folio)) {
2105 folio_unlock(folio);
2106 continue;
2107 }
2108
2109 ret = __extent_writepage(&folio->page, bio_ctrl);
2110 if (ret < 0) {
2111 done = 1;
2112 break;
2113 }
2114
2115 /*
2116 * The filesystem may choose to bump up nr_to_write.
2117 * We have to make sure to honor the new nr_to_write
2118 * at any time.
2119 */
2120 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2121 wbc->nr_to_write <= 0);
2122 }
2123 folio_batch_release(&fbatch);
2124 cond_resched();
2125 }
2126 if (!scanned && !done) {
2127 /*
2128 * We hit the last page and there is more work to be done: wrap
2129 * back to the start of the file
2130 */
2131 scanned = 1;
2132 index = 0;
2133
2134 /*
2135 * If we're looping we could run into a page that is locked by a
2136 * writer and that writer could be waiting on writeback for a
2137 * page in our current bio, and thus deadlock, so flush the
2138 * write bio here.
2139 */
2140 submit_write_bio(bio_ctrl, 0);
2141 goto retry;
2142 }
2143
2144 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2145 mapping->writeback_index = done_index;
2146
2147 btrfs_add_delayed_iput(BTRFS_I(inode));
2148 return ret;
2149 }
2150
2151 /*
2152  * Submit the pages in the range to the bio for call sites where the delalloc
2153  * range has already been run (i.e., an ordered extent was inserted) and all
2154  * pages are still locked.
2155 */
2156 void extent_write_locked_range(struct inode *inode, struct page *locked_page,
2157 u64 start, u64 end, struct writeback_control *wbc,
2158 bool pages_dirty)
2159 {
2160 bool found_error = false;
2161 int ret = 0;
2162 struct address_space *mapping = inode->i_mapping;
2163 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2164 const u32 sectorsize = fs_info->sectorsize;
2165 loff_t i_size = i_size_read(inode);
2166 u64 cur = start;
2167 struct btrfs_bio_ctrl bio_ctrl = {
2168 .wbc = wbc,
2169 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2170 };
2171
2172 if (wbc->no_cgroup_owner)
2173 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2174
2175 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2176
2177 while (cur <= end) {
2178 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2179 u32 cur_len = cur_end + 1 - cur;
2180 struct page *page;
2181 int nr = 0;
2182
2183 page = find_get_page(mapping, cur >> PAGE_SHIFT);
2184 ASSERT(PageLocked(page));
2185 if (pages_dirty && page != locked_page)
2186 ASSERT(PageDirty(page));
2187
2188 ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
2189 i_size, &nr);
2190 if (ret == 1)
2191 goto next_page;
2192
2193 /* Make sure the mapping tag for page dirty gets cleared. */
2194 if (nr == 0) {
2195 set_page_writeback(page);
2196 end_page_writeback(page);
2197 }
2198 if (ret) {
2199 btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2200 cur, cur_len, !ret);
2201 mapping_set_error(page->mapping, ret);
2202 }
2203 btrfs_page_unlock_writer(fs_info, page, cur, cur_len);
2204 if (ret < 0)
2205 found_error = true;
2206 next_page:
2207 put_page(page);
2208 cur = cur_end + 1;
2209 }
2210
2211 submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2212 }
2213
2214 int extent_writepages(struct address_space *mapping,
2215 struct writeback_control *wbc)
2216 {
2217 struct inode *inode = mapping->host;
2218 int ret = 0;
2219 struct btrfs_bio_ctrl bio_ctrl = {
2220 .wbc = wbc,
2221 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2222 };
2223
2224 /*
2225 * Allow only a single thread to do the reloc work in zoned mode to
2226 * protect the write pointer updates.
2227 */
2228 btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2229 ret = extent_write_cache_pages(mapping, &bio_ctrl);
2230 submit_write_bio(&bio_ctrl, ret);
2231 btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2232 return ret;
2233 }
2234
2235 void extent_readahead(struct readahead_control *rac)
2236 {
2237 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2238 struct page *pagepool[16];
2239 struct extent_map *em_cached = NULL;
2240 u64 prev_em_start = (u64)-1;
2241 int nr;
2242
2243 while ((nr = readahead_page_batch(rac, pagepool))) {
2244 u64 contig_start = readahead_pos(rac);
2245 u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
2246
2247 contiguous_readpages(pagepool, nr, contig_start, contig_end,
2248 &em_cached, &bio_ctrl, &prev_em_start);
2249 }
2250
2251 if (em_cached)
2252 free_extent_map(em_cached);
2253 submit_one_bio(&bio_ctrl);
2254 }
2255
2256 /*
2257 * basic invalidate_folio code, this waits on any locked or writeback
2258 * ranges corresponding to the folio, and then deletes any extent state
2259 * records from the tree
2260 */
2261 int extent_invalidate_folio(struct extent_io_tree *tree,
2262 struct folio *folio, size_t offset)
2263 {
2264 struct extent_state *cached_state = NULL;
2265 u64 start = folio_pos(folio);
2266 u64 end = start + folio_size(folio) - 1;
2267 size_t blocksize = btrfs_sb(folio->mapping->host->i_sb)->sectorsize;
2268
2269 /* This function is only called for the btree inode */
2270 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2271
2272 start += ALIGN(offset, blocksize);
2273 if (start > end)
2274 return 0;
2275
2276 lock_extent(tree, start, end, &cached_state);
2277 folio_wait_writeback(folio);
2278
2279 /*
2280 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2281 * so here we only need to unlock the extent range to free any
2282 * existing extent state.
2283 */
2284 unlock_extent(tree, start, end, &cached_state);
2285 return 0;
2286 }
2287
2288 /*
2289 * a helper for release_folio, this tests for areas of the page that
2290 * are locked or under IO and drops the related state bits if it is safe
2291 * to drop the page.
2292 */
2293 static int try_release_extent_state(struct extent_io_tree *tree,
2294 struct page *page, gfp_t mask)
2295 {
2296 u64 start = page_offset(page);
2297 u64 end = start + PAGE_SIZE - 1;
2298 int ret = 1;
2299
2300 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
2301 ret = 0;
2302 } else {
2303 u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2304 EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2305 EXTENT_QGROUP_RESERVED);
2306
2307 /*
2308 * At this point we can safely clear everything except the
2309 * locked bit, the nodatasum bit and the delalloc new bit.
2310 * The delalloc new bit will be cleared by ordered extent
2311 * completion.
2312 */
2313 ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2314
2315 		/* If clear_extent_bit() failed due to ENOMEM,
2316 * we can't allow the release to continue.
2317 */
2318 if (ret < 0)
2319 ret = 0;
2320 else
2321 ret = 1;
2322 }
2323 return ret;
2324 }
2325
2326 /*
2327 * a helper for release_folio. As long as there are no locked extents
2328 * in the range corresponding to the page, both state records and extent
2329 * map records are removed
2330 */
2331 int try_release_extent_mapping(struct page *page, gfp_t mask)
2332 {
2333 struct extent_map *em;
2334 u64 start = page_offset(page);
2335 u64 end = start + PAGE_SIZE - 1;
2336 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
2337 struct extent_io_tree *tree = &btrfs_inode->io_tree;
2338 struct extent_map_tree *map = &btrfs_inode->extent_tree;
2339
2340 if (gfpflags_allow_blocking(mask) &&
2341 page->mapping->host->i_size > SZ_16M) {
2342 u64 len;
2343 while (start <= end) {
2344 struct btrfs_fs_info *fs_info;
2345 u64 cur_gen;
2346
2347 len = end - start + 1;
2348 write_lock(&map->lock);
2349 em = lookup_extent_mapping(map, start, len);
2350 if (!em) {
2351 write_unlock(&map->lock);
2352 break;
2353 }
2354 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2355 em->start != start) {
2356 write_unlock(&map->lock);
2357 free_extent_map(em);
2358 break;
2359 }
2360 if (test_range_bit(tree, em->start,
2361 extent_map_end(em) - 1,
2362 EXTENT_LOCKED, 0, NULL))
2363 goto next;
2364 /*
2365 * If it's not in the list of modified extents, used
2366 * by a fast fsync, we can remove it. If it's being
2367 * logged we can safely remove it since fsync took an
2368 * extra reference on the em.
2369 */
2370 if (list_empty(&em->list) ||
2371 test_bit(EXTENT_FLAG_LOGGING, &em->flags))
2372 goto remove_em;
2373 /*
2374 * If it's in the list of modified extents, remove it
2375 			 * only if its generation is older than the current one,
2376 * in which case we don't need it for a fast fsync.
2377 * Otherwise don't remove it, we could be racing with an
2378 * ongoing fast fsync that could miss the new extent.
2379 */
2380 fs_info = btrfs_inode->root->fs_info;
2381 spin_lock(&fs_info->trans_lock);
2382 cur_gen = fs_info->generation;
2383 spin_unlock(&fs_info->trans_lock);
2384 if (em->generation >= cur_gen)
2385 goto next;
2386 remove_em:
2387 /*
2388 * We only remove extent maps that are not in the list of
2389 * modified extents or that are in the list but with a
2390 			 * generation lower than the current generation, so there
2391 * is no need to set the full fsync flag on the inode (it
2392 * hurts the fsync performance for workloads with a data
2393 * size that exceeds or is close to the system's memory).
2394 */
2395 remove_extent_mapping(map, em);
2396 /* once for the rb tree */
2397 free_extent_map(em);
2398 next:
2399 start = extent_map_end(em);
2400 write_unlock(&map->lock);
2401
2402 /* once for us */
2403 free_extent_map(em);
2404
2405 cond_resched(); /* Allow large-extent preemption. */
2406 }
2407 }
2408 return try_release_extent_state(tree, page, mask);
2409 }
2410
2411 struct btrfs_fiemap_entry {
2412 u64 offset;
2413 u64 phys;
2414 u64 len;
2415 u32 flags;
2416 };
2417
2418 /*
2419  * Indicate to the caller of emit_fiemap_extent() that it needs to unlock the file
2420 * range from the inode's io tree, unlock the subvolume tree search path, flush
2421  * the fiemap cache and then relock the file range and re-search the subvolume tree.
2422 * The value here is something negative that can't be confused with a valid
2423 * errno value and different from 1 because that's also a return value from
2424 * fiemap_fill_next_extent() and also it's often used to mean some btree search
2425 * did not find a key, so make it some distinct negative value.
2426 */
2427 #define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
2428
2429 /*
2430 * Used to:
2431 *
2432 * - Cache the next entry to be emitted to the fiemap buffer, so that we can
2433 * merge extents that are contiguous and can be grouped as a single one;
2434 *
2435 * - Store extents ready to be written to the fiemap buffer in an intermediary
2436 * buffer. This intermediary buffer is to ensure that in case the fiemap
2437 * buffer is memory mapped to the fiemap target file, we don't deadlock
2438 * during btrfs_page_mkwrite(). This is because during fiemap we are locking
2439 * an extent range in order to prevent races with delalloc flushing and
2440 * ordered extent completion, which is needed in order to reliably detect
2441 * delalloc in holes and prealloc extents. And this can lead to a deadlock
2442 * if the fiemap buffer is memory mapped to the file we are running fiemap
2443 * against (a silly, useless in practice scenario, but possible) because
2444 * btrfs_page_mkwrite() will try to lock the same extent range.
2445 */
2446 struct fiemap_cache {
2447 /* An array of ready fiemap entries. */
2448 struct btrfs_fiemap_entry *entries;
2449 /* Number of entries in the entries array. */
2450 int entries_size;
2451 /* Index of the next entry in the entries array to write to. */
2452 int entries_pos;
2453 /*
2454 * Once the entries array is full, this indicates what's the offset for
2455 * the next file extent item we must search for in the inode's subvolume
2456 * tree after unlocking the extent range in the inode's io tree and
2457 * releasing the search path.
2458 */
2459 u64 next_search_offset;
2460 /*
2461 * This matches struct fiemap_extent_info::fi_mapped_extents, we use it
2462 	 * to count the extents we have emitted ourselves and stop instead of relying on
2463 * fiemap_fill_next_extent() because we buffer ready fiemap entries at
2464 * the @entries array, and we want to stop as soon as we hit the max
2465 * amount of extents to map, not just to save time but also to make the
2466 * logic at extent_fiemap() simpler.
2467 */
2468 unsigned int extents_mapped;
2469 /* Fields for the cached extent (unsubmitted, not ready, extent). */
2470 u64 offset;
2471 u64 phys;
2472 u64 len;
2473 u32 flags;
2474 bool cached;
2475 };
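/*
 * Editor's note with assumed numbers (not from the original code): the
 * entries array is sized to one page worth of btrfs_fiemap_entry records
 * (see extent_fiemap()), so with 4K pages and a 32 byte entry roughly 128
 * extents are buffered between flushes to the user supplied fiemap buffer.
 */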
2476
2477 static int flush_fiemap_cache(struct fiemap_extent_info *fieinfo,
2478 struct fiemap_cache *cache)
2479 {
2480 for (int i = 0; i < cache->entries_pos; i++) {
2481 struct btrfs_fiemap_entry *entry = &cache->entries[i];
2482 int ret;
2483
2484 ret = fiemap_fill_next_extent(fieinfo, entry->offset,
2485 entry->phys, entry->len,
2486 entry->flags);
2487 /*
2488 * Ignore 1 (reached max entries) because we keep track of that
2489 * ourselves in emit_fiemap_extent().
2490 */
2491 if (ret < 0)
2492 return ret;
2493 }
2494 cache->entries_pos = 0;
2495
2496 return 0;
2497 }
2498
2499 /*
2500 * Helper to submit fiemap extent.
2501 *
2502  * Will try to merge the current fiemap extent specified by @offset, @phys,
2503  * @len and @flags with the cached one.
2504  * Only when we fail to merge is the cached one submitted as a
2505  * fiemap extent.
2506 *
2507 * Return value is the same as fiemap_fill_next_extent().
2508 */
2509 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
2510 struct fiemap_cache *cache,
2511 u64 offset, u64 phys, u64 len, u32 flags)
2512 {
2513 struct btrfs_fiemap_entry *entry;
2514 u64 cache_end;
2515
2516 /* Set at the end of extent_fiemap(). */
2517 ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
2518
2519 if (!cache->cached)
2520 goto assign;
2521
2522 /*
2523 * When iterating the extents of the inode, at extent_fiemap(), we may
2524 * find an extent that starts at an offset behind the end offset of the
2525 * previous extent we processed. This happens if fiemap is called
2526 * without FIEMAP_FLAG_SYNC and there are ordered extents completing
2527 * after we had to unlock the file range, release the search path, emit
2528 * the fiemap extents stored in the buffer (cache->entries array) and
2529 	 * then lock the remainder of the range and re-search the btree.
2530 *
2531 * For example we are in leaf X processing its last item, which is the
2532 * file extent item for file range [512K, 1M[, and after
2533 * btrfs_next_leaf() releases the path, there's an ordered extent that
2534 * completes for the file range [768K, 2M[, and that results in trimming
2535 * the file extent item so that it now corresponds to the file range
2536 * [512K, 768K[ and a new file extent item is inserted for the file
2537 * range [768K, 2M[, which may end up as the last item of leaf X or as
2538 * the first item of the next leaf - in either case btrfs_next_leaf()
2539 * will leave us with a path pointing to the new extent item, for the
2540 * file range [768K, 2M[, since that's the first key that follows the
2541 * last one we processed. So in order not to report overlapping extents
2542 * to user space, we trim the length of the previously cached extent and
2543 * emit it.
2544 *
2545 * Upon calling btrfs_next_leaf() we may also find an extent with an
2546 	 * offset smaller than or equal to cache->offset, and this happens
2547 * when we had a hole or prealloc extent with several delalloc ranges in
2548 * it, but after btrfs_next_leaf() released the path, delalloc was
2549 * flushed and the resulting ordered extents were completed, so we can
2550 * now have found a file extent item for an offset that is smaller than
2551 	 * or equal to what we have in cache->offset. We deal with this as
2552 * described below.
2553 */
2554 cache_end = cache->offset + cache->len;
2555 if (cache_end > offset) {
2556 if (offset == cache->offset) {
2557 /*
2558 			 * We cached a delalloc range (found in the io tree) for
2559 * a hole or prealloc extent and we have now found a
2560 * file extent item for the same offset. What we have
2561 * now is more recent and up to date, so discard what
2562 * we had in the cache and use what we have just found.
2563 */
2564 goto assign;
2565 } else if (offset > cache->offset) {
2566 /*
2567 * The extent range we previously found ends after the
2568 * offset of the file extent item we found and that
2569 * offset falls somewhere in the middle of that previous
2570 * extent range. So adjust the range we previously found
2571 * to end at the offset of the file extent item we have
2572 * just found, since this extent is more up to date.
2573 * Emit that adjusted range and cache the file extent
2574 * item we have just found. This corresponds to the case
2575 * where a previously found file extent item was split
2576 * due to an ordered extent completing.
2577 */
2578 cache->len = offset - cache->offset;
2579 goto emit;
2580 } else {
2581 const u64 range_end = offset + len;
2582
2583 /*
2584 * The offset of the file extent item we have just found
2585 * is behind the cached offset. This means we were
2586 * processing a hole or prealloc extent for which we
2587 * have found delalloc ranges (in the io tree), so what
2588 * we have in the cache is the last delalloc range we
2589 * found while the file extent item we found can be
2590 * either for a whole delalloc range we previously
2591 			 * emitted or only a part of that range.
2592 *
2593 * We have two cases here:
2594 *
2595 * 1) The file extent item's range ends at or behind the
2596 * cached extent's end. In this case just ignore the
2597 * current file extent item because we don't want to
2598 * overlap with previous ranges that may have been
2599 			 *    emitted already;
2600 			 *
2601 			 * 2) The file extent item starts behind the currently
2602 			 *    cached extent but its end offset goes beyond the
2603 			 *    end offset of the cached extent. We don't want to
2604 			 *    overlap with a previous range that may have been
2605 			 *    emitted already, so we emit the currently cached
2606 			 *    extent and then partially store the current file
2607 			 *    extent item's range in the cache, for the subrange
2608 			 *    going from the cached extent's end to the end of the
2609 * file extent item.
2610 */
2611 if (range_end <= cache_end)
2612 return 0;
2613
2614 if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
2615 phys += cache_end - offset;
2616
2617 offset = cache_end;
2618 len = range_end - cache_end;
2619 goto emit;
2620 }
2621 }
2622
2623 /*
2624 	 * Only merge fiemap extents if:
2625 	 * 1) Their logical addresses are contiguous
2626 	 *
2627 	 * 2) Their physical addresses are contiguous
2628 	 *    So truly compressed (physical size smaller than logical size)
2629 	 *    extents won't get merged with each other
2630 	 *
2631 	 * 3) They share the same flags
2632 */
2633 if (cache->offset + cache->len == offset &&
2634 cache->phys + cache->len == phys &&
2635 cache->flags == flags) {
2636 cache->len += len;
2637 return 0;
2638 }
2639
2640 emit:
2641 /* Not mergeable, need to submit cached one */
2642
2643 if (cache->entries_pos == cache->entries_size) {
2644 /*
2645 		 * We will need to re-search from the end offset of the last
2646 * stored extent and not from the current offset, because after
2647 * unlocking the range and releasing the path, if there's a hole
2648 * between that end offset and this current offset, a new extent
2649 * may have been inserted due to a new write, so we don't want
2650 * to miss it.
2651 */
2652 entry = &cache->entries[cache->entries_size - 1];
2653 cache->next_search_offset = entry->offset + entry->len;
2654 cache->cached = false;
2655
2656 return BTRFS_FIEMAP_FLUSH_CACHE;
2657 }
2658
2659 entry = &cache->entries[cache->entries_pos];
2660 entry->offset = cache->offset;
2661 entry->phys = cache->phys;
2662 entry->len = cache->len;
2663 entry->flags = cache->flags;
2664 cache->entries_pos++;
2665 cache->extents_mapped++;
2666
2667 if (cache->extents_mapped == fieinfo->fi_extents_max) {
2668 cache->cached = false;
2669 return 1;
2670 }
2671 assign:
2672 cache->cached = true;
2673 cache->offset = offset;
2674 cache->phys = phys;
2675 cache->len = len;
2676 cache->flags = flags;
2677
2678 return 0;
2679 }
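/*
 * Illustrative example added by the editor (offsets are hypothetical): if the
 * cache holds { offset = 0, phys = 1M, len = 64K, flags = 0 } and the next
 * extent passed in is { offset = 64K, phys = 1M + 64K, len = 64K, flags = 0 },
 * both the logical and physical ranges are contiguous and the flags match, so
 * the two are merged into a single cached extent of length 128K instead of
 * being reported separately.
 */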
2680
2681 /*
2682 * Emit last fiemap cache
2683 *
2684  * The last fiemap extent may still be cached in the following case:
2685 * 0 4k 8k
2686 * |<- Fiemap range ->|
2687 * |<------------ First extent ----------->|
2688 *
2689 * In this case, the first extent range will be cached but not emitted.
2690 * So we must emit it before ending extent_fiemap().
2691 */
2692 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
2693 struct fiemap_cache *cache)
2694 {
2695 int ret;
2696
2697 if (!cache->cached)
2698 return 0;
2699
2700 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2701 cache->len, cache->flags);
2702 cache->cached = false;
2703 if (ret > 0)
2704 ret = 0;
2705 return ret;
2706 }
2707
2708 static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
2709 {
2710 struct extent_buffer *clone;
2711 struct btrfs_key key;
2712 int slot;
2713 int ret;
2714
2715 path->slots[0]++;
2716 if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
2717 return 0;
2718
2719 ret = btrfs_next_leaf(inode->root, path);
2720 if (ret != 0)
2721 return ret;
2722
2723 /*
2724 * Don't bother with cloning if there are no more file extent items for
2725 * our inode.
2726 */
2727 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2728 if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY)
2729 return 1;
2730
2731 /* See the comment at fiemap_search_slot() about why we clone. */
2732 clone = btrfs_clone_extent_buffer(path->nodes[0]);
2733 if (!clone)
2734 return -ENOMEM;
2735
2736 slot = path->slots[0];
2737 btrfs_release_path(path);
2738 path->nodes[0] = clone;
2739 path->slots[0] = slot;
2740
2741 return 0;
2742 }
2743
2744 /*
2745 * Search for the first file extent item that starts at a given file offset or
2746 * the one that starts immediately before that offset.
2747 * Returns: 0 on success, < 0 on error, 1 if not found.
2748 */
2749 static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
2750 u64 file_offset)
2751 {
2752 const u64 ino = btrfs_ino(inode);
2753 struct btrfs_root *root = inode->root;
2754 struct extent_buffer *clone;
2755 struct btrfs_key key;
2756 int slot;
2757 int ret;
2758
2759 key.objectid = ino;
2760 key.type = BTRFS_EXTENT_DATA_KEY;
2761 key.offset = file_offset;
2762
2763 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2764 if (ret < 0)
2765 return ret;
2766
2767 if (ret > 0 && path->slots[0] > 0) {
2768 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
2769 if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
2770 path->slots[0]--;
2771 }
2772
2773 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2774 ret = btrfs_next_leaf(root, path);
2775 if (ret != 0)
2776 return ret;
2777
2778 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2779 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2780 return 1;
2781 }
2782
2783 /*
2784 * We clone the leaf and use it during fiemap. This is because while
2785 * using the leaf we do expensive things like checking if an extent is
2786 * shared, which can take a long time. In order to prevent blocking
2787 * other tasks for too long, we use a clone of the leaf. We have locked
2788 * the file range in the inode's io tree, so we know none of our file
2789 * extent items can change. This way we avoid blocking other tasks that
2790 * want to insert items for other inodes in the same leaf or b+tree
2791 * rebalance operations (triggered for example when someone is trying
2792 * to push items into this leaf when trying to insert an item in a
2793 * neighbour leaf).
2794 * We also need the private clone because holding a read lock on an
2795 * extent buffer of the subvolume's b+tree will make lockdep unhappy
2796 * when we check if extents are shared, as backref walking may need to
2797 * lock the same leaf we are processing.
2798 */
2799 clone = btrfs_clone_extent_buffer(path->nodes[0]);
2800 if (!clone)
2801 return -ENOMEM;
2802
2803 slot = path->slots[0];
2804 btrfs_release_path(path);
2805 path->nodes[0] = clone;
2806 path->slots[0] = slot;
2807
2808 return 0;
2809 }
2810
2811 /*
2812 * Process a range which is a hole or a prealloc extent in the inode's subvolume
2813 * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2814 * extent. The end offset (@end) is inclusive.
2815 */
2816 static int fiemap_process_hole(struct btrfs_inode *inode,
2817 struct fiemap_extent_info *fieinfo,
2818 struct fiemap_cache *cache,
2819 struct extent_state **delalloc_cached_state,
2820 struct btrfs_backref_share_check_ctx *backref_ctx,
2821 u64 disk_bytenr, u64 extent_offset,
2822 u64 extent_gen,
2823 u64 start, u64 end)
2824 {
2825 const u64 i_size = i_size_read(&inode->vfs_inode);
2826 u64 cur_offset = start;
2827 u64 last_delalloc_end = 0;
2828 u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
2829 bool checked_extent_shared = false;
2830 int ret;
2831
2832 /*
2833 * There can be no delalloc past i_size, so don't waste time looking for
2834 * it beyond i_size.
2835 */
2836 while (cur_offset < end && cur_offset < i_size) {
2837 u64 delalloc_start;
2838 u64 delalloc_end;
2839 u64 prealloc_start;
2840 u64 prealloc_len = 0;
2841 bool delalloc;
2842
2843 delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
2844 delalloc_cached_state,
2845 &delalloc_start,
2846 &delalloc_end);
2847 if (!delalloc)
2848 break;
2849
2850 /*
2851 * If this is a prealloc extent we have to report every section
2852 * of it that has no delalloc.
2853 */
2854 if (disk_bytenr != 0) {
2855 if (last_delalloc_end == 0) {
2856 prealloc_start = start;
2857 prealloc_len = delalloc_start - start;
2858 } else {
2859 prealloc_start = last_delalloc_end + 1;
2860 prealloc_len = delalloc_start - prealloc_start;
2861 }
2862 }
2863
2864 if (prealloc_len > 0) {
2865 if (!checked_extent_shared && fieinfo->fi_extents_max) {
2866 ret = btrfs_is_data_extent_shared(inode,
2867 disk_bytenr,
2868 extent_gen,
2869 backref_ctx);
2870 if (ret < 0)
2871 return ret;
2872 else if (ret > 0)
2873 prealloc_flags |= FIEMAP_EXTENT_SHARED;
2874
2875 checked_extent_shared = true;
2876 }
2877 ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2878 disk_bytenr + extent_offset,
2879 prealloc_len, prealloc_flags);
2880 if (ret)
2881 return ret;
2882 extent_offset += prealloc_len;
2883 }
2884
2885 ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
2886 delalloc_end + 1 - delalloc_start,
2887 FIEMAP_EXTENT_DELALLOC |
2888 FIEMAP_EXTENT_UNKNOWN);
2889 if (ret)
2890 return ret;
2891
2892 last_delalloc_end = delalloc_end;
2893 cur_offset = delalloc_end + 1;
2894 extent_offset += cur_offset - delalloc_start;
2895 cond_resched();
2896 }
2897
2898 /*
2899 * Either we found no delalloc for the whole prealloc extent or we have
2900 * a prealloc extent that spans i_size or starts at or after i_size.
2901 */
2902 if (disk_bytenr != 0 && last_delalloc_end < end) {
2903 u64 prealloc_start;
2904 u64 prealloc_len;
2905
2906 if (last_delalloc_end == 0) {
2907 prealloc_start = start;
2908 prealloc_len = end + 1 - start;
2909 } else {
2910 prealloc_start = last_delalloc_end + 1;
2911 prealloc_len = end + 1 - prealloc_start;
2912 }
2913
2914 if (!checked_extent_shared && fieinfo->fi_extents_max) {
2915 ret = btrfs_is_data_extent_shared(inode,
2916 disk_bytenr,
2917 extent_gen,
2918 backref_ctx);
2919 if (ret < 0)
2920 return ret;
2921 else if (ret > 0)
2922 prealloc_flags |= FIEMAP_EXTENT_SHARED;
2923 }
2924 ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2925 disk_bytenr + extent_offset,
2926 prealloc_len, prealloc_flags);
2927 if (ret)
2928 return ret;
2929 }
2930
2931 return 0;
2932 }
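/*
 * Worked example from the editor (ranges are hypothetical): for a prealloc
 * extent covering [0, 1M) with a single delalloc range at [256K, 512K), the
 * loop above emits prealloc [0, 256K) followed by delalloc [256K, 512K), and
 * the tail handling after the loop emits the remaining prealloc [512K, 1M).
 */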
2933
2934 static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
2935 struct btrfs_path *path,
2936 u64 *last_extent_end_ret)
2937 {
2938 const u64 ino = btrfs_ino(inode);
2939 struct btrfs_root *root = inode->root;
2940 struct extent_buffer *leaf;
2941 struct btrfs_file_extent_item *ei;
2942 struct btrfs_key key;
2943 u64 disk_bytenr;
2944 int ret;
2945
2946 /*
2947 * Lookup the last file extent. We're not using i_size here because
2948 * there might be preallocation past i_size.
2949 */
2950 ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
2951 /* There can't be a file extent item at offset (u64)-1 */
2952 ASSERT(ret != 0);
2953 if (ret < 0)
2954 return ret;
2955
2956 /*
2957 * For a non-existing key, btrfs_search_slot() always leaves us at a
2958 * slot > 0, except if the btree is empty, which is impossible because
2959 * at least it has the inode item for this inode and all the items for
2960 * the root inode 256.
2961 */
2962 ASSERT(path->slots[0] > 0);
2963 path->slots[0]--;
2964 leaf = path->nodes[0];
2965 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2966 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
2967 /* No file extent items in the subvolume tree. */
2968 *last_extent_end_ret = 0;
2969 return 0;
2970 }
2971
2972 /*
2973 	 * For an inline extent, the disk_bytenr is where the inline data starts,
2974 * so first check if we have an inline extent item before checking if we
2975 * have an implicit hole (disk_bytenr == 0).
2976 */
2977 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
2978 if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
2979 *last_extent_end_ret = btrfs_file_extent_end(path);
2980 return 0;
2981 }
2982
2983 /*
2984 * Find the last file extent item that is not a hole (when NO_HOLES is
2985 * not enabled). This should take at most 2 iterations in the worst
2986 * case: we have one hole file extent item at slot 0 of a leaf and
2987 * another hole file extent item as the last item in the previous leaf.
2988 * This is because we merge file extent items that represent holes.
2989 */
2990 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2991 while (disk_bytenr == 0) {
2992 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
2993 if (ret < 0) {
2994 return ret;
2995 } else if (ret > 0) {
2996 /* No file extent items that are not holes. */
2997 *last_extent_end_ret = 0;
2998 return 0;
2999 }
3000 leaf = path->nodes[0];
3001 ei = btrfs_item_ptr(leaf, path->slots[0],
3002 struct btrfs_file_extent_item);
3003 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3004 }
3005
3006 *last_extent_end_ret = btrfs_file_extent_end(path);
3007 return 0;
3008 }
3009
3010 int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
3011 u64 start, u64 len)
3012 {
3013 const u64 ino = btrfs_ino(inode);
3014 struct extent_state *cached_state = NULL;
3015 struct extent_state *delalloc_cached_state = NULL;
3016 struct btrfs_path *path;
3017 struct fiemap_cache cache = { 0 };
3018 struct btrfs_backref_share_check_ctx *backref_ctx;
3019 u64 last_extent_end;
3020 u64 prev_extent_end;
3021 u64 range_start;
3022 u64 range_end;
3023 const u64 sectorsize = inode->root->fs_info->sectorsize;
3024 bool stopped = false;
3025 int ret;
3026
3027 cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry);
3028 cache.entries = kmalloc_array(cache.entries_size,
3029 sizeof(struct btrfs_fiemap_entry),
3030 GFP_KERNEL);
3031 backref_ctx = btrfs_alloc_backref_share_check_ctx();
3032 path = btrfs_alloc_path();
3033 if (!cache.entries || !backref_ctx || !path) {
3034 ret = -ENOMEM;
3035 goto out;
3036 }
3037
3038 restart:
3039 range_start = round_down(start, sectorsize);
3040 range_end = round_up(start + len, sectorsize);
3041 prev_extent_end = range_start;
3042
3043 lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
3044
3045 ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
3046 if (ret < 0)
3047 goto out_unlock;
3048 btrfs_release_path(path);
3049
3050 path->reada = READA_FORWARD;
3051 ret = fiemap_search_slot(inode, path, range_start);
3052 if (ret < 0) {
3053 goto out_unlock;
3054 } else if (ret > 0) {
3055 /*
3056 * No file extent item found, but we may have delalloc between
3057 * the current offset and i_size. So check for that.
3058 */
3059 ret = 0;
3060 goto check_eof_delalloc;
3061 }
3062
3063 while (prev_extent_end < range_end) {
3064 struct extent_buffer *leaf = path->nodes[0];
3065 struct btrfs_file_extent_item *ei;
3066 struct btrfs_key key;
3067 u64 extent_end;
3068 u64 extent_len;
3069 u64 extent_offset = 0;
3070 u64 extent_gen;
3071 u64 disk_bytenr = 0;
3072 u64 flags = 0;
3073 int extent_type;
3074 u8 compression;
3075
3076 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3077 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3078 break;
3079
3080 extent_end = btrfs_file_extent_end(path);
3081
3082 /*
3083 * The first iteration can leave us at an extent item that ends
3084 * before our range's start. Move to the next item.
3085 */
3086 if (extent_end <= range_start)
3087 goto next_item;
3088
3089 backref_ctx->curr_leaf_bytenr = leaf->start;
3090
3091 		/* We have an implicit hole (NO_HOLES feature enabled). */
3092 if (prev_extent_end < key.offset) {
3093 const u64 hole_end = min(key.offset, range_end) - 1;
3094
3095 ret = fiemap_process_hole(inode, fieinfo, &cache,
3096 &delalloc_cached_state,
3097 backref_ctx, 0, 0, 0,
3098 prev_extent_end, hole_end);
3099 if (ret < 0) {
3100 goto out_unlock;
3101 } else if (ret > 0) {
3102 /* fiemap_fill_next_extent() told us to stop. */
3103 stopped = true;
3104 break;
3105 }
3106
3107 /* We've reached the end of the fiemap range, stop. */
3108 if (key.offset >= range_end) {
3109 stopped = true;
3110 break;
3111 }
3112 }
3113
3114 extent_len = extent_end - key.offset;
3115 ei = btrfs_item_ptr(leaf, path->slots[0],
3116 struct btrfs_file_extent_item);
3117 compression = btrfs_file_extent_compression(leaf, ei);
3118 extent_type = btrfs_file_extent_type(leaf, ei);
3119 extent_gen = btrfs_file_extent_generation(leaf, ei);
3120
3121 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3122 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3123 if (compression == BTRFS_COMPRESS_NONE)
3124 extent_offset = btrfs_file_extent_offset(leaf, ei);
3125 }
3126
3127 if (compression != BTRFS_COMPRESS_NONE)
3128 flags |= FIEMAP_EXTENT_ENCODED;
3129
3130 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3131 flags |= FIEMAP_EXTENT_DATA_INLINE;
3132 flags |= FIEMAP_EXTENT_NOT_ALIGNED;
3133 ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
3134 extent_len, flags);
3135 } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3136 ret = fiemap_process_hole(inode, fieinfo, &cache,
3137 &delalloc_cached_state,
3138 backref_ctx,
3139 disk_bytenr, extent_offset,
3140 extent_gen, key.offset,
3141 extent_end - 1);
3142 } else if (disk_bytenr == 0) {
3143 /* We have an explicit hole. */
3144 ret = fiemap_process_hole(inode, fieinfo, &cache,
3145 &delalloc_cached_state,
3146 backref_ctx, 0, 0, 0,
3147 key.offset, extent_end - 1);
3148 } else {
3149 /* We have a regular extent. */
3150 if (fieinfo->fi_extents_max) {
3151 ret = btrfs_is_data_extent_shared(inode,
3152 disk_bytenr,
3153 extent_gen,
3154 backref_ctx);
3155 if (ret < 0)
3156 goto out_unlock;
3157 else if (ret > 0)
3158 flags |= FIEMAP_EXTENT_SHARED;
3159 }
3160
3161 ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
3162 disk_bytenr + extent_offset,
3163 extent_len, flags);
3164 }
3165
3166 if (ret < 0) {
3167 goto out_unlock;
3168 } else if (ret > 0) {
3169 /* emit_fiemap_extent() told us to stop. */
3170 stopped = true;
3171 break;
3172 }
3173
3174 prev_extent_end = extent_end;
3175 next_item:
3176 if (fatal_signal_pending(current)) {
3177 ret = -EINTR;
3178 goto out_unlock;
3179 }
3180
3181 ret = fiemap_next_leaf_item(inode, path);
3182 if (ret < 0) {
3183 goto out_unlock;
3184 } else if (ret > 0) {
3185 /* No more file extent items for this inode. */
3186 break;
3187 }
3188 cond_resched();
3189 }
3190
3191 check_eof_delalloc:
3192 if (!stopped && prev_extent_end < range_end) {
3193 ret = fiemap_process_hole(inode, fieinfo, &cache,
3194 &delalloc_cached_state, backref_ctx,
3195 0, 0, 0, prev_extent_end, range_end - 1);
3196 if (ret < 0)
3197 goto out_unlock;
3198 prev_extent_end = range_end;
3199 }
3200
3201 if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3202 const u64 i_size = i_size_read(&inode->vfs_inode);
3203
3204 if (prev_extent_end < i_size) {
3205 u64 delalloc_start;
3206 u64 delalloc_end;
3207 bool delalloc;
3208
3209 delalloc = btrfs_find_delalloc_in_range(inode,
3210 prev_extent_end,
3211 i_size - 1,
3212 &delalloc_cached_state,
3213 &delalloc_start,
3214 &delalloc_end);
3215 if (!delalloc)
3216 cache.flags |= FIEMAP_EXTENT_LAST;
3217 } else {
3218 cache.flags |= FIEMAP_EXTENT_LAST;
3219 }
3220 }
3221
3222 out_unlock:
3223 unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
3224
3225 if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
3226 btrfs_release_path(path);
3227 ret = flush_fiemap_cache(fieinfo, &cache);
3228 if (ret)
3229 goto out;
3230 len -= cache.next_search_offset - start;
3231 start = cache.next_search_offset;
3232 goto restart;
3233 } else if (ret < 0) {
3234 goto out;
3235 }
3236
3237 /*
3238 * Must free the path before emitting to the fiemap buffer because we
3239 * may have a non-cloned leaf and if the fiemap buffer is memory mapped
3240 * to a file, a write into it (through btrfs_page_mkwrite()) may trigger
3241 * waiting for an ordered extent that in order to complete needs to
3242 * modify that leaf, therefore leading to a deadlock.
3243 */
3244 btrfs_free_path(path);
3245 path = NULL;
3246
3247 ret = flush_fiemap_cache(fieinfo, &cache);
3248 if (ret)
3249 goto out;
3250
3251 ret = emit_last_fiemap_cache(fieinfo, &cache);
3252 out:
3253 free_extent_state(delalloc_cached_state);
3254 kfree(cache.entries);
3255 btrfs_free_backref_share_ctx(backref_ctx);
3256 btrfs_free_path(path);
3257 return ret;
3258 }
3259
3260 static void __free_extent_buffer(struct extent_buffer *eb)
3261 {
3262 kmem_cache_free(extent_buffer_cache, eb);
3263 }
3264
3265 static int extent_buffer_under_io(const struct extent_buffer *eb)
3266 {
3267 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3268 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3269 }
3270
3271 static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
3272 {
3273 struct btrfs_subpage *subpage;
3274
3275 lockdep_assert_held(&page->mapping->private_lock);
3276
3277 if (PagePrivate(page)) {
3278 subpage = (struct btrfs_subpage *)page->private;
3279 if (atomic_read(&subpage->eb_refs))
3280 return true;
3281 /*
3282 		 * Even if there are no eb refs here, we may still have an
3283 		 * end_page_read() call relying on page::private.
3284 */
3285 if (atomic_read(&subpage->readers))
3286 return true;
3287 }
3288 return false;
3289 }
3290
3291 static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
3292 {
3293 struct btrfs_fs_info *fs_info = eb->fs_info;
3294 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3295
3296 /*
3297 * For mapped eb, we're going to change the page private, which should
3298 * be done under the private_lock.
3299 */
3300 if (mapped)
3301 spin_lock(&page->mapping->private_lock);
3302
3303 if (!PagePrivate(page)) {
3304 if (mapped)
3305 spin_unlock(&page->mapping->private_lock);
3306 return;
3307 }
3308
3309 if (fs_info->nodesize >= PAGE_SIZE) {
3310 /*
3311 * We do this since we'll remove the pages after we've
3312 * removed the eb from the radix tree, so we could race
3313 * and have this page now attached to the new eb. So
3314 * only clear page_private if it's still connected to
3315 * this eb.
3316 */
3317 if (PagePrivate(page) &&
3318 page->private == (unsigned long)eb) {
3319 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3320 BUG_ON(PageDirty(page));
3321 BUG_ON(PageWriteback(page));
3322 /*
3323 			 * We need to make sure we haven't been attached
3324 * to a new eb.
3325 */
3326 detach_page_private(page);
3327 }
3328 if (mapped)
3329 spin_unlock(&page->mapping->private_lock);
3330 return;
3331 }
3332
3333 /*
3334 * For subpage, we can have dummy eb with page private. In this case,
3335 * we can directly detach the private as such page is only attached to
3336 * one dummy eb, no sharing.
3337 */
3338 if (!mapped) {
3339 btrfs_detach_subpage(fs_info, page);
3340 return;
3341 }
3342
3343 btrfs_page_dec_eb_refs(fs_info, page);
3344
3345 /*
3346 * We can only detach the page private if there are no other ebs in the
3347 * page range and no unfinished IO.
3348 */
3349 if (!page_range_has_eb(fs_info, page))
3350 btrfs_detach_subpage(fs_info, page);
3351
3352 spin_unlock(&page->mapping->private_lock);
3353 }
3354
3355 /* Release all pages attached to the extent buffer */
3356 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
3357 {
3358 int i;
3359 int num_pages;
3360
3361 ASSERT(!extent_buffer_under_io(eb));
3362
3363 num_pages = num_extent_pages(eb);
3364 for (i = 0; i < num_pages; i++) {
3365 struct page *page = eb->pages[i];
3366
3367 if (!page)
3368 continue;
3369
3370 detach_extent_buffer_page(eb, page);
3371
3372 /* One for when we allocated the page */
3373 put_page(page);
3374 }
3375 }
3376
3377 /*
3378 * Helper for releasing the extent buffer.
3379 */
3380 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3381 {
3382 btrfs_release_extent_buffer_pages(eb);
3383 btrfs_leak_debug_del_eb(eb);
3384 __free_extent_buffer(eb);
3385 }
3386
3387 static struct extent_buffer *
3388 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
3389 unsigned long len)
3390 {
3391 struct extent_buffer *eb = NULL;
3392
3393 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
3394 eb->start = start;
3395 eb->len = len;
3396 eb->fs_info = fs_info;
3397 init_rwsem(&eb->lock);
3398
3399 btrfs_leak_debug_add_eb(eb);
3400
3401 spin_lock_init(&eb->refs_lock);
3402 atomic_set(&eb->refs, 1);
3403
3404 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
3405
3406 return eb;
3407 }
3408
3409 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
3410 {
3411 int i;
3412 struct extent_buffer *new;
3413 int num_pages = num_extent_pages(src);
3414 int ret;
3415
3416 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
3417 if (new == NULL)
3418 return NULL;
3419
3420 /*
3421 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
3422 * btrfs_release_extent_buffer() has different behavior for an
3423 * UNMAPPED subpage extent buffer.
3424 */
3425 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
3426
3427 ret = btrfs_alloc_page_array(num_pages, new->pages);
3428 if (ret) {
3429 btrfs_release_extent_buffer(new);
3430 return NULL;
3431 }
3432
3433 for (i = 0; i < num_pages; i++) {
3434 int ret;
3435 struct page *p = new->pages[i];
3436
3437 ret = attach_extent_buffer_page(new, p, NULL);
3438 if (ret < 0) {
3439 btrfs_release_extent_buffer(new);
3440 return NULL;
3441 }
3442 WARN_ON(PageDirty(p));
3443 }
3444 copy_extent_buffer_full(new, src);
3445 set_extent_buffer_uptodate(new);
3446
3447 return new;
3448 }
3449
3450 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3451 u64 start, unsigned long len)
3452 {
3453 struct extent_buffer *eb;
3454 int num_pages;
3455 int i;
3456 int ret;
3457
3458 eb = __alloc_extent_buffer(fs_info, start, len);
3459 if (!eb)
3460 return NULL;
3461
3462 num_pages = num_extent_pages(eb);
3463 ret = btrfs_alloc_page_array(num_pages, eb->pages);
3464 if (ret)
3465 goto err;
3466
3467 for (i = 0; i < num_pages; i++) {
3468 struct page *p = eb->pages[i];
3469
3470 ret = attach_extent_buffer_page(eb, p, NULL);
3471 if (ret < 0)
3472 goto err;
3473 }
3474
3475 set_extent_buffer_uptodate(eb);
3476 btrfs_set_header_nritems(eb, 0);
3477 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3478
3479 return eb;
3480 err:
3481 for (i = 0; i < num_pages; i++) {
3482 if (eb->pages[i]) {
3483 detach_extent_buffer_page(eb, eb->pages[i]);
3484 __free_page(eb->pages[i]);
3485 }
3486 }
3487 __free_extent_buffer(eb);
3488 return NULL;
3489 }
3490
3491 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3492 u64 start)
3493 {
3494 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
3495 }
3496
3497 static void check_buffer_tree_ref(struct extent_buffer *eb)
3498 {
3499 int refs;
3500 /*
3501 * The TREE_REF bit is first set when the extent_buffer is added
3502 * to the radix tree. It is also reset, if unset, when a new reference
3503 * is created by find_extent_buffer.
3504 *
3505 * It is only cleared in two cases: freeing the last non-tree
3506 * reference to the extent_buffer when its STALE bit is set or
3507 * calling release_folio when the tree reference is the only reference.
3508 *
3509 * In both cases, care is taken to ensure that the extent_buffer's
3510 * pages are not under io. However, release_folio can be concurrently
3511 * called with creating new references, which is prone to race
3512 * conditions between the calls to check_buffer_tree_ref in those
3513 * codepaths and clearing TREE_REF in try_release_extent_buffer.
3514 *
3515 * The actual lifetime of the extent_buffer in the radix tree is
3516 * adequately protected by the refcount, but the TREE_REF bit and
3517 * its corresponding reference are not. To protect against this
3518 * class of races, we call check_buffer_tree_ref from the codepaths
3519 * which trigger io. Note that once io is initiated, TREE_REF can no
3520 * longer be cleared, so that is the moment at which any such race is
3521 * best fixed.
3522 */
3523 refs = atomic_read(&eb->refs);
3524 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3525 return;
3526
3527 spin_lock(&eb->refs_lock);
3528 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3529 atomic_inc(&eb->refs);
3530 spin_unlock(&eb->refs_lock);
3531 }
3532
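/*
 * Make sure the eb holds its tree reference and mark all of its pages
 * accessed for reclaim purposes, except @accessed which the caller has
 * already touched.
 */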
3533 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
3534 struct page *accessed)
3535 {
3536 int num_pages, i;
3537
3538 check_buffer_tree_ref(eb);
3539
3540 num_pages = num_extent_pages(eb);
3541 for (i = 0; i < num_pages; i++) {
3542 struct page *p = eb->pages[i];
3543
3544 if (p != accessed)
3545 mark_page_accessed(p);
3546 }
3547 }
3548
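/*
 * Look up the extent buffer at @start in the buffer radix tree and return it
 * holding an extra reference, or NULL if it is not present.
 */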
3549 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3550 u64 start)
3551 {
3552 struct extent_buffer *eb;
3553
3554 eb = find_extent_buffer_nolock(fs_info, start);
3555 if (!eb)
3556 return NULL;
3557 /*
3558 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3559 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3560 * another task running free_extent_buffer() might have seen that flag
3561 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3562 * writeback flags not set) and it's still in the tree (flag
3563 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3564 * decrementing the extent buffer's reference count twice. So here we
3565 * could race and increment the eb's reference count, clear its stale
3566 * flag, mark it as dirty and drop our reference before the other task
3567 * finishes executing free_extent_buffer, which would later result in
3568 * an attempt to free an extent buffer that is dirty.
3569 */
3570 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3571 spin_lock(&eb->refs_lock);
3572 spin_unlock(&eb->refs_lock);
3573 }
3574 mark_extent_buffer_accessed(eb, NULL);
3575 return eb;
3576 }
3577
3578 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3579 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
3580 u64 start)
3581 {
3582 struct extent_buffer *eb, *exists = NULL;
3583 int ret;
3584
3585 eb = find_extent_buffer(fs_info, start);
3586 if (eb)
3587 return eb;
3588 eb = alloc_dummy_extent_buffer(fs_info, start);
3589 if (!eb)
3590 return ERR_PTR(-ENOMEM);
3591 eb->fs_info = fs_info;
3592 again:
3593 ret = radix_tree_preload(GFP_NOFS);
3594 if (ret) {
3595 exists = ERR_PTR(ret);
3596 goto free_eb;
3597 }
3598 spin_lock(&fs_info->buffer_lock);
3599 ret = radix_tree_insert(&fs_info->buffer_radix,
3600 start >> fs_info->sectorsize_bits, eb);
3601 spin_unlock(&fs_info->buffer_lock);
3602 radix_tree_preload_end();
3603 if (ret == -EEXIST) {
3604 exists = find_extent_buffer(fs_info, start);
3605 if (exists)
3606 goto free_eb;
3607 else
3608 goto again;
3609 }
3610 check_buffer_tree_ref(eb);
3611 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3612
3613 return eb;
3614 free_eb:
3615 btrfs_release_extent_buffer(eb);
3616 return exists;
3617 }
3618 #endif
3619
3620 static struct extent_buffer *grab_extent_buffer(
3621 struct btrfs_fs_info *fs_info, struct page *page)
3622 {
3623 struct extent_buffer *exists;
3624
3625 /*
3626 * For the subpage case, we completely rely on the radix tree to ensure
3627 * we don't try to insert two ebs for the same bytenr. So here we always
3628 * return NULL and just continue.
3629 */
3630 if (fs_info->nodesize < PAGE_SIZE)
3631 return NULL;
3632
3633 /* Page not yet attached to an extent buffer */
3634 if (!PagePrivate(page))
3635 return NULL;
3636
3637 /*
3638 * We could have already allocated an eb for this page and attached one,
3639 * so let's see if we can get a ref on the existing eb. If we can, we
3640 * know it's good and we can just return that one; otherwise we know we
3641 * can just overwrite page->private.
3642 */
3643 exists = (struct extent_buffer *)page->private;
3644 if (atomic_inc_not_zero(&exists->refs))
3645 return exists;
3646
3647 WARN_ON(PageDirty(page));
3648 detach_page_private(page);
3649 return NULL;
3650 }
3651
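/*
 * Sanity check the tree block start: it must be sectorsize aligned, and
 * either page aligned (nodesize >= PAGE_SIZE) or not cross a page boundary
 * (subpage case).  Returns -EINVAL for a bad start.
 */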
3652 static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3653 {
3654 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3655 btrfs_err(fs_info, "bad tree block start %llu", start);
3656 return -EINVAL;
3657 }
3658
3659 if (fs_info->nodesize < PAGE_SIZE &&
3660 offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
3661 btrfs_err(fs_info,
3662 "tree block crosses page boundary, start %llu nodesize %u",
3663 start, fs_info->nodesize);
3664 return -EINVAL;
3665 }
3666 if (fs_info->nodesize >= PAGE_SIZE &&
3667 !PAGE_ALIGNED(start)) {
3668 btrfs_err(fs_info,
3669 "tree block is not page aligned, start %llu nodesize %u",
3670 start, fs_info->nodesize);
3671 return -EINVAL;
3672 }
3673 return 0;
3674 }
3675
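/*
 * Allocate or look up the extent buffer for the tree block at @start.
 *
 * The pages are taken from the btree inode's page cache and the eb is
 * inserted into the buffer radix tree; if another task wins the insertion
 * race we return its eb instead.  Returns an ERR_PTR() on failure.
 *
 * Illustrative call only (names are placeholders):
 *
 *	eb = alloc_extent_buffer(fs_info, bytenr, btrfs_header_owner(parent),
 *				 level);
 */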
3676 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3677 u64 start, u64 owner_root, int level)
3678 {
3679 unsigned long len = fs_info->nodesize;
3680 int num_pages;
3681 int i;
3682 unsigned long index = start >> PAGE_SHIFT;
3683 struct extent_buffer *eb;
3684 struct extent_buffer *exists = NULL;
3685 struct page *p;
3686 struct address_space *mapping = fs_info->btree_inode->i_mapping;
3687 struct btrfs_subpage *prealloc = NULL;
3688 u64 lockdep_owner = owner_root;
3689 int uptodate = 1;
3690 int ret;
3691
3692 if (check_eb_alignment(fs_info, start))
3693 return ERR_PTR(-EINVAL);
3694
3695 #if BITS_PER_LONG == 32
3696 if (start >= MAX_LFS_FILESIZE) {
3697 btrfs_err_rl(fs_info,
3698 "extent buffer %llu is beyond 32bit page cache limit", start);
3699 btrfs_err_32bit_limit(fs_info);
3700 return ERR_PTR(-EOVERFLOW);
3701 }
3702 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3703 btrfs_warn_32bit_limit(fs_info);
3704 #endif
3705
3706 eb = find_extent_buffer(fs_info, start);
3707 if (eb)
3708 return eb;
3709
3710 eb = __alloc_extent_buffer(fs_info, start, len);
3711 if (!eb)
3712 return ERR_PTR(-ENOMEM);
3713
3714 /*
3715 * The reloc trees are just snapshots, so we need them to appear to be
3716 * just like any other fs tree WRT lockdep.
3717 */
3718 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3719 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3720
3721 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3722
3723 num_pages = num_extent_pages(eb);
3724
3725 /*
3726 * Preallocate page->private for the subpage case, so that we won't
3727 * allocate memory while holding the private_lock or the page lock.
3728 *
3729 * The memory will be freed by attach_extent_buffer_page() or freed
3730 * manually if we exit earlier.
3731 */
3732 if (fs_info->nodesize < PAGE_SIZE) {
3733 prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3734 if (IS_ERR(prealloc)) {
3735 exists = ERR_CAST(prealloc);
3736 goto free_eb;
3737 }
3738 }
3739
3740 for (i = 0; i < num_pages; i++, index++) {
3741 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
3742 if (!p) {
3743 exists = ERR_PTR(-ENOMEM);
3744 btrfs_free_subpage(prealloc);
3745 goto free_eb;
3746 }
3747
3748 spin_lock(&mapping->private_lock);
3749 exists = grab_extent_buffer(fs_info, p);
3750 if (exists) {
3751 spin_unlock(&mapping->private_lock);
3752 unlock_page(p);
3753 put_page(p);
3754 mark_extent_buffer_accessed(exists, p);
3755 btrfs_free_subpage(prealloc);
3756 goto free_eb;
3757 }
3758 /* Should not fail, as we have preallocated the memory */
3759 ret = attach_extent_buffer_page(eb, p, prealloc);
3760 ASSERT(!ret);
3761 /*
3762 * To indicate we have an extra eb under allocation, so that
3763 * detach_extent_buffer_page() won't release the page private
3764 * when the eb hasn't yet been inserted into the radix tree.
3765 *
3766 * The ref will be decreased when the eb releases the page, in
3767 * detach_extent_buffer_page(), thus it needs no special handling
3768 * in the error path.
3769 */
3770 btrfs_page_inc_eb_refs(fs_info, p);
3771 spin_unlock(&mapping->private_lock);
3772
3773 WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
3774 eb->pages[i] = p;
3775 if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len))
3776 uptodate = 0;
3777
3778 /*
3779 * We can't unlock the pages just yet since the extent buffer
3780 * hasn't been properly inserted into the radix tree; this
3781 * opens a race with btree_release_folio(), which can free a
3782 * page while we are still filling in all pages for the buffer,
3783 * and we could crash.
3784 */
3785 }
3786 if (uptodate)
3787 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3788 again:
3789 ret = radix_tree_preload(GFP_NOFS);
3790 if (ret) {
3791 exists = ERR_PTR(ret);
3792 goto free_eb;
3793 }
3794
3795 spin_lock(&fs_info->buffer_lock);
3796 ret = radix_tree_insert(&fs_info->buffer_radix,
3797 start >> fs_info->sectorsize_bits, eb);
3798 spin_unlock(&fs_info->buffer_lock);
3799 radix_tree_preload_end();
3800 if (ret == -EEXIST) {
3801 exists = find_extent_buffer(fs_info, start);
3802 if (exists)
3803 goto free_eb;
3804 else
3805 goto again;
3806 }
3807 /* add one reference for the tree */
3808 check_buffer_tree_ref(eb);
3809 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3810
3811 /*
3812 * Now it's safe to unlock the pages because any calls to
3813 * btree_release_folio will correctly detect that a page belongs to a
3814 * live buffer and won't free them prematurely.
3815 */
3816 for (i = 0; i < num_pages; i++)
3817 unlock_page(eb->pages[i]);
3818 return eb;
3819
3820 free_eb:
3821 WARN_ON(!atomic_dec_and_test(&eb->refs));
3822 for (i = 0; i < num_pages; i++) {
3823 if (eb->pages[i])
3824 unlock_page(eb->pages[i]);
3825 }
3826
3827 btrfs_release_extent_buffer(eb);
3828 return exists;
3829 }
3830
3831 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3832 {
3833 struct extent_buffer *eb =
3834 container_of(head, struct extent_buffer, rcu_head);
3835
3836 __free_extent_buffer(eb);
3837 }
3838
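/*
 * Drop one reference with eb->refs_lock held.  If it was the last one, remove
 * the eb from the radix tree, release its pages and free it (via RCU for
 * mapped ebs).  Returns 1 if the eb was freed, 0 otherwise.  Always drops
 * eb->refs_lock.
 */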
3839 static int release_extent_buffer(struct extent_buffer *eb)
3840 __releases(&eb->refs_lock)
3841 {
3842 lockdep_assert_held(&eb->refs_lock);
3843
3844 WARN_ON(atomic_read(&eb->refs) == 0);
3845 if (atomic_dec_and_test(&eb->refs)) {
3846 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3847 struct btrfs_fs_info *fs_info = eb->fs_info;
3848
3849 spin_unlock(&eb->refs_lock);
3850
3851 spin_lock(&fs_info->buffer_lock);
3852 radix_tree_delete(&fs_info->buffer_radix,
3853 eb->start >> fs_info->sectorsize_bits);
3854 spin_unlock(&fs_info->buffer_lock);
3855 } else {
3856 spin_unlock(&eb->refs_lock);
3857 }
3858
3859 btrfs_leak_debug_del_eb(eb);
3860 /* Should be safe to release our pages at this point */
3861 btrfs_release_extent_buffer_pages(eb);
3862 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3863 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3864 __free_extent_buffer(eb);
3865 return 1;
3866 }
3867 #endif
3868 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3869 return 1;
3870 }
3871 spin_unlock(&eb->refs_lock);
3872
3873 return 0;
3874 }
3875
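/*
 * Drop a reference on the eb.  The lockless cmpxchg loop covers the common
 * case; once we might be holding one of the last references we fall back to
 * eb->refs_lock so the STALE/TREE_REF handling and the final free in
 * release_extent_buffer() are serialized.
 */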
3876 void free_extent_buffer(struct extent_buffer *eb)
3877 {
3878 int refs;
3879 if (!eb)
3880 return;
3881
3882 refs = atomic_read(&eb->refs);
3883 while (1) {
3884 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3885 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3886 refs == 1))
3887 break;
3888 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3889 return;
3890 }
3891
3892 spin_lock(&eb->refs_lock);
3893 if (atomic_read(&eb->refs) == 2 &&
3894 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3895 !extent_buffer_under_io(eb) &&
3896 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3897 atomic_dec(&eb->refs);
3898
3899 /*
3900 * I know this is terrible, but it's temporary until we stop tracking
3901 * the uptodate bits and such for the extent buffers.
3902 */
3903 release_extent_buffer(eb);
3904 }
3905
3906 void free_extent_buffer_stale(struct extent_buffer *eb)
3907 {
3908 if (!eb)
3909 return;
3910
3911 spin_lock(&eb->refs_lock);
3912 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3913
3914 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3915 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3916 atomic_dec(&eb->refs);
3917 release_extent_buffer(eb);
3918 }
3919
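/*
 * Clear the dirty bit of a locked btree page and, if the page is no longer
 * dirty, also clear PAGECACHE_TAG_DIRTY in the mapping so writeback won't
 * pick it up again.
 */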
3920 static void btree_clear_page_dirty(struct page *page)
3921 {
3922 ASSERT(PageDirty(page));
3923 ASSERT(PageLocked(page));
3924 clear_page_dirty_for_io(page);
3925 xa_lock_irq(&page->mapping->i_pages);
3926 if (!PageDirty(page))
3927 __xa_clear_mark(&page->mapping->i_pages,
3928 page_index(page), PAGECACHE_TAG_DIRTY);
3929 xa_unlock_irq(&page->mapping->i_pages);
3930 }
3931
3932 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3933 {
3934 struct btrfs_fs_info *fs_info = eb->fs_info;
3935 struct page *page = eb->pages[0];
3936 bool last;
3937
3938 /* btree_clear_page_dirty() needs page locked */
3939 lock_page(page);
3940 last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
3941 eb->len);
3942 if (last)
3943 btree_clear_page_dirty(page);
3944 unlock_page(page);
3945 WARN_ON(atomic_read(&eb->refs) == 0);
3946 }
3947
3948 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3949 struct extent_buffer *eb)
3950 {
3951 struct btrfs_fs_info *fs_info = eb->fs_info;
3952 int i;
3953 int num_pages;
3954 struct page *page;
3955
3956 btrfs_assert_tree_write_locked(eb);
3957
3958 if (trans && btrfs_header_generation(eb) != trans->transid)
3959 return;
3960
3961 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3962 return;
3963
3964 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3965 fs_info->dirty_metadata_batch);
3966
3967 if (eb->fs_info->nodesize < PAGE_SIZE)
3968 return clear_subpage_extent_buffer_dirty(eb);
3969
3970 num_pages = num_extent_pages(eb);
3971
3972 for (i = 0; i < num_pages; i++) {
3973 page = eb->pages[i];
3974 if (!PageDirty(page))
3975 continue;
3976 lock_page(page);
3977 btree_clear_page_dirty(page);
3978 unlock_page(page);
3979 }
3980 WARN_ON(atomic_read(&eb->refs) == 0);
3981 }
3982
3983 void set_extent_buffer_dirty(struct extent_buffer *eb)
3984 {
3985 int i;
3986 int num_pages;
3987 bool was_dirty;
3988
3989 check_buffer_tree_ref(eb);
3990
3991 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3992
3993 num_pages = num_extent_pages(eb);
3994 WARN_ON(atomic_read(&eb->refs) == 0);
3995 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3996
3997 if (!was_dirty) {
3998 bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3999
4000 /*
4001 * For the subpage case, we can have other extent buffers in the
4002 * same page, and in clear_subpage_extent_buffer_dirty() we
4003 * have to clear the page dirty flag without the subpage lock held.
4004 * This can cause a race where our page dirty flag gets cleared
4005 * right after we just set it.
4006 *
4007 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
4008 * its page for other reasons, so we can use the page lock to
4009 * prevent the above race.
4010 */
4011 if (subpage)
4012 lock_page(eb->pages[0]);
4013 for (i = 0; i < num_pages; i++)
4014 btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
4015 eb->start, eb->len);
4016 if (subpage)
4017 unlock_page(eb->pages[0]);
4018 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
4019 eb->len,
4020 eb->fs_info->dirty_metadata_batch);
4021 }
4022 #ifdef CONFIG_BTRFS_DEBUG
4023 for (i = 0; i < num_pages; i++)
4024 ASSERT(PageDirty(eb->pages[i]));
4025 #endif
4026 }
4027
4028 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
4029 {
4030 struct btrfs_fs_info *fs_info = eb->fs_info;
4031 struct page *page;
4032 int num_pages;
4033 int i;
4034
4035 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4036 num_pages = num_extent_pages(eb);
4037 for (i = 0; i < num_pages; i++) {
4038 page = eb->pages[i];
4039 if (!page)
4040 continue;
4041
4042 /*
4043 * This is special handling for the metadata subpage case, as regular
4044 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
4045 */
4046 if (fs_info->nodesize >= PAGE_SIZE)
4047 ClearPageUptodate(page);
4048 else
4049 btrfs_subpage_clear_uptodate(fs_info, page, eb->start,
4050 eb->len);
4051 }
4052 }
4053
4054 void set_extent_buffer_uptodate(struct extent_buffer *eb)
4055 {
4056 struct btrfs_fs_info *fs_info = eb->fs_info;
4057 struct page *page;
4058 int num_pages;
4059 int i;
4060
4061 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4062 num_pages = num_extent_pages(eb);
4063 for (i = 0; i < num_pages; i++) {
4064 page = eb->pages[i];
4065
4066 /*
4067 * This is special handling for the metadata subpage case, as regular
4068 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
4069 */
4070 if (fs_info->nodesize >= PAGE_SIZE)
4071 SetPageUptodate(page);
4072 else
4073 btrfs_subpage_set_uptodate(fs_info, page, eb->start,
4074 eb->len);
4075 }
4076 }
4077
4078 static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
4079 {
4080 struct extent_buffer *eb = bbio->private;
4081 struct btrfs_fs_info *fs_info = eb->fs_info;
4082 bool uptodate = !bbio->bio.bi_status;
4083 struct bvec_iter_all iter_all;
4084 struct bio_vec *bvec;
4085 u32 bio_offset = 0;
4086
4087 eb->read_mirror = bbio->mirror_num;
4088
4089 if (uptodate &&
4090 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
4091 uptodate = false;
4092
4093 if (uptodate) {
4094 set_extent_buffer_uptodate(eb);
4095 } else {
4096 clear_extent_buffer_uptodate(eb);
4097 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4098 }
4099
4100 bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
4101 u64 start = eb->start + bio_offset;
4102 struct page *page = bvec->bv_page;
4103 u32 len = bvec->bv_len;
4104
4105 if (uptodate)
4106 btrfs_page_set_uptodate(fs_info, page, start, len);
4107 else
4108 btrfs_page_clear_uptodate(fs_info, page, start, len);
4109
4110 bio_offset += len;
4111 }
4112
4113 clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4114 smp_mb__after_atomic();
4115 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4116 free_extent_buffer(eb);
4117
4118 bio_put(&bbio->bio);
4119 }
4120
4121 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
4122 struct btrfs_tree_parent_check *check)
4123 {
4124 int num_pages = num_extent_pages(eb), i;
4125 struct btrfs_bio *bbio;
4126
4127 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4128 return 0;
4129
4130 /*
4131 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
4132 * operation, which could potentially still be in flight. In this case
4133 * we simply want to return an error.
4134 */
4135 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
4136 return -EIO;
4137
4138 /* Someone else is already reading the buffer, just wait for it. */
4139 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
4140 goto done;
4141
4142 /*
4143 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
4144 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
4145 * started and finished reading the same eb. In this case, UPTODATE
4146 * will now be set, and we shouldn't read it in again.
4147 */
4148 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
4149 clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4150 smp_mb__after_atomic();
4151 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4152 return 0;
4153 }
4154
4155 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4156 eb->read_mirror = 0;
4157 check_buffer_tree_ref(eb);
4158 atomic_inc(&eb->refs);
4159
4160 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
4161 REQ_OP_READ | REQ_META, eb->fs_info,
4162 extent_buffer_read_end_io, eb);
4163 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
4164 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
4165 bbio->file_offset = eb->start;
4166 memcpy(&bbio->parent_check, check, sizeof(*check));
4167 if (eb->fs_info->nodesize < PAGE_SIZE) {
4168 __bio_add_page(&bbio->bio, eb->pages[0], eb->len,
4169 eb->start - page_offset(eb->pages[0]));
4170 } else {
4171 for (i = 0; i < num_pages; i++)
4172 __bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0);
4173 }
4174 btrfs_submit_bio(bbio, mirror_num);
4175
4176 done:
4177 if (wait == WAIT_COMPLETE) {
4178 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
4179 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4180 return -EIO;
4181 }
4182
4183 return 0;
4184 }
4185
4186 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
4187 unsigned long len)
4188 {
4189 btrfs_warn(eb->fs_info,
4190 "access to eb bytenr %llu len %lu out of range start %lu len %lu",
4191 eb->start, eb->len, start, len);
4192 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4193
4194 return true;
4195 }
4196
4197 /*
4198 * Check if the [start, start + len) range is valid before reading/writing
4199 * the eb.
4200 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
4201 *
4202 * Callers should not touch the dst/src memory if this function returns an error.
4203 */
4204 static inline int check_eb_range(const struct extent_buffer *eb,
4205 unsigned long start, unsigned long len)
4206 {
4207 unsigned long offset;
4208
4209 /* start, start + len should not go beyond eb->len nor overflow */
4210 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
4211 return report_eb_range(eb, start, len);
4212
4213 return false;
4214 }
4215
4216 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
4217 unsigned long start, unsigned long len)
4218 {
4219 size_t cur;
4220 size_t offset;
4221 struct page *page;
4222 char *kaddr;
4223 char *dst = (char *)dstv;
4224 unsigned long i = get_eb_page_index(start);
4225
4226 if (check_eb_range(eb, start, len)) {
4227 /*
4228 * Invalid range hit, reset the memory, so callers won't get
4229 * some random garbage for their uninitialized memory.
4230 */
4231 memset(dstv, 0, len);
4232 return;
4233 }
4234
4235 offset = get_eb_offset_in_page(eb, start);
4236
4237 while (len > 0) {
4238 page = eb->pages[i];
4239
4240 cur = min(len, (PAGE_SIZE - offset));
4241 kaddr = page_address(page);
4242 memcpy(dst, kaddr + offset, cur);
4243
4244 dst += cur;
4245 len -= cur;
4246 offset = 0;
4247 i++;
4248 }
4249 }
4250
4251 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4252 void __user *dstv,
4253 unsigned long start, unsigned long len)
4254 {
4255 size_t cur;
4256 size_t offset;
4257 struct page *page;
4258 char *kaddr;
4259 char __user *dst = (char __user *)dstv;
4260 unsigned long i = get_eb_page_index(start);
4261 int ret = 0;
4262
4263 WARN_ON(start > eb->len);
4264 WARN_ON(start + len > eb->start + eb->len);
4265
4266 offset = get_eb_offset_in_page(eb, start);
4267
4268 while (len > 0) {
4269 page = eb->pages[i];
4270
4271 cur = min(len, (PAGE_SIZE - offset));
4272 kaddr = page_address(page);
4273 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
4274 ret = -EFAULT;
4275 break;
4276 }
4277
4278 dst += cur;
4279 len -= cur;
4280 offset = 0;
4281 i++;
4282 }
4283
4284 return ret;
4285 }
4286
4287 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4288 unsigned long start, unsigned long len)
4289 {
4290 size_t cur;
4291 size_t offset;
4292 struct page *page;
4293 char *kaddr;
4294 char *ptr = (char *)ptrv;
4295 unsigned long i = get_eb_page_index(start);
4296 int ret = 0;
4297
4298 if (check_eb_range(eb, start, len))
4299 return -EINVAL;
4300
4301 offset = get_eb_offset_in_page(eb, start);
4302
4303 while (len > 0) {
4304 page = eb->pages[i];
4305
4306 cur = min(len, (PAGE_SIZE - offset));
4307
4308 kaddr = page_address(page);
4309 ret = memcmp(ptr, kaddr + offset, cur);
4310 if (ret)
4311 break;
4312
4313 ptr += cur;
4314 len -= cur;
4315 offset = 0;
4316 i++;
4317 }
4318 return ret;
4319 }
4320
4321 /*
4322 * Check that the extent buffer is uptodate.
4323 *
4324 * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
4325 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
4326 */
4327 static void assert_eb_page_uptodate(const struct extent_buffer *eb,
4328 struct page *page)
4329 {
4330 struct btrfs_fs_info *fs_info = eb->fs_info;
4331
4332 /*
4333 * If we are using the commit root we could potentially clear a page's
4334 * Uptodate flag while we're using the extent buffer that we've previously
4335 * looked up. We don't want to complain in this case, as the page was
4336 * valid before, we just didn't write it out. Instead we want to catch
4337 * the case where we didn't actually read the block properly, which
4338 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
4339 */
4340 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4341 return;
4342
4343 if (fs_info->nodesize < PAGE_SIZE) {
4344 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, page,
4345 eb->start, eb->len)))
4346 btrfs_subpage_dump_bitmap(fs_info, page, eb->start, eb->len);
4347 } else {
4348 WARN_ON(!PageUptodate(page));
4349 }
4350 }
4351
4352 static void __write_extent_buffer(const struct extent_buffer *eb,
4353 const void *srcv, unsigned long start,
4354 unsigned long len, bool use_memmove)
4355 {
4356 size_t cur;
4357 size_t offset;
4358 struct page *page;
4359 char *kaddr;
4360 char *src = (char *)srcv;
4361 unsigned long i = get_eb_page_index(start);
4362 /* For unmapped (dummy) ebs, no need to check their uptodate status. */
4363 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4364
4365 WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
4366
4367 if (check_eb_range(eb, start, len))
4368 return;
4369
4370 offset = get_eb_offset_in_page(eb, start);
4371
4372 while (len > 0) {
4373 page = eb->pages[i];
4374 if (check_uptodate)
4375 assert_eb_page_uptodate(eb, page);
4376
4377 cur = min(len, PAGE_SIZE - offset);
4378 kaddr = page_address(page);
4379 if (use_memmove)
4380 memmove(kaddr + offset, src, cur);
4381 else
4382 memcpy(kaddr + offset, src, cur);
4383
4384 src += cur;
4385 len -= cur;
4386 offset = 0;
4387 i++;
4388 }
4389 }
4390
4391 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4392 unsigned long start, unsigned long len)
4393 {
4394 return __write_extent_buffer(eb, srcv, start, len, false);
4395 }
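/*
 * Illustrative sketch only (the offset name is a placeholder): metadata is
 * normally read and updated through these helpers instead of touching the
 * pages directly, e.g.:
 *
 *	u64 gen;
 *
 *	read_extent_buffer(eb, &gen, offset_of_generation_field, sizeof(gen));
 *	gen++;
 *	write_extent_buffer(eb, &gen, offset_of_generation_field, sizeof(gen));
 */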
4396
4397 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4398 unsigned long start, unsigned long len)
4399 {
4400 unsigned long cur = start;
4401
4402 while (cur < start + len) {
4403 unsigned long index = get_eb_page_index(cur);
4404 unsigned int offset = get_eb_offset_in_page(eb, cur);
4405 unsigned int cur_len = min(start + len - cur, PAGE_SIZE - offset);
4406 struct page *page = eb->pages[index];
4407
4408 assert_eb_page_uptodate(eb, page);
4409 memset(page_address(page) + offset, c, cur_len);
4410
4411 cur += cur_len;
4412 }
4413 }
4414
4415 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4416 unsigned long len)
4417 {
4418 if (check_eb_range(eb, start, len))
4419 return;
4420 return memset_extent_buffer(eb, 0, start, len);
4421 }
4422
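/*
 * Copy the full contents of @src into @dst.  Both extent buffers must have
 * the same length; the copy is done page by page through
 * write_extent_buffer().
 */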
4423 void copy_extent_buffer_full(const struct extent_buffer *dst,
4424 const struct extent_buffer *src)
4425 {
4426 unsigned long cur = 0;
4427
4428 ASSERT(dst->len == src->len);
4429
4430 while (cur < src->len) {
4431 unsigned long index = get_eb_page_index(cur);
4432 unsigned long offset = get_eb_offset_in_page(src, cur);
4433 unsigned long cur_len = min(src->len, PAGE_SIZE - offset);
4434 void *addr = page_address(src->pages[index]) + offset;
4435
4436 write_extent_buffer(dst, addr, cur, cur_len);
4437
4438 cur += cur_len;
4439 }
4440 }
4441
4442 void copy_extent_buffer(const struct extent_buffer *dst,
4443 const struct extent_buffer *src,
4444 unsigned long dst_offset, unsigned long src_offset,
4445 unsigned long len)
4446 {
4447 u64 dst_len = dst->len;
4448 size_t cur;
4449 size_t offset;
4450 struct page *page;
4451 char *kaddr;
4452 unsigned long i = get_eb_page_index(dst_offset);
4453
4454 if (check_eb_range(dst, dst_offset, len) ||
4455 check_eb_range(src, src_offset, len))
4456 return;
4457
4458 WARN_ON(src->len != dst_len);
4459
4460 offset = get_eb_offset_in_page(dst, dst_offset);
4461
4462 while (len > 0) {
4463 page = dst->pages[i];
4464 assert_eb_page_uptodate(dst, page);
4465
4466 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
4467
4468 kaddr = page_address(page);
4469 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4470
4471 src_offset += cur;
4472 len -= cur;
4473 offset = 0;
4474 i++;
4475 }
4476 }
4477
4478 /*
4479 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
4480 * given bit number
4481 * @eb: the extent buffer
4482 * @start: offset of the bitmap item in the extent buffer
4483 * @nr: bit number
4484 * @page_index: return index of the page in the extent buffer that contains the
4485 * given bit number
4486 * @page_offset: return offset into the page given by page_index
4487 *
4488 * This helper hides the ugliness of finding the byte in an extent buffer which
4489 * contains a given bit.
4490 */
4491 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
4492 unsigned long start, unsigned long nr,
4493 unsigned long *page_index,
4494 size_t *page_offset)
4495 {
4496 size_t byte_offset = BIT_BYTE(nr);
4497 size_t offset;
4498
4499 /*
4500 * The byte we want is the offset of the extent buffer + the offset of
4501 * the bitmap item in the extent buffer + the offset of the byte in the
4502 * bitmap item.
4503 */
4504 offset = start + offset_in_page(eb->start) + byte_offset;
4505
4506 *page_index = offset >> PAGE_SHIFT;
4507 *page_offset = offset_in_page(offset);
4508 }
4509
4510 /*
4511 * Determine whether a bit in a bitmap item is set.
4512 *
4513 * @eb: the extent buffer
4514 * @start: offset of the bitmap item in the extent buffer
4515 * @nr: bit number to test
4516 */
4517 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
4518 unsigned long nr)
4519 {
4520 u8 *kaddr;
4521 struct page *page;
4522 unsigned long i;
4523 size_t offset;
4524
4525 eb_bitmap_offset(eb, start, nr, &i, &offset);
4526 page = eb->pages[i];
4527 assert_eb_page_uptodate(eb, page);
4528 kaddr = page_address(page);
4529 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
4530 }
4531
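/*
 * Return the kernel address of the byte at offset @bytenr inside the eb, or
 * NULL if the offset is out of range.  Despite the name, @bytenr is an offset
 * inside the eb, not a logical address.
 */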
4532 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
4533 {
4534 unsigned long index = get_eb_page_index(bytenr);
4535
4536 if (check_eb_range(eb, bytenr, 1))
4537 return NULL;
4538 return page_address(eb->pages[index]) + get_eb_offset_in_page(eb, bytenr);
4539 }
4540
4541 /*
4542 * Set an area of a bitmap to 1.
4543 *
4544 * @eb: the extent buffer
4545 * @start: offset of the bitmap item in the extent buffer
4546 * @pos: bit number of the first bit
4547 * @len: number of bits to set
4548 */
4549 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4550 unsigned long pos, unsigned long len)
4551 {
4552 unsigned int first_byte = start + BIT_BYTE(pos);
4553 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4554 const bool same_byte = (first_byte == last_byte);
4555 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4556 u8 *kaddr;
4557
4558 if (same_byte)
4559 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4560
4561 /* Handle the first byte. */
4562 kaddr = extent_buffer_get_byte(eb, first_byte);
4563 *kaddr |= mask;
4564 if (same_byte)
4565 return;
4566
4567 /* Handle the byte aligned part. */
4568 ASSERT(first_byte + 1 <= last_byte);
4569 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4570
4571 /* Handle the last byte. */
4572 kaddr = extent_buffer_get_byte(eb, last_byte);
4573 *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4574 }
4575
4576
4577 /*
4578 * Clear an area of a bitmap.
4579 *
4580 * @eb: the extent buffer
4581 * @start: offset of the bitmap item in the extent buffer
4582 * @pos: bit number of the first bit
4583 * @len: number of bits to clear
4584 */
4585 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4586 unsigned long start, unsigned long pos,
4587 unsigned long len)
4588 {
4589 unsigned int first_byte = start + BIT_BYTE(pos);
4590 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4591 const bool same_byte = (first_byte == last_byte);
4592 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4593 u8 *kaddr;
4594
4595 if (same_byte)
4596 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4597
4598 /* Handle the first byte. */
4599 kaddr = extent_buffer_get_byte(eb, first_byte);
4600 *kaddr &= ~mask;
4601 if (same_byte)
4602 return;
4603
4604 /* Handle the byte aligned part. */
4605 ASSERT(first_byte + 1 <= last_byte);
4606 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4607
4608 /* Handle the last byte. */
4609 kaddr = extent_buffer_get_byte(eb, last_byte);
4610 *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4611 }
4612
4613 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4614 {
4615 unsigned long distance = (src > dst) ? src - dst : dst - src;
4616 return distance < len;
4617 }
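/*
 * Copy @len bytes inside @dst from @src_offset to @dst_offset.  Each per-page
 * chunk falls back to memmove() when its source and destination ranges
 * overlap; for a destination range that overlaps and sits after the source,
 * use memmove_extent_buffer() instead.
 */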
4618
4619 void memcpy_extent_buffer(const struct extent_buffer *dst,
4620 unsigned long dst_offset, unsigned long src_offset,
4621 unsigned long len)
4622 {
4623 unsigned long cur_off = 0;
4624
4625 if (check_eb_range(dst, dst_offset, len) ||
4626 check_eb_range(dst, src_offset, len))
4627 return;
4628
4629 while (cur_off < len) {
4630 unsigned long cur_src = cur_off + src_offset;
4631 unsigned long pg_index = get_eb_page_index(cur_src);
4632 unsigned long pg_off = get_eb_offset_in_page(dst, cur_src);
4633 unsigned long cur_len = min(src_offset + len - cur_src,
4634 PAGE_SIZE - pg_off);
4635 void *src_addr = page_address(dst->pages[pg_index]) + pg_off;
4636 const bool use_memmove = areas_overlap(src_offset + cur_off,
4637 dst_offset + cur_off, cur_len);
4638
4639 __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4640 use_memmove);
4641 cur_off += cur_len;
4642 }
4643 }
4644
4645 void memmove_extent_buffer(const struct extent_buffer *dst,
4646 unsigned long dst_offset, unsigned long src_offset,
4647 unsigned long len)
4648 {
4649 unsigned long dst_end = dst_offset + len - 1;
4650 unsigned long src_end = src_offset + len - 1;
4651
4652 if (check_eb_range(dst, dst_offset, len) ||
4653 check_eb_range(dst, src_offset, len))
4654 return;
4655
4656 if (dst_offset < src_offset) {
4657 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4658 return;
4659 }
4660
4661 while (len > 0) {
4662 unsigned long src_i;
4663 size_t cur;
4664 size_t dst_off_in_page;
4665 size_t src_off_in_page;
4666 void *src_addr;
4667 bool use_memmove;
4668
4669 src_i = get_eb_page_index(src_end);
4670
4671 dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
4672 src_off_in_page = get_eb_offset_in_page(dst, src_end);
4673
4674 cur = min_t(unsigned long, len, src_off_in_page + 1);
4675 cur = min(cur, dst_off_in_page + 1);
4676
4677 src_addr = page_address(dst->pages[src_i]) + src_off_in_page -
4678 cur + 1;
4679 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4680 cur);
4681
4682 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4683 use_memmove);
4684
4685 dst_end -= cur;
4686 src_end -= cur;
4687 len -= cur;
4688 }
4689 }
4690
4691 #define GANG_LOOKUP_SIZE 16
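/*
 * Find the first extent buffer in the radix tree that starts at or after
 * @bytenr and still lies inside @page, or NULL if there is none.  Caller must
 * hold fs_info->buffer_lock.
 */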
4692 static struct extent_buffer *get_next_extent_buffer(
4693 struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
4694 {
4695 struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4696 struct extent_buffer *found = NULL;
4697 u64 page_start = page_offset(page);
4698 u64 cur = page_start;
4699
4700 ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
4701 lockdep_assert_held(&fs_info->buffer_lock);
4702
4703 while (cur < page_start + PAGE_SIZE) {
4704 int ret;
4705 int i;
4706
4707 ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4708 (void **)gang, cur >> fs_info->sectorsize_bits,
4709 min_t(unsigned int, GANG_LOOKUP_SIZE,
4710 PAGE_SIZE / fs_info->nodesize));
4711 if (ret == 0)
4712 goto out;
4713 for (i = 0; i < ret; i++) {
4714 /* Already beyond page end */
4715 if (gang[i]->start >= page_start + PAGE_SIZE)
4716 goto out;
4717 /* Found one */
4718 if (gang[i]->start >= bytenr) {
4719 found = gang[i];
4720 goto out;
4721 }
4722 }
4723 cur = gang[ret - 1]->start + gang[ret - 1]->len;
4724 }
4725 out:
4726 return found;
4727 }
4728
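/*
 * Subpage variant of try_release_extent_buffer(): walk all extent buffers in
 * the page range and release the ones only held by the tree reference.
 * Returns 1 if the page private got detached so the page can be released,
 * 0 otherwise.
 */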
4729 static int try_release_subpage_extent_buffer(struct page *page)
4730 {
4731 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4732 u64 cur = page_offset(page);
4733 const u64 end = page_offset(page) + PAGE_SIZE;
4734 int ret;
4735
4736 while (cur < end) {
4737 struct extent_buffer *eb = NULL;
4738
4739 /*
4740 * Unlike try_release_extent_buffer() which uses page->private
4741 * to grab the buffer, for the subpage case we rely on the radix
4742 * tree, thus we need to ensure radix tree consistency.
4743 *
4744 * We also want an atomic snapshot of the radix tree, thus go
4745 * with spinlock rather than RCU.
4746 */
4747 spin_lock(&fs_info->buffer_lock);
4748 eb = get_next_extent_buffer(fs_info, page, cur);
4749 if (!eb) {
4750 /* No more eb in the page range after or at cur */
4751 spin_unlock(&fs_info->buffer_lock);
4752 break;
4753 }
4754 cur = eb->start + eb->len;
4755
4756 /*
4757 * The same as try_release_extent_buffer(), to ensure the eb
4758 * won't disappear out from under us.
4759 */
4760 spin_lock(&eb->refs_lock);
4761 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4762 spin_unlock(&eb->refs_lock);
4763 spin_unlock(&fs_info->buffer_lock);
4764 break;
4765 }
4766 spin_unlock(&fs_info->buffer_lock);
4767
4768 /*
4769 * If tree ref isn't set then we know the ref on this eb is a
4770 * real ref, so just return, this eb will likely be freed soon
4771 * anyway.
4772 */
4773 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4774 spin_unlock(&eb->refs_lock);
4775 break;
4776 }
4777
4778 /*
4779 * Here we don't care about the return value, we will always
4780 * check the page private at the end. And
4781 * release_extent_buffer() will release the refs_lock.
4782 */
4783 release_extent_buffer(eb);
4784 }
4785 /*
4786 * Finally check whether we have cleared the page private: if we have
4787 * released all ebs in the page, the page private should be cleared by now.
4788 */
4789 spin_lock(&page->mapping->private_lock);
4790 if (!PagePrivate(page))
4791 ret = 1;
4792 else
4793 ret = 0;
4794 spin_unlock(&page->mapping->private_lock);
4795 return ret;
4796
4797 }
4798
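/*
 * Attempt to release the extent buffer attached to @page.  Returns 1 if the
 * page no longer backs any eb and can be released, 0 if the eb is still in
 * use (extra references or ongoing I/O).
 */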
4799 int try_release_extent_buffer(struct page *page)
4800 {
4801 struct extent_buffer *eb;
4802
4803 if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
4804 return try_release_subpage_extent_buffer(page);
4805
4806 /*
4807 * We need to make sure nobody is changing page->private, as we rely on
4808 * page->private as the pointer to extent buffer.
4809 */
4810 spin_lock(&page->mapping->private_lock);
4811 if (!PagePrivate(page)) {
4812 spin_unlock(&page->mapping->private_lock);
4813 return 1;
4814 }
4815
4816 eb = (struct extent_buffer *)page->private;
4817 BUG_ON(!eb);
4818
4819 /*
4820 * This is a little awful but should be ok, we need to make sure that
4821 * the eb doesn't disappear out from under us while we're looking at
4822 * this page.
4823 */
4824 spin_lock(&eb->refs_lock);
4825 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4826 spin_unlock(&eb->refs_lock);
4827 spin_unlock(&page->mapping->private_lock);
4828 return 0;
4829 }
4830 spin_unlock(&page->mapping->private_lock);
4831
4832 /*
4833 * If tree ref isn't set then we know the ref on this eb is a real ref,
4834 * so just return, this page will likely be freed soon anyway.
4835 */
4836 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4837 spin_unlock(&eb->refs_lock);
4838 return 0;
4839 }
4840
4841 return release_extent_buffer(eb);
4842 }
4843
4844 /*
4845 * btrfs_readahead_tree_block - attempt to readahead a child block
4846 * @fs_info: the fs_info
4847 * @bytenr: bytenr to read
4848 * @owner_root: objectid of the root that owns this eb
4849 * @gen: generation for the uptodate check, can be 0
4850 * @level: level for the eb
4851 *
4852 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
4853 * normal uptodate check of the eb, without checking the generation. If we have
4854 * to read the block we will not block on anything.
4855 */
4856 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4857 u64 bytenr, u64 owner_root, u64 gen, int level)
4858 {
4859 struct btrfs_tree_parent_check check = {
4860 .has_first_key = 0,
4861 .level = level,
4862 .transid = gen
4863 };
4864 struct extent_buffer *eb;
4865 int ret;
4866
4867 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4868 if (IS_ERR(eb))
4869 return;
4870
4871 if (btrfs_buffer_uptodate(eb, gen, 1)) {
4872 free_extent_buffer(eb);
4873 return;
4874 }
4875
4876 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4877 if (ret < 0)
4878 free_extent_buffer_stale(eb);
4879 else
4880 free_extent_buffer(eb);
4881 }
4882
4883 /*
4884 * btrfs_readahead_node_child - readahead a node's child block
4885 * @node: parent node we're reading from
4886 * @slot: slot in the parent node for the child we want to read
4887 *
4888 * A helper for btrfs_readahead_tree_block(); we simply read the bytenr
4889 * pointed at by the slot in the node provided.
4890 */
4891 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4892 {
4893 btrfs_readahead_tree_block(node->fs_info,
4894 btrfs_node_blockptr(node, slot),
4895 btrfs_header_owner(node),
4896 btrfs_node_ptr_generation(node, slot),
4897 btrfs_header_level(node) - 1);
4898 }
4899