// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page ensures
 *   every nodesize fits inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have enforced that for a while, thus only
 *   ancient filesystems could have such a problem. For such cases, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning that reading one tree block only triggers the read for the
 *   needed range; other unrelated ranges in the same page are not touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *     0         16K         32K         48K        64K
 *     |/////////|           |///////////|
 *      \- Tree block A       \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page. This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we would have greatly reduced concurrency or even
 *   deadlocks (holding one tree lock while trying to lock another tree block
 *   in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */
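
/*
 * Layout of subpage->bitmaps, assuming a 4K sectorsize on a 64K page
 * (i.e. sectors_per_page == 16): one region of sectors_per_page bits per
 * state, packed in increasing btrfs_bitmap_nr_* order:
 *
 *   [ uptodate: 16 bits ][ dirty: 16 bits ][ writeback: 16 bits ] ...
 *
 * The bit for sector N of state @name thus lives at
 * sectors_per_page * btrfs_bitmap_nr_##name + N, which is exactly what
 * subpage_calc_start_bit() below computes.
 */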

#if PAGE_SIZE > SZ_4K
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
        if (fs_info->sectorsize >= PAGE_SIZE)
                return false;

        /*
         * Only data pages (either through DIO or compression) can have no
         * mapping. And if page->mapping->host is a data inode, it's subpage,
         * as we have ruled out the sectorsize >= PAGE_SIZE case already.
         */
        if (!mapping || !mapping->host || is_data_inode(BTRFS_I(mapping->host)))
                return true;

        /*
         * Now the only remaining case is metadata, which goes through the
         * subpage routine only if nodesize < PAGE_SIZE.
         */
        if (fs_info->nodesize < PAGE_SIZE)
                return true;
        return false;
}
#endif

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
                         struct folio *folio, enum btrfs_subpage_type type)
{
        struct btrfs_subpage *subpage;

        /*
         * We have cases like a dummy extent buffer page, which is not mapped
         * and doesn't need to be locked.
         */
        if (folio->mapping)
                ASSERT(folio_test_locked(folio));

        /* Either not subpage, or the folio already has private attached. */
        if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
                return 0;

        subpage = btrfs_alloc_subpage(fs_info, type);
        if (IS_ERR(subpage))
                return PTR_ERR(subpage);

        folio_attach_private(folio, subpage);
        return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
        struct btrfs_subpage *subpage;

        /* Either not subpage, or the folio has no private attached. */
        if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
                return;

        subpage = folio_detach_private(folio);
        ASSERT(subpage);
        btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
                                          enum btrfs_subpage_type type)
{
        struct btrfs_subpage *ret;
        unsigned int real_size;

        ASSERT(fs_info->sectorsize < PAGE_SIZE);

        real_size = struct_size(ret, bitmaps,
                        BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page));
        ret = kzalloc(real_size, GFP_NOFS);
        if (!ret)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&ret->lock);
        if (type == BTRFS_SUBPAGE_METADATA)
                atomic_set(&ret->eb_refs, 0);
        else
                atomic_set(&ret->nr_locked, 0);
        return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
        kfree(subpage);
}

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent a race with the last eb
 * freeing of the same page.
 * With eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
        struct btrfs_subpage *subpage;

        if (!btrfs_is_subpage(fs_info, folio->mapping))
                return;

        ASSERT(folio_test_private(folio) && folio->mapping);
        lockdep_assert_held(&folio->mapping->i_private_lock);

        subpage = folio_get_private(folio);
        atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
        struct btrfs_subpage *subpage;

        if (!btrfs_is_subpage(fs_info, folio->mapping))
                return;

        ASSERT(folio_test_private(folio) && folio->mapping);
        lockdep_assert_held(&folio->mapping->i_private_lock);

        subpage = folio_get_private(folio);
        ASSERT(atomic_read(&subpage->eb_refs));
        atomic_dec(&subpage->eb_refs);
}
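
/*
 * Note: both helpers above require the mapping's i_private_lock (see the
 * lockdep assertions), so an eb_refs increment cannot race with the final
 * decrement and a concurrent detach of the folio private.
 */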

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
                                 struct folio *folio, u64 start, u32 len)
{
        /* For subpage support, the folio must be a single page. */
        ASSERT(folio_order(folio) == 0);

        /* Basic checks */
        ASSERT(folio_test_private(folio) && folio_get_private(folio));
        ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
               IS_ALIGNED(len, fs_info->sectorsize));
        /*
         * The range check only works for mapped pages; we can still have
         * unmapped pages like dummy extent buffer pages.
         */
        if (folio->mapping)
                ASSERT(folio_pos(folio) <= start &&
                       start + len <= folio_pos(folio) + PAGE_SIZE);
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)          \
({                                                                        \
        unsigned int __start_bit;                                         \
                                                                          \
        btrfs_subpage_assert(fs_info, folio, start, len);                 \
        __start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;  \
        __start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
        __start_bit;                                                      \
})
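
/*
 * Worked example, assuming a 4K sectorsize on a 64K page (so
 * sectorsize_bits == 12 and sectors_per_page == 16): for a range starting
 * 8K into the folio, subpage_calc_start_bit(fs_info, folio, dirty, start, len)
 * returns (8K >> 12) + 16 * btrfs_bitmap_nr_dirty, i.e. bit 2 inside the
 * dirty region of subpage->bitmaps.
 */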

static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
        u64 orig_start = *start;
        u32 orig_len = *len;

        *start = max_t(u64, folio_pos(folio), orig_start);
        /*
         * For certain call sites like btrfs_drop_pages(), we may have pages
         * beyond the target range. In that case, just set @len to 0, subpage
         * helpers can handle @len == 0 without any problem.
         */
        if (folio_pos(folio) >= orig_start + orig_len)
                *len = 0;
        else
                *len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
                             orig_start + orig_len) - *start;
}
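
/*
 * Example: for a folio at file position 64K and a range of [60K, 72K),
 * the clamped result is start == 64K and len == 8K. A range ending at or
 * before 64K clamps to len == 0, which the subpage helpers accept as a
 * no-op.
 */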

static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
                                            struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
        const int nbits = (len >> fs_info->sectorsize_bits);
        unsigned long flags;
        unsigned int cleared = 0;
        int bit = start_bit;
        bool last;

        btrfs_subpage_assert(fs_info, folio, start, len);

        spin_lock_irqsave(&subpage->lock, flags);
        /*
         * We have call sites passing @locked_page into
         * extent_clear_unlock_delalloc() for the compression path.
         *
         * This @locked_page is locked by plain lock_page(), thus its
         * subpage::locked is 0. Handle them in a special way.
         */
        if (atomic_read(&subpage->nr_locked) == 0) {
                spin_unlock_irqrestore(&subpage->lock, flags);
                return true;
        }

        for_each_set_bit_from(bit, subpage->bitmaps, start_bit + nbits) {
                clear_bit(bit, subpage->bitmaps);
                cleared++;
        }
        ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
        last = atomic_sub_and_test(cleared, &subpage->nr_locked);
        spin_unlock_irqrestore(&subpage->lock, flags);
        return last;
}

/*
 * Handle different locked folios:
 *
 * - Non-subpage folio
 *   Just unlock it.
 *
 * - folio locked but without any subpage range locked
 *   This happens either before writepage_delalloc(), or when the delalloc
 *   range was already handled by the previous folio.
 *   We can simply unlock it.
 *
 * - folio locked with subpage range locked
 *   We go through the locked sectors inside the range, clear their locked
 *   bitmap bits, reduce the locked sector count, and unlock the folio if
 *   that was the last locked range.
 */
void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
                          struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);

        ASSERT(folio_test_locked(folio));

        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
                folio_unlock(folio);
                return;
        }

        /*
         * For the subpage case, there are two types of locked folios: with
         * or without a locked sector count.
         *
         * Since we own the folio lock, no one else could touch subpage::locked
         * and we are safe to do several atomic operations without spinlock.
         */
        if (atomic_read(&subpage->nr_locked) == 0) {
                /* No subpage lock, locked by plain lock_page(). */
                folio_unlock(folio);
                return;
        }

        btrfs_subpage_clamp_range(folio, &start, &len);
        if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
                folio_unlock(folio);
}
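
/*
 * Example: with a 4K sectorsize on a 64K folio, two locked 16K ranges at
 * [0, 16K) and [32K, 48K) give nr_locked == 8. Ending the lock on
 * [0, 16K) clears four bits and leaves the folio locked; ending
 * [32K, 48K) clears the remaining four and unlocks the folio.
 */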

void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
                                 struct folio *folio, unsigned long bitmap)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
        unsigned long flags;
        bool last = false;
        int cleared = 0;
        int bit;

        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
                folio_unlock(folio);
                return;
        }

        if (atomic_read(&subpage->nr_locked) == 0) {
                /* No subpage lock, locked by plain lock_page(). */
                folio_unlock(folio);
                return;
        }

        spin_lock_irqsave(&subpage->lock, flags);
        for_each_set_bit(bit, &bitmap, fs_info->sectors_per_page) {
                if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
                        cleared++;
        }
        ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
        last = atomic_sub_and_test(cleared, &subpage->nr_locked);
        spin_unlock_irqrestore(&subpage->lock, flags);
        if (last)
                folio_unlock(folio);
}

#define subpage_test_bitmap_all_set(fs_info, subpage, name)                \
        bitmap_test_range_all_set(subpage->bitmaps,                        \
                        fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
                        fs_info->sectors_per_page)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)               \
        bitmap_test_range_all_zero(subpage->bitmaps,                       \
                        fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
                        fs_info->sectors_per_page)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
                                struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        uptodate, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
                folio_mark_uptodate(folio);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
                                  struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        uptodate, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        folio_clear_uptodate(folio);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
                             struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        dirty, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        spin_unlock_irqrestore(&subpage->lock, flags);
        folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we cleared the last dirty bits in the dirty bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the page dirty flag for the true case,
 * as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
                                        struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        dirty, start, len);
        unsigned long flags;
        bool last = false;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
                last = true;
        spin_unlock_irqrestore(&subpage->lock, flags);
        return last;
}

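/*
 * Wrapper for the common case: clear the subpage dirty bits and, if they
 * were the last dirty bits in the folio, also clear the folio dirty flag
 * for writeback.
 */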
void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
                               struct folio *folio, u64 start, u32 len)
{
        bool last;

        last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
        if (last)
                folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
                                 struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        writeback, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);

        /*
         * Don't clear the TOWRITE tag when starting writeback on a still-dirty
         * folio. Doing so can cause WB_SYNC_ALL writepages() to overlook it,
         * assume writeback is complete, and exit too early, violating sync
         * ordering guarantees.
         */
        if (!folio_test_writeback(folio))
                __folio_start_writeback(folio, true);
        if (!folio_test_dirty(folio)) {
                struct address_space *mapping = folio_mapping(folio);
                XA_STATE(xas, &mapping->i_pages, folio->index);
                unsigned long flags;

                xas_lock_irqsave(&xas, flags);
                xas_load(&xas);
                xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
                xas_unlock_irqrestore(&xas, flags);
        }
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
                                   struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        writeback, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
                ASSERT(folio_test_writeback(folio));
                folio_end_writeback(folio);
        }
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
                               struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        ordered, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        folio_set_ordered(folio);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
                                 struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        ordered, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
                folio_clear_ordered(folio);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
                               struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        checked, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
                folio_set_checked(folio);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
                                 struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        checked, start, len);
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
        folio_clear_checked(folio);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which depends on each page's status, for testing all
 * bits are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)                              \
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,        \
                               struct folio *folio, u64 start, u32 len)    \
{                                                                          \
        struct btrfs_subpage *subpage = folio_get_private(folio);          \
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,    \
                                                        name, start, len); \
        unsigned long flags;                                               \
        bool ret;                                                          \
                                                                           \
        spin_lock_irqsave(&subpage->lock, flags);                          \
        ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,       \
                                        len >> fs_info->sectorsize_bits);  \
        spin_unlock_irqrestore(&subpage->lock, flags);                     \
        return ret;                                                        \
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

/*
 * Note that, in selftests (extent-io-tests), we can have a NULL fs_info
 * passed in. We only test sectorsize == PAGE_SIZE cases so far, thus we can
 * fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,                     \
                                 folio_clear_func, folio_test_func)        \
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,           \
                            struct folio *folio, u64 start, u32 len)       \
{                                                                          \
        if (unlikely(!fs_info) ||                                          \
            !btrfs_is_subpage(fs_info, folio->mapping)) {                  \
                folio_set_func(folio);                                     \
                return;                                                    \
        }                                                                  \
        btrfs_subpage_set_##name(fs_info, folio, start, len);              \
}                                                                          \
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,         \
                              struct folio *folio, u64 start, u32 len)     \
{                                                                          \
        if (unlikely(!fs_info) ||                                          \
            !btrfs_is_subpage(fs_info, folio->mapping)) {                  \
                folio_clear_func(folio);                                   \
                return;                                                    \
        }                                                                  \
        btrfs_subpage_clear_##name(fs_info, folio, start, len);            \
}                                                                          \
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,          \
                             struct folio *folio, u64 start, u32 len)      \
{                                                                          \
        if (unlikely(!fs_info) ||                                          \
            !btrfs_is_subpage(fs_info, folio->mapping))                    \
                return folio_test_func(folio);                             \
        return btrfs_subpage_test_##name(fs_info, folio, start, len);      \
}                                                                          \
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,     \
                                  struct folio *folio, u64 start, u32 len) \
{                                                                          \
        if (unlikely(!fs_info) ||                                          \
            !btrfs_is_subpage(fs_info, folio->mapping)) {                  \
                folio_set_func(folio);                                     \
                return;                                                    \
        }                                                                  \
        btrfs_subpage_clamp_range(folio, &start, &len);                    \
        btrfs_subpage_set_##name(fs_info, folio, start, len);              \
}                                                                          \
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info,   \
                                    struct folio *folio, u64 start, u32 len) \
{                                                                          \
        if (unlikely(!fs_info) ||                                          \
            !btrfs_is_subpage(fs_info, folio->mapping)) {                  \
                folio_clear_func(folio);                                   \
                return;                                                    \
        }                                                                  \
        btrfs_subpage_clamp_range(folio, &start, &len);                    \
        btrfs_subpage_clear_##name(fs_info, folio, start, len);            \
}                                                                          \
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info,    \
                                   struct folio *folio, u64 start, u32 len) \
{                                                                          \
        if (unlikely(!fs_info) ||                                          \
            !btrfs_is_subpage(fs_info, folio->mapping))                    \
                return folio_test_func(folio);                             \
        btrfs_subpage_clamp_range(folio, &start, &len);                    \
        return btrfs_subpage_test_##name(fs_info, folio, start, len);      \
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
                         folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
                         folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
                         folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
                         folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
                         folio_test_checked);
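
/*
 * For each state, this generates the whole helper family. For "dirty",
 * for example, the instantiation above expands to btrfs_folio_set_dirty(),
 * btrfs_folio_clear_dirty() and btrfs_folio_test_dirty(), plus the
 * btrfs_folio_clamp_*_dirty() variants which first clamp the range to the
 * folio boundaries.
 */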

/*
 * Make sure not only the folio dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
                                  struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage;
        unsigned int start_bit;
        unsigned int nbits;
        unsigned long flags;

        if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
                return;

        if (!btrfs_is_subpage(fs_info, folio->mapping)) {
                ASSERT(!folio_test_dirty(folio));
                return;
        }

        start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
        nbits = len >> fs_info->sectorsize_bits;
        subpage = folio_get_private(folio);
        ASSERT(subpage);
        spin_lock_irqsave(&subpage->lock, flags);
        ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
        spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * This is for a folio already locked by plain lock_page()/folio_lock(),
 * which has no subpage awareness.
 *
 * This populates the involved subpage ranges so that subpage helpers can
 * properly unlock them.
 */
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
                          struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage;
        unsigned long flags;
        unsigned int start_bit;
        unsigned int nbits;
        int ret;

        ASSERT(folio_test_locked(folio));
        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping))
                return;

        subpage = folio_get_private(folio);
        start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
        nbits = len >> fs_info->sectorsize_bits;
        spin_lock_irqsave(&subpage->lock, flags);
        /* Target range should not yet be locked. */
        ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
        bitmap_set(subpage->bitmaps, start_bit, nbits);
        ret = atomic_add_return(nbits, &subpage->nr_locked);
        ASSERT(ret <= fs_info->sectors_per_page);
        spin_unlock_irqrestore(&subpage->lock, flags);
}
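
/*
 * Illustrative pairing: a caller that grabbed the folio lock with plain
 * folio_lock() can populate the locked bitmap via
 * btrfs_folio_set_lock(fs_info, folio, start, len), then release it range
 * by range through btrfs_folio_end_lock(), which unlocks the folio once
 * the last locked range is cleared.
 */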

#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst)                    \
{                                                                          \
        const int sectors_per_page = fs_info->sectors_per_page;            \
                                                                           \
        ASSERT(sectors_per_page < BITS_PER_LONG);                          \
        *dst = bitmap_read(subpage->bitmaps,                               \
                           sectors_per_page * btrfs_bitmap_nr_##name,      \
                           sectors_per_page);                              \
}

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
                                      struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage;
        const u32 sectors_per_page = fs_info->sectors_per_page;
        unsigned long uptodate_bitmap;
        unsigned long dirty_bitmap;
        unsigned long writeback_bitmap;
        unsigned long ordered_bitmap;
        unsigned long checked_bitmap;
        unsigned long locked_bitmap;
        unsigned long flags;

        ASSERT(folio_test_private(folio) && folio_get_private(folio));
        ASSERT(sectors_per_page > 1);
        subpage = folio_get_private(folio);

        spin_lock_irqsave(&subpage->lock, flags);
        GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap);
        GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap);
        GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
        GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
        GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
        GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap);
        spin_unlock_irqrestore(&subpage->lock, flags);

        dump_page(folio_page(folio, 0), "btrfs subpage dump");
        btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
                   start, len, folio_pos(folio),
                   sectors_per_page, &uptodate_bitmap,
                   sectors_per_page, &dirty_bitmap,
                   sectors_per_page, &locked_bitmap,
                   sectors_per_page, &writeback_bitmap,
                   sectors_per_page, &ordered_bitmap,
                   sectors_per_page, &checked_bitmap);
}

void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
                                    struct folio *folio,
                                    unsigned long *ret_bitmap)
{
        struct btrfs_subpage *subpage;
        unsigned long flags;

        ASSERT(folio_test_private(folio) && folio_get_private(folio));
        ASSERT(fs_info->sectors_per_page > 1);
        subpage = folio_get_private(folio);

        spin_lock_irqsave(&subpage->lock, flags);
        GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap);
        spin_unlock_irqrestore(&subpage->lock, flags);
}