// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/bitmap.h"

#include <linux/interval_tree_generic.h>

struct xbitmap_node {
	struct rb_node		bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint64_t		bn_start;

	/* Last set bit of this interval. */
	uint64_t		bn_last;

	/* Last set bit of this subtree. Do not touch this. */
	uint64_t		__bn_subtree_last;
};

/* Define our own interval tree type with uint64_t parameters. */

#define START(node) ((node)->bn_start)
#define LAST(node) ((node)->bn_last)

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline void
xbitmap_tree_insert(struct xbitmap_node *node, struct rb_root_cached *root);

static inline void
xbitmap_tree_remove(struct xbitmap_node *node, struct rb_root_cached *root);

static inline struct xbitmap_node *
xbitmap_tree_iter_first(struct rb_root_cached *root, uint64_t start,
		uint64_t last);

static inline struct xbitmap_node *
xbitmap_tree_iter_next(struct xbitmap_node *node, uint64_t start,
		uint64_t last);

INTERVAL_TREE_DEFINE(struct xbitmap_node, bn_rbnode, uint64_t,
		__bn_subtree_last, START, LAST, static inline, xbitmap_tree)

/* Iterate each interval of a bitmap. Do not change the bitmap. */
#define for_each_xbitmap_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap_node, bn_rbnode))
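/*
 * A minimal usage sketch of the iterator above; the loop body here is
 * hypothetical, and real callers appear later in this file (for example
 * xbitmap_disunion and xbitmap_hweight):
 *
 *	struct xbitmap_node	*bn;
 *
 *	for_each_xbitmap_extent(bn, bitmap)
 *		record_run(bn->bn_start, bn->bn_last);
 */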

/* Clear a range of this bitmap. */
int
xbitmap_clear(
	struct xbitmap		*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap_node	*bn;
	struct xbitmap_node	*new_bn;
	uint64_t		last = start + len - 1;

	while ((bn = xbitmap_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint64_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			xbitmap_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap_tree_insert(bn, &bitmap->xb_root);

			/* add an extent */
			new_bn = kmalloc(sizeof(struct xbitmap_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap_tree_insert(bn, &bitmap->xb_root);
			break;
		} else {
			/* in the middle of the clearing range */
			xbitmap_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}
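
/*
 * A worked example of the cases above: clearing [10, 19] from a bitmap that
 * holds a single [5, 25] run splits it into [5, 9] and [20, 25]. Clearing
 * the same range from a bitmap holding [5, 12] and [15, 30] instead trims
 * the runs to [5, 9] and [20, 30].
 */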

/* Set a range of this bitmap. */
int
xbitmap_set(
	struct xbitmap		*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap_node	*left;
	struct xbitmap_node	*right;
	uint64_t		last = start + len - 1;
	int			error;

	/* Is this whole range already set? */
	left = xbitmap_tree_iter_first(&bitmap->xb_root, start, last);
	if (left && left->bn_start <= start && left->bn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	error = xbitmap_clear(bitmap, start, len);
	if (error)
		return error;

	/* Do we have a left-adjacent extent? */
	left = xbitmap_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
	ASSERT(!left || left->bn_last + 1 == start);

	/* Do we have a right-adjacent extent? */
	right = xbitmap_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
	ASSERT(!right || right->bn_start == last + 1);

	if (left && right) {
		/* combine left and right adjacent extent */
		xbitmap_tree_remove(left, &bitmap->xb_root);
		xbitmap_tree_remove(right, &bitmap->xb_root);
		left->bn_last = right->bn_last;
		xbitmap_tree_insert(left, &bitmap->xb_root);
		kfree(right);
	} else if (left) {
		/* combine with left extent */
		xbitmap_tree_remove(left, &bitmap->xb_root);
		left->bn_last = last;
		xbitmap_tree_insert(left, &bitmap->xb_root);
	} else if (right) {
		/* combine with right extent */
		xbitmap_tree_remove(right, &bitmap->xb_root);
		right->bn_start = start;
		xbitmap_tree_insert(right, &bitmap->xb_root);
	} else {
		/* add an extent */
		left = kmalloc(sizeof(struct xbitmap_node), XCHK_GFP_FLAGS);
		if (!left)
			return -ENOMEM;
		left->bn_start = start;
		left->bn_last = last;
		xbitmap_tree_insert(left, &bitmap->xb_root);
	}

	return 0;
}
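
/*
 * For example, setting [10, 19] in a bitmap that already holds [5, 9] and
 * [20, 25] merges all three pieces into a single [5, 25] run, while setting
 * the same range in an empty bitmap simply records a new [10, 19] run.
 */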

/* Free everything related to this bitmap. */
void
xbitmap_destroy(
	struct xbitmap		*bitmap)
{
	struct xbitmap_node	*bn;

	while ((bn = xbitmap_tree_iter_first(&bitmap->xb_root, 0, -1ULL))) {
		xbitmap_tree_remove(bn, &bitmap->xb_root);
		kfree(bn);
	}
}

/* Set up an empty bitmap. */
void
xbitmap_init(
	struct xbitmap		*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub. This routine subtracts all the extents
 * mentioned in sub from all the extents linked in @bitmap, which leaves
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure. The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
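 *
 * For example, if @bitmap holds [0, 9] and @sub holds [3, 5] and [8, 20],
 * this leaves @bitmap holding [0, 2] and [6, 7].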
 */
int
xbitmap_disunion(
	struct xbitmap		*bitmap,
	struct xbitmap		*sub)
{
	struct xbitmap_node	*bn;
	int			error;

	if (xbitmap_empty(bitmap) || xbitmap_empty(sub))
		return 0;

	for_each_xbitmap_extent(bn, sub) {
		error = xbitmap_clear(bitmap, bn->bn_start,
				bn->bn_last - bn->bn_start + 1);
		if (error)
			return error;
	}

	return 0;
}

/*
 * Record all btree blocks seen while iterating all records of a btree.
 *
 * We know that the btree query_all function starts at the left edge and walks
 * towards the right edge of the tree. Therefore, we know that we can walk up
 * the btree cursor towards the root; if the pointer for a given level points
 * to the first record/key in that block, we haven't seen this block before;
 * and therefore we need to remember that we saw this block in the btree.
 *
 * So if our btree is:
 *
 *    4
 *  / | \
 * 1  2  3
 *
 * Pretend for this example that each leaf block has 100 btree records. For
 * the first btree record, we'll observe that bc_levels[0].ptr == 1, so we
 * record that we saw block 1. Then we observe that bc_levels[1].ptr == 1, so
 * we record block 4. The list is [1, 4].
 *
 * For the second btree record, we see that bc_levels[0].ptr == 2, so we exit
 * the loop. The list remains [1, 4].
 *
 * For the 101st btree record, we've moved onto leaf block 2. Now
 * bc_levels[0].ptr == 1 again, so we record that we saw block 2. We see that
 * bc_levels[1].ptr == 2, so we exit the loop. The list is now [1, 4, 2].
 *
 * For the 102nd record, bc_levels[0].ptr == 2, so we continue.
 *
 * For the 201st record, we've moved on to leaf block 3.
 * bc_levels[0].ptr == 1, so we add 3 to the list. Now it is [1, 4, 2, 3].
 *
 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
 */

/* Mark a btree block in the agblock bitmap. */
STATIC int
xagb_bitmap_visit_btblock(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xagb_bitmap	*bitmap = priv;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;
	xfs_agblock_t		agbno;

	xfs_btree_get_block(cur, level, &bp);
	if (!bp)
		return 0;

	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
	agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);

	return xagb_bitmap_set(bitmap, agbno, 1);
}

/* Mark all (per-AG) btree blocks in the agblock bitmap. */
int
xagb_bitmap_set_btblocks(
	struct xagb_bitmap	*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xagb_bitmap_visit_btblock,
			XFS_BTREE_VISIT_ALL, bitmap);
}

/*
 * Record all the buffers pointed to by the btree cursor. Callers already
 * engaged in a btree walk should call this function to capture the list of
 * blocks going from the leaf towards the root.
 */
int
xagb_bitmap_set_btcur_path(
	struct xagb_bitmap	*bitmap,
	struct xfs_btree_cur	*cur)
{
	int			i;
	int			error;

	for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) {
		error = xagb_bitmap_visit_btblock(cur, i, bitmap);
		if (error)
			return error;
	}

	return 0;
}

/* How many bits are set in this bitmap? */
uint64_t
xbitmap_hweight(
	struct xbitmap		*bitmap)
{
	struct xbitmap_node	*bn;
	uint64_t		ret = 0;

	for_each_xbitmap_extent(bn, bitmap)
		ret += bn->bn_last - bn->bn_start + 1;

	return ret;
}

/* Call a function for every run of set bits in this bitmap. */
int
xbitmap_walk(
	struct xbitmap		*bitmap,
	xbitmap_walk_fn		fn,
	void			*priv)
{
	struct xbitmap_node	*bn;
	int			error = 0;

	for_each_xbitmap_extent(bn, bitmap) {
		error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
		if (error)
			break;
	}

	return error;
}
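
/*
 * A minimal usage sketch for xbitmap_walk(), with a hypothetical callback
 * that only counts the runs it is handed; a nonzero return value from the
 * callback stops the walk early:
 *
 *	static int
 *	xexample_count_run(
 *		uint64_t	start,
 *		uint64_t	len,
 *		void		*priv)
 *	{
 *		unsigned int	*nr = priv;
 *
 *		(*nr)++;
 *		return 0;
 *	}
 *
 *	unsigned int	nr = 0;
 *	int		error = xbitmap_walk(bitmap, xexample_count_run, &nr);
 */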

/* Does this bitmap have no bits set at all? */
bool
xbitmap_empty(
	struct xbitmap		*bitmap)
{
	return bitmap->xb_root.rb_root.rb_node == NULL;
}

/* Is the start of the range set or clear? And for how long? */
bool
xbitmap_test(
	struct xbitmap		*bitmap,
	uint64_t		start,
	uint64_t		*len)
{
	struct xbitmap_node	*bn;
	uint64_t		last = start + *len - 1;

	bn = xbitmap_tree_iter_first(&bitmap->xb_root, start, last);
	if (!bn)
		return false;
	if (bn->bn_start <= start) {
		if (bn->bn_last < last)
			*len = bn->bn_last - start + 1;
		return true;
	}
	*len = bn->bn_start - start;
	return false;
}
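
/*
 * For example, if the bitmap holds only [10, 20], then xbitmap_test() with
 * start == 15 and *len == 10 returns true and shortens *len to 6, while
 * start == 5 and *len == 10 returns false and shortens *len to 5 because the
 * first set bit is five positions away.
 */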