1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2012 Fusion-io All rights reserved.
4 * Copyright (C) 2012 Intel Corp. All rights reserved.
5 */
6
7 #include <linux/sched.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/raid/pq.h>
12 #include <linux/hash.h>
13 #include <linux/list_sort.h>
14 #include <linux/raid/xor.h>
15 #include <linux/mm.h>
16 #include "ctree.h"
17 #include "disk-io.h"
18 #include "volumes.h"
19 #include "raid56.h"
20 #include "async-thread.h"
21
22 /* set when additional merges to this rbio are not allowed */
23 #define RBIO_RMW_LOCKED_BIT 1
24
25 /*
26 * set when this rbio is sitting in the hash, but it is just a cache
27 * of past RMW
28 */
29 #define RBIO_CACHE_BIT 2
30
31 /*
32 * set when it is safe to trust the stripe_pages for caching
33 */
34 #define RBIO_CACHE_READY_BIT 3
35
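/* maximum number of rbios kept on the stripe cache LRU before pruning */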
36 #define RBIO_CACHE_SIZE 1024
37
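/* the stripe hash table has 2^11 = 2048 buckets */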
38 #define BTRFS_STRIPE_HASH_TABLE_BITS 11
39
40 /* Used by the raid56 code to lock stripes for read/modify/write */
41 struct btrfs_stripe_hash {
42 struct list_head hash_list;
43 spinlock_t lock;
44 };
45
46 /* Used by the raid56 code to lock stripes for read/modify/write */
47 struct btrfs_stripe_hash_table {
48 struct list_head stripe_cache;
49 spinlock_t cache_lock;
50 int cache_size;
51 struct btrfs_stripe_hash table[];
52 };
53
54 enum btrfs_rbio_ops {
55 BTRFS_RBIO_WRITE,
56 BTRFS_RBIO_READ_REBUILD,
57 BTRFS_RBIO_PARITY_SCRUB,
58 BTRFS_RBIO_REBUILD_MISSING,
59 };
60
61 struct btrfs_raid_bio {
62 struct btrfs_fs_info *fs_info;
63 struct btrfs_bio *bbio;
64
65 /* while we're doing rmw on a stripe
66 * we put it into a hash table so we can
67 * lock the stripe and merge more rbios
68 * into it.
69 */
70 struct list_head hash_list;
71
72 /*
73 * LRU list for the stripe cache
74 */
75 struct list_head stripe_cache;
76
77 /*
78 * for scheduling work in the helper threads
79 */
80 struct btrfs_work work;
81
82 /*
83 * bio list and bio_list_lock are used
84 * to add more bios into the stripe
85 * in hopes of avoiding the full rmw
86 */
87 struct bio_list bio_list;
88 spinlock_t bio_list_lock;
89
90 /* also protected by the bio_list_lock, the
91 * plug list is used by the plugging code
92 * to collect partial bios while plugged. The
93 * stripe locking code also uses it to hand off
94 * the stripe lock to the next pending IO
95 */
96 struct list_head plug_list;
97
98 /*
99 * flags that tell us if it is safe to
100 * merge with this bio
101 */
102 unsigned long flags;
103
104 /* size of each individual stripe on disk */
105 int stripe_len;
106
107 /* number of data stripes (no p/q) */
108 int nr_data;
109
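/* total number of stripes, including P/Q, excluding dev-replace targets */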
110 int real_stripes;
111
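/* number of pages in each stripe (stripe_len / PAGE_SIZE) */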
112 int stripe_npages;
113 /*
114 * set if we're doing a parity rebuild
115 * for a read from higher up, which is handled
116 * differently from a parity rebuild as part of
117 * rmw
118 */
119 enum btrfs_rbio_ops operation;
120
121 /* first bad stripe */
122 int faila;
123
124 /* second bad stripe (for raid6 use) */
125 int failb;
126
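/* stripe number of the parity stripe being checked (parity scrub only) */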
127 int scrubp;
128 /*
129 * number of pages needed to represent the full
130 * stripe
131 */
132 int nr_pages;
133
134 /*
135 * size of all the bios in the bio_list. This
136 * helps us decide if the rbio maps to a full
137 * stripe or not
138 */
139 int bio_list_bytes;
140
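/* number of bios counted against fs_info's bio counter, subtracted back in rbio_orig_end_io() */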
141 int generic_bio_cnt;
142
143 refcount_t refs;
144
145 atomic_t stripes_pending;
146
147 atomic_t error;
148 /*
149 * these are two arrays of pointers. We allocate the
150 * rbio big enough to hold them both and setup their
151 * locations when the rbio is allocated
152 */
153
154 /* pointers to pages that we allocated for
155 * reading/writing stripes directly from the disk (including P/Q)
156 */
157 struct page **stripe_pages;
158
159 /*
160 * pointers to the pages in the bio_list. Stored
161 * here for faster lookup
162 */
163 struct page **bio_pages;
164
165 /*
166 * bitmap to record which horizontal stripe has data
167 */
168 unsigned long *dbitmap;
169
170 /* allocated with real_stripes-many pointers for finish_*() calls */
171 void **finish_pointers;
172
173 /* allocated with stripe_npages-many bits for finish_*() calls */
174 unsigned long *finish_pbitmap;
175 };
176
177 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
178 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
179 static void rmw_work(struct btrfs_work *work);
180 static void read_rebuild_work(struct btrfs_work *work);
181 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
182 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
183 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
184 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
185 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
186
187 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
188 int need_check);
189 static void scrub_parity_work(struct btrfs_work *work);
190
191 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
192 {
193 btrfs_init_work(&rbio->work, work_func, NULL, NULL);
194 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
195 }
196
197 /*
198 * the stripe hash table is used for locking, and to collect
199 * bios in hopes of making a full stripe
200 */
201 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
202 {
203 struct btrfs_stripe_hash_table *table;
204 struct btrfs_stripe_hash_table *x;
205 struct btrfs_stripe_hash *cur;
206 struct btrfs_stripe_hash *h;
207 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
208 int i;
209
210 if (info->stripe_hash_table)
211 return 0;
212
213 /*
214 * The table is large, starting with order 4 and can go as high as
215 * order 7 in case lock debugging is turned on.
216 *
217 * Try harder to allocate and fallback to vmalloc to lower the chance
218 * of a failing mount.
219 */
220 table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
221 if (!table)
222 return -ENOMEM;
223
224 spin_lock_init(&table->cache_lock);
225 INIT_LIST_HEAD(&table->stripe_cache);
226
227 h = table->table;
228
229 for (i = 0; i < num_entries; i++) {
230 cur = h + i;
231 INIT_LIST_HEAD(&cur->hash_list);
232 spin_lock_init(&cur->lock);
233 }
234
235 x = cmpxchg(&info->stripe_hash_table, NULL, table);
236 if (x)
237 kvfree(x);
238 return 0;
239 }
240
241 /*
242 * caching an rbio means to copy anything from the
243 * bio_pages array into the stripe_pages array. We
244 * use the page uptodate bit in the stripe cache array
245 * to indicate if it has valid data
246 *
247 * once the caching is done, we set the cache ready
248 * bit.
249 */
250 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
251 {
252 int i;
253 char *s;
254 char *d;
255 int ret;
256
257 ret = alloc_rbio_pages(rbio);
258 if (ret)
259 return;
260
261 for (i = 0; i < rbio->nr_pages; i++) {
262 if (!rbio->bio_pages[i])
263 continue;
264
265 s = kmap(rbio->bio_pages[i]);
266 d = kmap(rbio->stripe_pages[i]);
267
268 copy_page(d, s);
269
270 kunmap(rbio->bio_pages[i]);
271 kunmap(rbio->stripe_pages[i]);
272 SetPageUptodate(rbio->stripe_pages[i]);
273 }
274 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
275 }
276
277 /*
278 * we hash on the first logical address of the stripe
279 */
280 static int rbio_bucket(struct btrfs_raid_bio *rbio)
281 {
282 u64 num = rbio->bbio->raid_map[0];
283
284 /*
285 * we shift down quite a bit. We're using byte
286 * addressing, and most of the lower bits are zeros.
287 * This tends to upset hash_64, and it consistently
288 * returns just one or two different values.
289 *
290 * shifting off the lower bits fixes things.
291 */
292 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
293 }
294
295 /*
296 * stealing an rbio means taking all the uptodate pages from the stripe
297 * array in the source rbio and putting them into the destination rbio
298 */
299 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
300 {
301 int i;
302 struct page *s;
303 struct page *d;
304
305 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
306 return;
307
308 for (i = 0; i < dest->nr_pages; i++) {
309 s = src->stripe_pages[i];
310 if (!s || !PageUptodate(s)) {
311 continue;
312 }
313
314 d = dest->stripe_pages[i];
315 if (d)
316 __free_page(d);
317
318 dest->stripe_pages[i] = s;
319 src->stripe_pages[i] = NULL;
320 }
321 }
322
323 /*
324 * merging means we take the bio_list from the victim and
325 * splice it into the destination. The victim should
326 * be discarded afterwards.
327 *
328 * must be called with dest->bio_list_lock held
329 */
330 static void merge_rbio(struct btrfs_raid_bio *dest,
331 struct btrfs_raid_bio *victim)
332 {
333 bio_list_merge(&dest->bio_list, &victim->bio_list);
334 dest->bio_list_bytes += victim->bio_list_bytes;
335 /* Also inherit the bitmaps from @victim. */
336 bitmap_or(dest->dbitmap, victim->dbitmap, dest->dbitmap,
337 dest->stripe_npages);
338 dest->generic_bio_cnt += victim->generic_bio_cnt;
339 bio_list_init(&victim->bio_list);
340 }
341
342 /*
343 * used to prune items that are in the cache. The caller
344 * must hold the hash table lock.
345 */
346 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
347 {
348 int bucket = rbio_bucket(rbio);
349 struct btrfs_stripe_hash_table *table;
350 struct btrfs_stripe_hash *h;
351 int freeit = 0;
352
353 /*
354 * check the bit again under the hash table lock.
355 */
356 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
357 return;
358
359 table = rbio->fs_info->stripe_hash_table;
360 h = table->table + bucket;
361
362 /* hold the lock for the bucket because we may be
363 * removing it from the hash table
364 */
365 spin_lock(&h->lock);
366
367 /*
368 * hold the lock for the bio list because we need
369 * to make sure the bio list is empty
370 */
371 spin_lock(&rbio->bio_list_lock);
372
373 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
374 list_del_init(&rbio->stripe_cache);
375 table->cache_size -= 1;
376 freeit = 1;
377
378 /* if the bio list isn't empty, this rbio is
379 * still involved in an IO. We take it out
380 * of the cache list, and drop the ref that
381 * was held for the list.
382 *
383 * If the bio_list was empty, we also remove
384 * the rbio from the hash_table, and drop
385 * the corresponding ref
386 */
387 if (bio_list_empty(&rbio->bio_list)) {
388 if (!list_empty(&rbio->hash_list)) {
389 list_del_init(&rbio->hash_list);
390 refcount_dec(&rbio->refs);
391 BUG_ON(!list_empty(&rbio->plug_list));
392 }
393 }
394 }
395
396 spin_unlock(&rbio->bio_list_lock);
397 spin_unlock(&h->lock);
398
399 if (freeit)
400 __free_raid_bio(rbio);
401 }
402
403 /*
404 * prune a given rbio from the cache
405 */
406 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
407 {
408 struct btrfs_stripe_hash_table *table;
409 unsigned long flags;
410
411 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
412 return;
413
414 table = rbio->fs_info->stripe_hash_table;
415
416 spin_lock_irqsave(&table->cache_lock, flags);
417 __remove_rbio_from_cache(rbio);
418 spin_unlock_irqrestore(&table->cache_lock, flags);
419 }
420
421 /*
422 * remove everything in the cache
423 */
424 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
425 {
426 struct btrfs_stripe_hash_table *table;
427 unsigned long flags;
428 struct btrfs_raid_bio *rbio;
429
430 table = info->stripe_hash_table;
431
432 spin_lock_irqsave(&table->cache_lock, flags);
433 while (!list_empty(&table->stripe_cache)) {
434 rbio = list_entry(table->stripe_cache.next,
435 struct btrfs_raid_bio,
436 stripe_cache);
437 __remove_rbio_from_cache(rbio);
438 }
439 spin_unlock_irqrestore(&table->cache_lock, flags);
440 }
441
442 /*
443 * remove all cached entries and free the hash table
444 * used by unmount
445 */
446 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
447 {
448 if (!info->stripe_hash_table)
449 return;
450 btrfs_clear_rbio_cache(info);
451 kvfree(info->stripe_hash_table);
452 info->stripe_hash_table = NULL;
453 }
454
455 /*
456 * insert an rbio into the stripe cache. It
457 * must have already been prepared by calling
458 * cache_rbio_pages
459 *
460 * If this rbio was already cached, it gets
461 * moved to the front of the lru.
462 *
463 * If the size of the rbio cache is too big, we
464 * prune an item.
465 */
466 static void cache_rbio(struct btrfs_raid_bio *rbio)
467 {
468 struct btrfs_stripe_hash_table *table;
469 unsigned long flags;
470
471 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
472 return;
473
474 table = rbio->fs_info->stripe_hash_table;
475
476 spin_lock_irqsave(&table->cache_lock, flags);
477 spin_lock(&rbio->bio_list_lock);
478
479 /* bump our ref if we were not in the list before */
480 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
481 refcount_inc(&rbio->refs);
482
483 if (!list_empty(&rbio->stripe_cache)) {
484 list_move(&rbio->stripe_cache, &table->stripe_cache);
485 } else {
486 list_add(&rbio->stripe_cache, &table->stripe_cache);
487 table->cache_size += 1;
488 }
489
490 spin_unlock(&rbio->bio_list_lock);
491
492 if (table->cache_size > RBIO_CACHE_SIZE) {
493 struct btrfs_raid_bio *found;
494
495 found = list_entry(table->stripe_cache.prev,
496 struct btrfs_raid_bio,
497 stripe_cache);
498
499 if (found != rbio)
500 __remove_rbio_from_cache(found);
501 }
502
503 spin_unlock_irqrestore(&table->cache_lock, flags);
504 }
505
506 /*
507 * helper function to run the xor_blocks api. It is only
508 * able to do MAX_XOR_BLOCKS at a time, so we need to
509 * loop through.
510 */
511 static void run_xor(void **pages, int src_cnt, ssize_t len)
512 {
513 int src_off = 0;
514 int xor_src_cnt = 0;
515 void *dest = pages[src_cnt];
516
517 while (src_cnt > 0) {
518 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
519 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
520
521 src_cnt -= xor_src_cnt;
522 src_off += xor_src_cnt;
523 }
524 }
525
526 /*
527 * Returns true if the bio list inside this rbio covers an entire stripe (no
528 * rmw required).
529 */
530 static int rbio_is_full(struct btrfs_raid_bio *rbio)
531 {
532 unsigned long flags;
533 unsigned long size = rbio->bio_list_bytes;
534 int ret = 1;
535
536 spin_lock_irqsave(&rbio->bio_list_lock, flags);
537 if (size != rbio->nr_data * rbio->stripe_len)
538 ret = 0;
539 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
540 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
541
542 return ret;
543 }
544
545 /*
546 * returns 1 if it is safe to merge two rbios together.
547 * The merging is safe if the two rbios correspond to
548 * the same stripe and if they are both going in the same
549 * direction (read vs write), and if neither one is
550 * locked for final IO
551 *
552 * The caller is responsible for locking such that
553 * rmw_locked is safe to test
554 */
555 static int rbio_can_merge(struct btrfs_raid_bio *last,
556 struct btrfs_raid_bio *cur)
557 {
558 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
559 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
560 return 0;
561
562 /*
563 * we can't merge with cached rbios, since the
564 * idea is that when we merge the destination
565 * rbio is going to run our IO for us. We can
566 * steal from cached rbios though, other functions
567 * handle that.
568 */
569 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
570 test_bit(RBIO_CACHE_BIT, &cur->flags))
571 return 0;
572
573 if (last->bbio->raid_map[0] !=
574 cur->bbio->raid_map[0])
575 return 0;
576
577 /* we can't merge with different operations */
578 if (last->operation != cur->operation)
579 return 0;
580 /*
581 * We need to read the full stripe from the drive, then check and
582 * repair the parity and write the new results back.
583 *
584 * We're not allowed to add any new bios to the
585 * bio list here, anyone else that wants to
586 * change this stripe needs to do their own rmw.
587 */
588 if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
589 return 0;
590
591 if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
592 return 0;
593
594 if (last->operation == BTRFS_RBIO_READ_REBUILD) {
595 int fa = last->faila;
596 int fb = last->failb;
597 int cur_fa = cur->faila;
598 int cur_fb = cur->failb;
599
600 if (last->faila >= last->failb) {
601 fa = last->failb;
602 fb = last->faila;
603 }
604
605 if (cur->faila >= cur->failb) {
606 cur_fa = cur->failb;
607 cur_fb = cur->faila;
608 }
609
610 if (fa != cur_fa || fb != cur_fb)
611 return 0;
612 }
613 return 1;
614 }
615
616 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
617 int index)
618 {
619 return stripe * rbio->stripe_npages + index;
620 }
621
622 /*
623 * these are just the pages from the rbio array, not from anything
624 * the FS sent down to us
625 */
626 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
627 int index)
628 {
629 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
630 }
631
632 /*
633 * helper to index into the pstripe
634 */
635 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
636 {
637 return rbio_stripe_page(rbio, rbio->nr_data, index);
638 }
639
640 /*
641 * helper to index into the qstripe, returns null
642 * if there is no qstripe
643 */
644 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
645 {
646 if (rbio->nr_data + 1 == rbio->real_stripes)
647 return NULL;
648 return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
649 }
650
651 /*
652 * The first stripe in the table for a logical address
653 * has the lock. rbios are added in one of three ways:
654 *
655 * 1) Nobody has the stripe locked yet. The rbio is given
656 * the lock and 0 is returned. The caller must start the IO
657 * themselves.
658 *
659 * 2) Someone has the stripe locked, but we're able to merge
660 * with the lock owner. The rbio is freed and the IO will
661 * start automatically along with the existing rbio. 1 is returned.
662 *
663 * 3) Someone has the stripe locked, but we're not able to merge.
664 * The rbio is added to the lock owner's plug list, or merged into
665 * an rbio already on the plug list. When the lock owner unlocks,
666 * the next rbio on the list is run and the IO is started automatically.
667 * 1 is returned
668 *
669 * If we return 0, the caller still owns the rbio and must continue with
670 * IO submission. If we return 1, the caller must assume the rbio has
671 * already been freed.
672 */
673 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
674 {
675 struct btrfs_stripe_hash *h;
676 struct btrfs_raid_bio *cur;
677 struct btrfs_raid_bio *pending;
678 unsigned long flags;
679 struct btrfs_raid_bio *freeit = NULL;
680 struct btrfs_raid_bio *cache_drop = NULL;
681 int ret = 0;
682
683 h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
684
685 spin_lock_irqsave(&h->lock, flags);
686 list_for_each_entry(cur, &h->hash_list, hash_list) {
687 if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
688 continue;
689
690 spin_lock(&cur->bio_list_lock);
691
692 /* Can we steal this cached rbio's pages? */
693 if (bio_list_empty(&cur->bio_list) &&
694 list_empty(&cur->plug_list) &&
695 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
696 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
697 list_del_init(&cur->hash_list);
698 refcount_dec(&cur->refs);
699
700 steal_rbio(cur, rbio);
701 cache_drop = cur;
702 spin_unlock(&cur->bio_list_lock);
703
704 goto lockit;
705 }
706
707 /* Can we merge into the lock owner? */
708 if (rbio_can_merge(cur, rbio)) {
709 merge_rbio(cur, rbio);
710 spin_unlock(&cur->bio_list_lock);
711 freeit = rbio;
712 ret = 1;
713 goto out;
714 }
715
716
717 /*
718 * We couldn't merge with the running rbio, see if we can merge
719 * with the pending ones. We don't have to check for rmw_locked
720 * because there is no way they are inside finish_rmw right now
721 */
722 list_for_each_entry(pending, &cur->plug_list, plug_list) {
723 if (rbio_can_merge(pending, rbio)) {
724 merge_rbio(pending, rbio);
725 spin_unlock(&cur->bio_list_lock);
726 freeit = rbio;
727 ret = 1;
728 goto out;
729 }
730 }
731
732 /*
733 * No merging, put us on the tail of the plug list, our rbio
734 * will be started when the currently running rbio unlocks
735 */
736 list_add_tail(&rbio->plug_list, &cur->plug_list);
737 spin_unlock(&cur->bio_list_lock);
738 ret = 1;
739 goto out;
740 }
741 lockit:
742 refcount_inc(&rbio->refs);
743 list_add(&rbio->hash_list, &h->hash_list);
744 out:
745 spin_unlock_irqrestore(&h->lock, flags);
746 if (cache_drop)
747 remove_rbio_from_cache(cache_drop);
748 if (freeit)
749 __free_raid_bio(freeit);
750 return ret;
751 }
752
753 /*
754 * called as rmw or parity rebuild is completed. If the plug list has more
755 * rbios waiting for this stripe, the next one on the list will be started
756 */
757 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
758 {
759 int bucket;
760 struct btrfs_stripe_hash *h;
761 unsigned long flags;
762 int keep_cache = 0;
763
764 bucket = rbio_bucket(rbio);
765 h = rbio->fs_info->stripe_hash_table->table + bucket;
766
767 if (list_empty(&rbio->plug_list))
768 cache_rbio(rbio);
769
770 spin_lock_irqsave(&h->lock, flags);
771 spin_lock(&rbio->bio_list_lock);
772
773 if (!list_empty(&rbio->hash_list)) {
774 /*
775 * if we're still cached and there is no other IO
776 * to perform, just leave this rbio here for others
777 * to steal from later
778 */
779 if (list_empty(&rbio->plug_list) &&
780 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
781 keep_cache = 1;
782 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
783 BUG_ON(!bio_list_empty(&rbio->bio_list));
784 goto done;
785 }
786
787 list_del_init(&rbio->hash_list);
788 refcount_dec(&rbio->refs);
789
790 /*
791 * we use the plug list to hold all the rbios
792 * waiting for the chance to lock this stripe.
793 * hand the lock over to one of them.
794 */
795 if (!list_empty(&rbio->plug_list)) {
796 struct btrfs_raid_bio *next;
797 struct list_head *head = rbio->plug_list.next;
798
799 next = list_entry(head, struct btrfs_raid_bio,
800 plug_list);
801
802 list_del_init(&rbio->plug_list);
803
804 list_add(&next->hash_list, &h->hash_list);
805 refcount_inc(&next->refs);
806 spin_unlock(&rbio->bio_list_lock);
807 spin_unlock_irqrestore(&h->lock, flags);
808
809 if (next->operation == BTRFS_RBIO_READ_REBUILD)
810 start_async_work(next, read_rebuild_work);
811 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
812 steal_rbio(rbio, next);
813 start_async_work(next, read_rebuild_work);
814 } else if (next->operation == BTRFS_RBIO_WRITE) {
815 steal_rbio(rbio, next);
816 start_async_work(next, rmw_work);
817 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
818 steal_rbio(rbio, next);
819 start_async_work(next, scrub_parity_work);
820 }
821
822 goto done_nolock;
823 }
824 }
825 done:
826 spin_unlock(&rbio->bio_list_lock);
827 spin_unlock_irqrestore(&h->lock, flags);
828
829 done_nolock:
830 if (!keep_cache)
831 remove_rbio_from_cache(rbio);
832 }
833
834 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
835 {
836 int i;
837
838 if (!refcount_dec_and_test(&rbio->refs))
839 return;
840
841 WARN_ON(!list_empty(&rbio->stripe_cache));
842 WARN_ON(!list_empty(&rbio->hash_list));
843 WARN_ON(!bio_list_empty(&rbio->bio_list));
844
845 for (i = 0; i < rbio->nr_pages; i++) {
846 if (rbio->stripe_pages[i]) {
847 __free_page(rbio->stripe_pages[i]);
848 rbio->stripe_pages[i] = NULL;
849 }
850 }
851
852 btrfs_put_bbio(rbio->bbio);
853 kfree(rbio);
854 }
855
856 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
857 {
858 struct bio *next;
859
860 while (cur) {
861 next = cur->bi_next;
862 cur->bi_next = NULL;
863 cur->bi_status = err;
864 bio_endio(cur);
865 cur = next;
866 }
867 }
868
869 /*
870 * this frees the rbio and runs through all the bios in the
871 * bio_list and calls end_io on them
872 */
873 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
874 {
875 struct bio *cur = bio_list_get(&rbio->bio_list);
876 struct bio *extra;
877
878 if (rbio->generic_bio_cnt)
879 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
880 /*
881 * Clear the data bitmap, as the rbio may be cached for later usage.
882 * Do this before unlock_stripe() so there will be no new bio
883 * for this rbio.
884 */
885 bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages);
886
887 /*
888 * At this moment, rbio->bio_list is empty, however since rbio does not
889 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
890 * hash list, rbio may be merged with others so that rbio->bio_list
891 * becomes non-empty.
892 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
893 * more and we can call bio_endio() on all queued bios.
894 */
895 unlock_stripe(rbio);
896 extra = bio_list_get(&rbio->bio_list);
897 __free_raid_bio(rbio);
898
899 rbio_endio_bio_list(cur, err);
900 if (extra)
901 rbio_endio_bio_list(extra, err);
902 }
903
904 /*
905 * end io function used by finish_rmw. When we finally
906 * get here, we've written a full stripe
907 */
908 static void raid_write_end_io(struct bio *bio)
909 {
910 struct btrfs_raid_bio *rbio = bio->bi_private;
911 blk_status_t err = bio->bi_status;
912 int max_errors;
913
914 if (err)
915 fail_bio_stripe(rbio, bio);
916
917 bio_put(bio);
918
919 if (!atomic_dec_and_test(&rbio->stripes_pending))
920 return;
921
922 err = BLK_STS_OK;
923
924 /* OK, we have finished writing all the stripes we need to. */
925 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
926 0 : rbio->bbio->max_errors;
927 if (atomic_read(&rbio->error) > max_errors)
928 err = BLK_STS_IOERR;
929
930 rbio_orig_end_io(rbio, err);
931 }
932
933 /*
934 * the read/modify/write code wants to use the original bio for
935 * any pages it included, and then use the rbio for everything
936 * else. This function decides if a given index (stripe number)
937 * and page number in that stripe fall inside the original bio
938 * or the rbio.
939 *
940 * if you set bio_list_only, you'll get a NULL back for any ranges
941 * that are outside the bio_list
942 *
943 * This doesn't take any refs on anything, you get a bare page pointer
944 * and the caller must bump refs as required.
945 *
946 * You must call index_rbio_pages once before you can trust
947 * the answers from this function.
948 */
949 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
950 int index, int pagenr, int bio_list_only)
951 {
952 int chunk_page;
953 struct page *p = NULL;
954
955 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
956
957 spin_lock_irq(&rbio->bio_list_lock);
958 p = rbio->bio_pages[chunk_page];
959 spin_unlock_irq(&rbio->bio_list_lock);
960
961 if (p || bio_list_only)
962 return p;
963
964 return rbio->stripe_pages[chunk_page];
965 }
966
967 /*
968 * number of pages we need for the entire stripe across all the
969 * drives
970 */
971 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
972 {
973 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
974 }
975
976 /*
977 * allocation and initial setup for the btrfs_raid_bio. Note that
978 * this does not allocate any pages for rbio->pages.
979 */
980 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
981 struct btrfs_bio *bbio,
982 u64 stripe_len)
983 {
984 struct btrfs_raid_bio *rbio;
985 int nr_data = 0;
986 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
987 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
988 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
989 void *p;
990
991 rbio = kzalloc(sizeof(*rbio) +
992 sizeof(*rbio->stripe_pages) * num_pages +
993 sizeof(*rbio->bio_pages) * num_pages +
994 sizeof(*rbio->finish_pointers) * real_stripes +
995 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
996 sizeof(*rbio->finish_pbitmap) *
997 BITS_TO_LONGS(stripe_npages),
998 GFP_NOFS);
999 if (!rbio)
1000 return ERR_PTR(-ENOMEM);
1001
1002 bio_list_init(&rbio->bio_list);
1003 INIT_LIST_HEAD(&rbio->plug_list);
1004 spin_lock_init(&rbio->bio_list_lock);
1005 INIT_LIST_HEAD(&rbio->stripe_cache);
1006 INIT_LIST_HEAD(&rbio->hash_list);
1007 rbio->bbio = bbio;
1008 rbio->fs_info = fs_info;
1009 rbio->stripe_len = stripe_len;
1010 rbio->nr_pages = num_pages;
1011 rbio->real_stripes = real_stripes;
1012 rbio->stripe_npages = stripe_npages;
1013 rbio->faila = -1;
1014 rbio->failb = -1;
1015 refcount_set(&rbio->refs, 1);
1016 atomic_set(&rbio->error, 0);
1017 atomic_set(&rbio->stripes_pending, 0);
1018
1019 /*
1020 * the stripe_pages, bio_pages, etc arrays point to the extra
1021 * memory we allocated past the end of the rbio
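* (in the order consumed below: stripe_pages, bio_pages, finish_pointers,
* dbitmap, finish_pbitmap)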
1022 */
1023 p = rbio + 1;
1024 #define CONSUME_ALLOC(ptr, count) do { \
1025 ptr = p; \
1026 p = (unsigned char *)p + sizeof(*(ptr)) * (count); \
1027 } while (0)
1028 CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1029 CONSUME_ALLOC(rbio->bio_pages, num_pages);
1030 CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1031 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1032 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1033 #undef CONSUME_ALLOC
1034
1035 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1036 nr_data = real_stripes - 1;
1037 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1038 nr_data = real_stripes - 2;
1039 else
1040 BUG();
1041
1042 rbio->nr_data = nr_data;
1043 return rbio;
1044 }
1045
1046 /* allocate pages for all the stripes in the bio, including parity */
1047 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1048 {
1049 int i;
1050 struct page *page;
1051
1052 for (i = 0; i < rbio->nr_pages; i++) {
1053 if (rbio->stripe_pages[i])
1054 continue;
1055 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1056 if (!page)
1057 return -ENOMEM;
1058 rbio->stripe_pages[i] = page;
1059 }
1060 return 0;
1061 }
1062
1063 /* only allocate pages for p/q stripes */
1064 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1065 {
1066 int i;
1067 struct page *page;
1068
1069 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1070
1071 for (; i < rbio->nr_pages; i++) {
1072 if (rbio->stripe_pages[i])
1073 continue;
1074 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1075 if (!page)
1076 return -ENOMEM;
1077 rbio->stripe_pages[i] = page;
1078 }
1079 return 0;
1080 }
1081
1082 /*
1083 * add a single page from a specific stripe into our list of bios for IO
1084 * this will try to merge into existing bios if possible, and returns
1085 * zero if all went well.
1086 */
1087 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1088 struct bio_list *bio_list,
1089 struct page *page,
1090 int stripe_nr,
1091 unsigned long page_index,
1092 unsigned long bio_max_len)
1093 {
1094 struct bio *last = bio_list->tail;
1095 int ret;
1096 struct bio *bio;
1097 struct btrfs_bio_stripe *stripe;
1098 u64 disk_start;
1099
1100 stripe = &rbio->bbio->stripes[stripe_nr];
1101 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1102
1103 /* if the device is missing, just fail this stripe */
1104 if (!stripe->dev->bdev)
1105 return fail_rbio_index(rbio, stripe_nr);
1106
1107 /* see if we can add this page onto our existing bio */
1108 if (last) {
1109 u64 last_end = (u64)last->bi_iter.bi_sector << 9;
1110 last_end += last->bi_iter.bi_size;
1111
1112 /*
1113 * we can't merge these if they are from different
1114 * devices or if they are not contiguous
1115 */
1116 if (last_end == disk_start && !last->bi_status &&
1117 last->bi_disk == stripe->dev->bdev->bd_disk &&
1118 last->bi_partno == stripe->dev->bdev->bd_partno) {
1119 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1120 if (ret == PAGE_SIZE)
1121 return 0;
1122 }
1123 }
1124
1125 /* put a new bio on the list */
1126 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1127 btrfs_io_bio(bio)->device = stripe->dev;
1128 bio->bi_iter.bi_size = 0;
1129 bio_set_dev(bio, stripe->dev->bdev);
1130 bio->bi_iter.bi_sector = disk_start >> 9;
1131
1132 bio_add_page(bio, page, PAGE_SIZE, 0);
1133 bio_list_add(bio_list, bio);
1134 return 0;
1135 }
1136
1137 /*
1138 * while we're doing the read/modify/write cycle, we could
1139 * have errors in reading pages off the disk. This checks
1140 * for errors and if we're not able to read the page it'll
1141 * trigger parity reconstruction. The rmw will be finished
1142 * after we've reconstructed the failed stripes
1143 */
1144 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1145 {
1146 if (rbio->faila >= 0 || rbio->failb >= 0) {
1147 BUG_ON(rbio->faila == rbio->real_stripes - 1);
1148 __raid56_parity_recover(rbio);
1149 } else {
1150 finish_rmw(rbio);
1151 }
1152 }
1153
1154 /*
1155 * helper function to walk our bio list and populate the bio_pages array with
1156 * the result. This seems expensive, but it is faster than constantly
1157 * searching through the bio list as we setup the IO in finish_rmw or stripe
1158 * reconstruction.
1159 *
1160 * This must be called before you trust the answers from page_in_rbio
1161 */
1162 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1163 {
1164 struct bio *bio;
1165 u64 start;
1166 unsigned long stripe_offset;
1167 unsigned long page_index;
1168
1169 spin_lock_irq(&rbio->bio_list_lock);
1170 bio_list_for_each(bio, &rbio->bio_list) {
1171 struct bio_vec bvec;
1172 struct bvec_iter iter;
1173 int i = 0;
1174
1175 start = (u64)bio->bi_iter.bi_sector << 9;
1176 stripe_offset = start - rbio->bbio->raid_map[0];
1177 page_index = stripe_offset >> PAGE_SHIFT;
1178
1179 if (bio_flagged(bio, BIO_CLONED))
1180 bio->bi_iter = btrfs_io_bio(bio)->iter;
1181
1182 bio_for_each_segment(bvec, bio, iter) {
1183 rbio->bio_pages[page_index + i] = bvec.bv_page;
1184 i++;
1185 }
1186 }
1187 spin_unlock_irq(&rbio->bio_list_lock);
1188 }
1189
1190 /*
1191 * this is called from one of two situations. We either
1192 * have a full stripe from the higher layers, or we've read all
1193 * the missing bits off disk.
1194 *
1195 * This will calculate the parity and then send down any
1196 * changed blocks.
1197 */
1198 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1199 {
1200 struct btrfs_bio *bbio = rbio->bbio;
1201 void **pointers = rbio->finish_pointers;
1202 int nr_data = rbio->nr_data;
1203 int stripe;
1204 int pagenr;
1205 bool has_qstripe;
1206 struct bio_list bio_list;
1207 struct bio *bio;
1208 int ret;
1209
1210 bio_list_init(&bio_list);
1211
1212 if (rbio->real_stripes - rbio->nr_data == 1)
1213 has_qstripe = false;
1214 else if (rbio->real_stripes - rbio->nr_data == 2)
1215 has_qstripe = true;
1216 else
1217 BUG();
1218
1219 /* We should have at least one data sector. */
1220 ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages));
1221
1222 /* at this point we either have a full stripe,
1223 * or we've read the full stripe from the drive.
1224 * recalculate the parity and write the new results.
1225 *
1226 * We're not allowed to add any new bios to the
1227 * bio list here, anyone else that wants to
1228 * change this stripe needs to do their own rmw.
1229 */
1230 spin_lock_irq(&rbio->bio_list_lock);
1231 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1232 spin_unlock_irq(&rbio->bio_list_lock);
1233
1234 atomic_set(&rbio->error, 0);
1235
1236 /*
1237 * now that we've set rmw_locked, run through the
1238 * bio list one last time and map the page pointers
1239 *
1240 * We don't cache full rbios because we're assuming
1241 * the higher layers are unlikely to use this area of
1242 * the disk again soon. If they do use it again,
1243 * hopefully they will send another full bio.
1244 */
1245 index_rbio_pages(rbio);
1246 if (!rbio_is_full(rbio))
1247 cache_rbio_pages(rbio);
1248 else
1249 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1250
1251 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1252 struct page *p;
1253 /* first collect one page from each data stripe */
1254 for (stripe = 0; stripe < nr_data; stripe++) {
1255 p = page_in_rbio(rbio, stripe, pagenr, 0);
1256 pointers[stripe] = kmap(p);
1257 }
1258
1259 /* then add the parity stripe */
1260 p = rbio_pstripe_page(rbio, pagenr);
1261 SetPageUptodate(p);
1262 pointers[stripe++] = kmap(p);
1263
1264 if (has_qstripe) {
1265
1266 /*
1267 * raid6, add the qstripe and call the
1268 * library function to fill in our p/q
1269 */
1270 p = rbio_qstripe_page(rbio, pagenr);
1271 SetPageUptodate(p);
1272 pointers[stripe++] = kmap(p);
1273
1274 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1275 pointers);
1276 } else {
1277 /* raid5 */
1278 copy_page(pointers[nr_data], pointers[0]);
1279 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1280 }
1281
1282
1283 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1284 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1285 }
1286
1287 /*
1288 * time to start writing. Make bios for everything from the
1289 * higher layers (the bio_list in our rbio) and our p/q. Ignore
1290 * everything else.
1291 */
1292 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1293 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1294 struct page *page;
1295
1296 /* This vertical stripe has no data, skip it. */
1297 if (!test_bit(pagenr, rbio->dbitmap))
1298 continue;
1299
1300 if (stripe < rbio->nr_data) {
1301 page = page_in_rbio(rbio, stripe, pagenr, 1);
1302 if (!page)
1303 continue;
1304 } else {
1305 page = rbio_stripe_page(rbio, stripe, pagenr);
1306 }
1307
1308 ret = rbio_add_io_page(rbio, &bio_list,
1309 page, stripe, pagenr, rbio->stripe_len);
1310 if (ret)
1311 goto cleanup;
1312 }
1313 }
1314
1315 if (likely(!bbio->num_tgtdevs))
1316 goto write_data;
1317
1318 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1319 if (!bbio->tgtdev_map[stripe])
1320 continue;
1321
1322 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1323 struct page *page;
1324
1325 /* This vertical stripe has no data, skip it. */
1326 if (!test_bit(pagenr, rbio->dbitmap))
1327 continue;
1328
1329 if (stripe < rbio->nr_data) {
1330 page = page_in_rbio(rbio, stripe, pagenr, 1);
1331 if (!page)
1332 continue;
1333 } else {
1334 page = rbio_stripe_page(rbio, stripe, pagenr);
1335 }
1336
1337 ret = rbio_add_io_page(rbio, &bio_list, page,
1338 rbio->bbio->tgtdev_map[stripe],
1339 pagenr, rbio->stripe_len);
1340 if (ret)
1341 goto cleanup;
1342 }
1343 }
1344
1345 write_data:
1346 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1347 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1348
1349 while ((bio = bio_list_pop(&bio_list))) {
1350 bio->bi_private = rbio;
1351 bio->bi_end_io = raid_write_end_io;
1352 bio->bi_opf = REQ_OP_WRITE;
1353
1354 submit_bio(bio);
1355 }
1356 return;
1357
1358 cleanup:
1359 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1360
1361 while ((bio = bio_list_pop(&bio_list)))
1362 bio_put(bio);
1363 }
1364
1365 /*
1366 * helper to find the stripe number for a given bio. Used to figure out which
1367 * stripe has failed. This expects the bio to correspond to a physical disk,
1368 * so it looks up based on physical sector numbers.
1369 */
1370 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1371 struct bio *bio)
1372 {
1373 u64 physical = bio->bi_iter.bi_sector;
1374 int i;
1375 struct btrfs_bio_stripe *stripe;
1376
1377 physical <<= 9;
1378
1379 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1380 stripe = &rbio->bbio->stripes[i];
1381 if (in_range(physical, stripe->physical, rbio->stripe_len) &&
1382 stripe->dev->bdev &&
1383 bio->bi_disk == stripe->dev->bdev->bd_disk &&
1384 bio->bi_partno == stripe->dev->bdev->bd_partno) {
1385 return i;
1386 }
1387 }
1388 return -1;
1389 }
1390
1391 /*
1392 * helper to find the stripe number for a given
1393 * bio (before mapping). Used to figure out which stripe has
1394 * failed. This looks up based on logical block numbers.
1395 */
1396 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1397 struct bio *bio)
1398 {
1399 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1400 int i;
1401
1402 for (i = 0; i < rbio->nr_data; i++) {
1403 u64 stripe_start = rbio->bbio->raid_map[i];
1404
1405 if (in_range(logical, stripe_start, rbio->stripe_len))
1406 return i;
1407 }
1408 return -1;
1409 }
1410
1411 /*
1412 * returns -EIO if we had too many failures
1413 */
1414 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1415 {
1416 unsigned long flags;
1417 int ret = 0;
1418
1419 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1420
1421 /* we already know this stripe is bad, move on */
1422 if (rbio->faila == failed || rbio->failb == failed)
1423 goto out;
1424
1425 if (rbio->faila == -1) {
1426 /* first failure on this rbio */
1427 rbio->faila = failed;
1428 atomic_inc(&rbio->error);
1429 } else if (rbio->failb == -1) {
1430 /* second failure on this rbio */
1431 rbio->failb = failed;
1432 atomic_inc(&rbio->error);
1433 } else {
1434 ret = -EIO;
1435 }
1436 out:
1437 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1438
1439 return ret;
1440 }
1441
1442 /*
1443 * helper to fail a stripe based on a physical disk
1444 * bio.
1445 */
1446 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1447 struct bio *bio)
1448 {
1449 int failed = find_bio_stripe(rbio, bio);
1450
1451 if (failed < 0)
1452 return -EIO;
1453
1454 return fail_rbio_index(rbio, failed);
1455 }
1456
1457 /*
1458 * this sets each page in the bio uptodate. It should only be used on private
1459 * rbio pages, nothing that comes in from the higher layers
1460 */
1461 static void set_bio_pages_uptodate(struct bio *bio)
1462 {
1463 struct bio_vec *bvec;
1464 struct bvec_iter_all iter_all;
1465
1466 ASSERT(!bio_flagged(bio, BIO_CLONED));
1467
1468 bio_for_each_segment_all(bvec, bio, iter_all)
1469 SetPageUptodate(bvec->bv_page);
1470 }
1471
1472 /*
1473 * end io for the read phase of the rmw cycle. All the bios here are physical
1474 * stripe bios we've read from the disk so we can recalculate the parity of the
1475 * stripe.
1476 *
1477 * This will usually kick off finish_rmw once all the bios are read in, but it
1478 * may trigger parity reconstruction if we had any errors along the way
1479 */
1480 static void raid_rmw_end_io(struct bio *bio)
1481 {
1482 struct btrfs_raid_bio *rbio = bio->bi_private;
1483
1484 if (bio->bi_status)
1485 fail_bio_stripe(rbio, bio);
1486 else
1487 set_bio_pages_uptodate(bio);
1488
1489 bio_put(bio);
1490
1491 if (!atomic_dec_and_test(&rbio->stripes_pending))
1492 return;
1493
1494 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1495 goto cleanup;
1496
1497 /*
1498 * this will normally call finish_rmw to start our write
1499 * but if there are any failed stripes we'll reconstruct
1500 * from parity first
1501 */
1502 validate_rbio_for_rmw(rbio);
1503 return;
1504
1505 cleanup:
1506
1507 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1508 }
1509
1510 /*
1511 * the stripe must be locked by the caller. It will
1512 * unlock after all the writes are done
1513 */
1514 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1515 {
1516 int bios_to_read = 0;
1517 struct bio_list bio_list;
1518 int ret;
1519 int pagenr;
1520 int stripe;
1521 struct bio *bio;
1522
1523 bio_list_init(&bio_list);
1524
1525 ret = alloc_rbio_pages(rbio);
1526 if (ret)
1527 goto cleanup;
1528
1529 index_rbio_pages(rbio);
1530
1531 atomic_set(&rbio->error, 0);
1532 /*
1533 * build a list of bios to read all the missing parts of this
1534 * stripe
1535 */
1536 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1537 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1538 struct page *page;
1539 /*
1540 * we want to find all the pages missing from
1541 * the rbio and read them from the disk. If
1542 * page_in_rbio finds a page in the bio list
1543 * we don't need to read it off the stripe.
1544 */
1545 page = page_in_rbio(rbio, stripe, pagenr, 1);
1546 if (page)
1547 continue;
1548
1549 page = rbio_stripe_page(rbio, stripe, pagenr);
1550 /*
1551 * the bio cache may have handed us an uptodate
1552 * page. If so, be happy and use it
1553 */
1554 if (PageUptodate(page))
1555 continue;
1556
1557 ret = rbio_add_io_page(rbio, &bio_list, page,
1558 stripe, pagenr, rbio->stripe_len);
1559 if (ret)
1560 goto cleanup;
1561 }
1562 }
1563
1564 bios_to_read = bio_list_size(&bio_list);
1565 if (!bios_to_read) {
1566 /*
1567 * this can happen if others have merged with
1568 * us, it means there is nothing left to read.
1569 * But if there are missing devices it may not be
1570 * safe to do the full stripe write yet.
1571 */
1572 goto finish;
1573 }
1574
1575 /*
1576 * the bbio may be freed once we submit the last bio. Make sure
1577 * not to touch it after that
1578 */
1579 atomic_set(&rbio->stripes_pending, bios_to_read);
1580 while ((bio = bio_list_pop(&bio_list))) {
1581 bio->bi_private = rbio;
1582 bio->bi_end_io = raid_rmw_end_io;
1583 bio->bi_opf = REQ_OP_READ;
1584
1585 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1586
1587 submit_bio(bio);
1588 }
1589 /* the actual write will happen once the reads are done */
1590 return 0;
1591
1592 cleanup:
1593 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1594
1595 while ((bio = bio_list_pop(&bio_list)))
1596 bio_put(bio);
1597
1598 return -EIO;
1599
1600 finish:
1601 validate_rbio_for_rmw(rbio);
1602 return 0;
1603 }
1604
1605 /*
1606 * if the upper layers pass in a full stripe, we thank them by only allocating
1607 * enough pages to hold the parity, and sending it all down quickly.
1608 */
1609 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1610 {
1611 int ret;
1612
1613 ret = alloc_rbio_parity_pages(rbio);
1614 if (ret) {
1615 __free_raid_bio(rbio);
1616 return ret;
1617 }
1618
1619 ret = lock_stripe_add(rbio);
1620 if (ret == 0)
1621 finish_rmw(rbio);
1622 return 0;
1623 }
1624
1625 /*
1626 * partial stripe writes get handed over to async helpers.
1627 * We're really hoping to merge a few more writes into this
1628 * rbio before calculating new parity
1629 */
1630 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1631 {
1632 int ret;
1633
1634 ret = lock_stripe_add(rbio);
1635 if (ret == 0)
1636 start_async_work(rbio, rmw_work);
1637 return 0;
1638 }
1639
1640 /*
1641 * sometimes while we were reading from the drive to
1642 * recalculate parity, enough new bios come in to create
1643 * a full stripe. So we do a check here to see if we can
1644 * go directly to finish_rmw
1645 */
1646 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1647 {
1648 /* head off into rmw land if we don't have a full stripe */
1649 if (!rbio_is_full(rbio))
1650 return partial_stripe_write(rbio);
1651 return full_stripe_write(rbio);
1652 }
1653
1654 /*
1655 * We use plugging call backs to collect full stripes.
1656 * Any time we get a partial stripe write while plugged
1657 * we collect it into a list. When the unplug comes down,
1658 * we sort the list by logical block number and merge
1659 * everything we can into the same rbios
1660 */
1661 struct btrfs_plug_cb {
1662 struct blk_plug_cb cb;
1663 struct btrfs_fs_info *info;
1664 struct list_head rbio_list;
1665 struct btrfs_work work;
1666 };
1667
1668 /*
1669 * rbios on the plug list are sorted for easier merging.
1670 */
1671 static int plug_cmp(void *priv, const struct list_head *a,
1672 const struct list_head *b)
1673 {
1674 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1675 plug_list);
1676 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1677 plug_list);
1678 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1679 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1680
1681 if (a_sector < b_sector)
1682 return -1;
1683 if (a_sector > b_sector)
1684 return 1;
1685 return 0;
1686 }
1687
1688 static void run_plug(struct btrfs_plug_cb *plug)
1689 {
1690 struct btrfs_raid_bio *cur;
1691 struct btrfs_raid_bio *last = NULL;
1692
1693 /*
1694 * sort our plug list then try to merge
1695 * everything we can in hopes of creating full
1696 * stripes.
1697 */
1698 list_sort(NULL, &plug->rbio_list, plug_cmp);
1699 while (!list_empty(&plug->rbio_list)) {
1700 cur = list_entry(plug->rbio_list.next,
1701 struct btrfs_raid_bio, plug_list);
1702 list_del_init(&cur->plug_list);
1703
1704 if (rbio_is_full(cur)) {
1705 int ret;
1706
1707 /* we have a full stripe, send it down */
1708 ret = full_stripe_write(cur);
1709 BUG_ON(ret);
1710 continue;
1711 }
1712 if (last) {
1713 if (rbio_can_merge(last, cur)) {
1714 merge_rbio(last, cur);
1715 __free_raid_bio(cur);
1716 continue;
1717
1718 }
1719 __raid56_parity_write(last);
1720 }
1721 last = cur;
1722 }
1723 if (last) {
1724 __raid56_parity_write(last);
1725 }
1726 kfree(plug);
1727 }
1728
1729 /*
1730 * if the unplug comes from schedule, we have to push the
1731 * work off to a helper thread
1732 */
1733 static void unplug_work(struct btrfs_work *work)
1734 {
1735 struct btrfs_plug_cb *plug;
1736 plug = container_of(work, struct btrfs_plug_cb, work);
1737 run_plug(plug);
1738 }
1739
1740 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1741 {
1742 struct btrfs_plug_cb *plug;
1743 plug = container_of(cb, struct btrfs_plug_cb, cb);
1744
1745 if (from_schedule) {
1746 btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
1747 btrfs_queue_work(plug->info->rmw_workers,
1748 &plug->work);
1749 return;
1750 }
1751 run_plug(plug);
1752 }
1753
1754 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1755 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1756 {
1757 const struct btrfs_fs_info *fs_info = rbio->fs_info;
1758 const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1759 const u64 full_stripe_start = rbio->bbio->raid_map[0];
1760 const u32 orig_len = orig_bio->bi_iter.bi_size;
1761 const u32 sectorsize = fs_info->sectorsize;
1762 u64 cur_logical;
1763
1764 ASSERT(orig_logical >= full_stripe_start &&
1765 orig_logical + orig_len <= full_stripe_start +
1766 rbio->nr_data * rbio->stripe_len);
1767
1768 bio_list_add(&rbio->bio_list, orig_bio);
1769 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1770
1771 /* Update the dbitmap. */
1772 for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1773 cur_logical += sectorsize) {
1774 int bit = ((u32)(cur_logical - full_stripe_start) >>
1775 PAGE_SHIFT) % rbio->stripe_npages;
1776
1777 set_bit(bit, rbio->dbitmap);
1778 }
1779 }
1780
1781 /*
1782 * our main entry point for writes from the rest of the FS.
1783 */
1784 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1785 struct btrfs_bio *bbio, u64 stripe_len)
1786 {
1787 struct btrfs_raid_bio *rbio;
1788 struct btrfs_plug_cb *plug = NULL;
1789 struct blk_plug_cb *cb;
1790 int ret;
1791
1792 rbio = alloc_rbio(fs_info, bbio, stripe_len);
1793 if (IS_ERR(rbio)) {
1794 btrfs_put_bbio(bbio);
1795 return PTR_ERR(rbio);
1796 }
1797 rbio->operation = BTRFS_RBIO_WRITE;
1798 rbio_add_bio(rbio, bio);
1799
1800 btrfs_bio_counter_inc_noblocked(fs_info);
1801 rbio->generic_bio_cnt = 1;
1802
1803 /*
1804 * don't plug on full rbios, just get them out the door
1805 * as quickly as we can
1806 */
1807 if (rbio_is_full(rbio)) {
1808 ret = full_stripe_write(rbio);
1809 if (ret)
1810 btrfs_bio_counter_dec(fs_info);
1811 return ret;
1812 }
1813
1814 cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1815 if (cb) {
1816 plug = container_of(cb, struct btrfs_plug_cb, cb);
1817 if (!plug->info) {
1818 plug->info = fs_info;
1819 INIT_LIST_HEAD(&plug->rbio_list);
1820 }
1821 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1822 ret = 0;
1823 } else {
1824 ret = __raid56_parity_write(rbio);
1825 if (ret)
1826 btrfs_bio_counter_dec(fs_info);
1827 }
1828 return ret;
1829 }
1830
1831 /*
1832 * all parity reconstruction happens here. We've read in everything
1833 * we can find from the drives and this does the heavy lifting of
1834 * sorting the good from the bad.
1835 */
1836 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1837 {
1838 int pagenr, stripe;
1839 void **pointers;
1840 int faila = -1, failb = -1;
1841 struct page *page;
1842 blk_status_t err;
1843 int i;
1844
1845 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1846 if (!pointers) {
1847 err = BLK_STS_RESOURCE;
1848 goto cleanup_io;
1849 }
1850
1851 faila = rbio->faila;
1852 failb = rbio->failb;
1853
1854 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1855 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1856 spin_lock_irq(&rbio->bio_list_lock);
1857 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1858 spin_unlock_irq(&rbio->bio_list_lock);
1859 }
1860
1861 index_rbio_pages(rbio);
1862
1863 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1864 /*
1865 * Now we just use bitmap to mark the horizontal stripes in
1866 * which we have data when doing parity scrub.
1867 */
1868 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1869 !test_bit(pagenr, rbio->dbitmap))
1870 continue;
1871
1872 /* setup our array of pointers with pages
1873 * from each stripe
1874 */
1875 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1876 /*
1877 * if we're rebuilding a read, we have to use
1878 * pages from the bio list
1879 */
1880 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1881 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1882 (stripe == faila || stripe == failb)) {
1883 page = page_in_rbio(rbio, stripe, pagenr, 0);
1884 } else {
1885 page = rbio_stripe_page(rbio, stripe, pagenr);
1886 }
1887 pointers[stripe] = kmap(page);
1888 }
1889
1890 /* all raid6 handling here */
1891 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1892 /*
1893 * single failure, rebuild from parity raid5
1894 * style
1895 */
1896 if (failb < 0) {
1897 if (faila == rbio->nr_data) {
1898 /*
1899 * Just the P stripe has failed, without
1900 * a bad data or Q stripe.
1901 * TODO, we should redo the xor here.
1902 */
1903 err = BLK_STS_IOERR;
1904 goto cleanup;
1905 }
1906 /*
1907 * a single failure in raid6 is rebuilt
1908 * in the pstripe code below
1909 */
1910 goto pstripe;
1911 }
1912
1913 /* make sure our ps and qs are in order */
1914 if (faila > failb)
1915 swap(faila, failb);
1916
1917 /* if the q stripe failed, do a pstripe reconstruction
1918 * from the xors.
1919 * If both the q stripe and the P stripe failed, we're
1920 * here due to a crc mismatch and we can't give the caller
1921 * the data it wants
1922 */
1923 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1924 if (rbio->bbio->raid_map[faila] ==
1925 RAID5_P_STRIPE) {
1926 err = BLK_STS_IOERR;
1927 goto cleanup;
1928 }
1929 /*
1930 * otherwise we have one bad data stripe and
1931 * a good P stripe. raid5!
1932 */
1933 goto pstripe;
1934 }
1935
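/*
 * At this point failb is either the P stripe or a second data
 * stripe: raid6_datap_recov() rebuilds one data stripe plus P
 * using Q, while raid6_2data_recov() rebuilds two data stripes
 * from P and Q.
 */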
1936 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1937 raid6_datap_recov(rbio->real_stripes,
1938 PAGE_SIZE, faila, pointers);
1939 } else {
1940 raid6_2data_recov(rbio->real_stripes,
1941 PAGE_SIZE, faila, failb,
1942 pointers);
1943 }
1944 } else {
1945 void *p;
1946
1947 /* rebuild from P stripe here (raid5 or raid6) */
1948 BUG_ON(failb != -1);
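/*
 * RAID5 recovery relies on P = D0 ^ D1 ^ ... ^ Dn-1, so the missing
 * data page is P xored with all surviving data pages. Start from a
 * copy of P and xor the remaining data pages into it.
 */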
1949 pstripe:
1950 /* Copy parity block into failed block to start with */
1951 copy_page(pointers[faila], pointers[rbio->nr_data]);
1952
1953 /* rearrange the pointer array */
1954 p = pointers[faila];
1955 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1956 pointers[stripe] = pointers[stripe + 1];
1957 pointers[rbio->nr_data - 1] = p;
1958
1959 /* xor in the rest */
1960 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1961 }
1962 /* if we're doing this rebuild as part of an rmw, go through
1963 * and set all of our private rbio pages in the
1964 * failed stripes as uptodate. This way finish_rmw will
1965 * know they can be trusted. If this was a read reconstruction,
1966 * other endio functions will fiddle the uptodate bits
1967 */
1968 if (rbio->operation == BTRFS_RBIO_WRITE) {
1969 for (i = 0; i < rbio->stripe_npages; i++) {
1970 if (faila != -1) {
1971 page = rbio_stripe_page(rbio, faila, i);
1972 SetPageUptodate(page);
1973 }
1974 if (failb != -1) {
1975 page = rbio_stripe_page(rbio, failb, i);
1976 SetPageUptodate(page);
1977 }
1978 }
1979 }
1980 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1981 /*
1982 * if we're rebuilding a read, we have to use
1983 * pages from the bio list
1984 */
1985 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1986 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1987 (stripe == faila || stripe == failb)) {
1988 page = page_in_rbio(rbio, stripe, pagenr, 0);
1989 } else {
1990 page = rbio_stripe_page(rbio, stripe, pagenr);
1991 }
1992 kunmap(page);
1993 }
1994 }
1995
1996 err = BLK_STS_OK;
1997 cleanup:
1998 kfree(pointers);
1999
2000 cleanup_io:
2001 /*
2002 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
2003 * valid rbio that is consistent with the on-disk content, so such a
2004 * valid rbio can be cached to avoid further disk reads.
2005 */
2006 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2007 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
2008 /*
2009 * - In case of two failures, where rbio->failb != -1:
2010 *
2011 * Do not cache this rbio since the above read reconstruction
2012 * (raid6_datap_recov() or raid6_2data_recov()) may have
2013 * changed some content of stripes which are not identical to
2014 * on-disk content any more, otherwise, a later write/recover
2015 * may steal stripe_pages from this rbio and end up with
2016 * corruptions or rebuild failures.
2017 *
2018 * - In case of single failure, where rbio->failb == -1:
2019 *
2020 * Cache this rbio iff the above read reconstruction is
2021 * executed without problems.
2022 */
2023 if (err == BLK_STS_OK && rbio->failb < 0)
2024 cache_rbio_pages(rbio);
2025 else
2026 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2027
2028 rbio_orig_end_io(rbio, err);
2029 } else if (err == BLK_STS_OK) {
2030 rbio->faila = -1;
2031 rbio->failb = -1;
2032
2033 if (rbio->operation == BTRFS_RBIO_WRITE)
2034 finish_rmw(rbio);
2035 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2036 finish_parity_scrub(rbio, 0);
2037 else
2038 BUG();
2039 } else {
2040 rbio_orig_end_io(rbio, err);
2041 }
2042 }
2043
2044 /*
2045 * This is called only for stripes we've read from disk to
2046 * reconstruct the parity.
2047 */
2048 static void raid_recover_end_io(struct bio *bio)
2049 {
2050 struct btrfs_raid_bio *rbio = bio->bi_private;
2051
2052 /*
2053 * we only read stripe pages off the disk, set them
2054 * up to date if there were no errors
2055 */
2056 if (bio->bi_status)
2057 fail_bio_stripe(rbio, bio);
2058 else
2059 set_bio_pages_uptodate(bio);
2060 bio_put(bio);
2061
2062 if (!atomic_dec_and_test(&rbio->stripes_pending))
2063 return;
2064
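/*
 * That was the last outstanding read. If too many stripes failed,
 * the rbio cannot be repaired; otherwise run the reconstruction.
 */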
2065 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2066 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2067 else
2068 __raid_recover_end_io(rbio);
2069 }
2070
2071 /*
2072 * reads everything we need off the disk to reconstruct
2073 * the parity. endio handlers trigger final reconstruction
2074 * when the IO is done.
2075 *
2076 * This is used both for reads from the higher layers and for
2077 * parity construction required to finish a rmw cycle.
2078 */
2079 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2080 {
2081 int bios_to_read = 0;
2082 struct bio_list bio_list;
2083 int ret;
2084 int pagenr;
2085 int stripe;
2086 struct bio *bio;
2087
2088 bio_list_init(&bio_list);
2089
2090 ret = alloc_rbio_pages(rbio);
2091 if (ret)
2092 goto cleanup;
2093
2094 atomic_set(&rbio->error, 0);
2095
2096 /*
2097 * Read everything that hasn't failed. This time we do not trust
2098 * any cached sector: a cached page may hold stale data that the
2099 * higher layer never asked to re-read, so we always re-read
2100 * everything in the recovery path. Stripes that are already
2101 * known to be bad are only counted into rbio->error and
2102 * skipped.
2103 */
2104 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2105 if (rbio->faila == stripe || rbio->failb == stripe) {
2106 atomic_inc(&rbio->error);
2107 continue;
2108 }
2109
2110 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2111 ret = rbio_add_io_page(rbio, &bio_list,
2112 rbio_stripe_page(rbio, stripe, pagenr),
2113 stripe, pagenr, rbio->stripe_len);
2114 if (ret < 0)
2115 goto cleanup;
2116 }
2117 }
2118
2119 bios_to_read = bio_list_size(&bio_list);
2120 if (!bios_to_read) {
2121 /*
2122 * we might have no bios to read just because the pages
2123 * were up to date, or we might have no bios to read because
2124 * the devices were gone.
2125 */
2126 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2127 __raid_recover_end_io(rbio);
2128 return 0;
2129 } else {
2130 goto cleanup;
2131 }
2132 }
2133
2134 /*
2135 * the bbio may be freed once we submit the last bio. Make sure
2136 * not to touch it after that
2137 */
2138 atomic_set(&rbio->stripes_pending, bios_to_read);
2139 while ((bio = bio_list_pop(&bio_list))) {
2140 bio->bi_private = rbio;
2141 bio->bi_end_io = raid_recover_end_io;
2142 bio->bi_opf = REQ_OP_READ;
2143
2144 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2145
2146 submit_bio(bio);
2147 }
2148
2149 return 0;
2150
2151 cleanup:
2152 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2153 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2154 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2155
2156 while ((bio = bio_list_pop(&bio_list)))
2157 bio_put(bio);
2158
2159 return -EIO;
2160 }
2161
2162 /*
2163 * the main entry point for reads from the higher layers. This
2164 * is really only called when the normal read path had a failure,
2165 * so we assume the bio they send down corresponds to a failed part
2166 * of the drive.
2167 */
2168 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2169 struct btrfs_bio *bbio, u64 stripe_len,
2170 int mirror_num, int generic_io)
2171 {
2172 struct btrfs_raid_bio *rbio;
2173 int ret;
2174
2175 if (generic_io) {
2176 ASSERT(bbio->mirror_num == mirror_num);
2177 btrfs_io_bio(bio)->mirror_num = mirror_num;
2178 }
2179
2180 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2181 if (IS_ERR(rbio)) {
2182 if (generic_io)
2183 btrfs_put_bbio(bbio);
2184 return PTR_ERR(rbio);
2185 }
2186
2187 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2188 rbio_add_bio(rbio, bio);
2189
2190 rbio->faila = find_logical_bio_stripe(rbio, bio);
2191 if (rbio->faila == -1) {
2192 btrfs_warn(fs_info,
2193 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2194 __func__, (u64)bio->bi_iter.bi_sector << 9,
2195 (u64)bio->bi_iter.bi_size, bbio->map_type);
2196 if (generic_io)
2197 btrfs_put_bbio(bbio);
2198 kfree(rbio);
2199 return -EIO;
2200 }
2201
2202 if (generic_io) {
2203 btrfs_bio_counter_inc_noblocked(fs_info);
2204 rbio->generic_bio_cnt = 1;
2205 } else {
2206 btrfs_get_bbio(bbio);
2207 }
2208
2209 /*
2210 * Loop retry:
2211 * for 'mirror_num == 2', reconstruct from all other stripes.
2212 * for 'mirror_num > 2', select a stripe to fail on every retry.
2213 */
2214 if (mirror_num > 2) {
2215 /*
2216 * 'mirror_num == 3' fails the P stripe and reconstructs
2217 * from the Q stripe; 'mirror_num > 3' fails a data stripe
2218 * and reconstructs from the P and Q stripes.
2219 */
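/*
 * For example, with a hypothetical 6 device stripe (4 data + P + Q,
 * real_stripes == 6): mirror_num == 3 picks failb == 4, the P
 * stripe, and mirror_num == 4 picks failb == 3, the last data
 * stripe. If failb lands at or below faila it is shifted down by
 * one, so the stripe that already failed is never picked twice.
 */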
2220 rbio->failb = rbio->real_stripes - (mirror_num - 1);
2221 ASSERT(rbio->failb > 0);
2222 if (rbio->failb <= rbio->faila)
2223 rbio->failb--;
2224 }
2225
2226 ret = lock_stripe_add(rbio);
2227
2228 /*
2229 * __raid56_parity_recover will end the bio with
2230 * any errors it hits. We don't want to return
2231 * its error value up the stack because our caller
2232 * will end up calling bio_endio with any nonzero
2233 * return
2234 */
2235 if (ret == 0)
2236 __raid56_parity_recover(rbio);
2237 /*
2238 * Otherwise our rbio has been added to the list of
2239 * rbios that will be handled after the current
2240 * lock owner is done
2241 */
2242 return 0;
2243
2244 }
2245
2246 static void rmw_work(struct btrfs_work *work)
2247 {
2248 struct btrfs_raid_bio *rbio;
2249
2250 rbio = container_of(work, struct btrfs_raid_bio, work);
2251 raid56_rmw_stripe(rbio);
2252 }
2253
2254 static void read_rebuild_work(struct btrfs_work *work)
2255 {
2256 struct btrfs_raid_bio *rbio;
2257
2258 rbio = container_of(work, struct btrfs_raid_bio, work);
2259 __raid56_parity_recover(rbio);
2260 }
2261
2262 /*
2263 * The following code is used to scrub/replace the parity stripe
2264 *
2265 * Caller must have already increased bio_counter for getting @bbio.
2266 *
2267 * Note: We must make sure all the pages added to the scrub/replace
2268 * raid bio are correct and do not change during the scrub/replace,
2269 * i.e. they only hold metadata or file data covered by a checksum.
2270 */
2271
2272 struct btrfs_raid_bio *
2273 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2274 struct btrfs_bio *bbio, u64 stripe_len,
2275 struct btrfs_device *scrub_dev,
2276 unsigned long *dbitmap, int stripe_nsectors)
2277 {
2278 struct btrfs_raid_bio *rbio;
2279 int i;
2280
2281 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2282 if (IS_ERR(rbio))
2283 return NULL;
2284 bio_list_add(&rbio->bio_list, bio);
2285 /*
2286 * This is a special bio which is only used to hold the completion
2287 * handler and make the scrub rbio look like the other rbio types
2288 */
2289 ASSERT(!bio->bi_iter.bi_size);
2290 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2291
2292 /*
2293 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2294 * to the end position, so this search can start from the first parity
2295 * stripe.
2296 */
2297 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2298 if (bbio->stripes[i].dev == scrub_dev) {
2299 rbio->scrubp = i;
2300 break;
2301 }
2302 }
2303 ASSERT(i < rbio->real_stripes);
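/* scrubp is the parity stripe (P or Q) that lives on scrub_dev */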
2304
2305 /* Currently we only support the case where sectorsize equals PAGE_SIZE */
2306 ASSERT(fs_info->sectorsize == PAGE_SIZE);
2307 ASSERT(rbio->stripe_npages == stripe_nsectors);
2308 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2309
2310 /*
2311 * We have already increased bio_counter when getting bbio, record it
2312 * so we can drop the counter at rbio_orig_end_io().
2313 */
2314 rbio->generic_bio_cnt = 1;
2315
2316 return rbio;
2317 }
2318
2319 /* Used for both parity scrub and rebuilding a missing device. */
2320 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2321 u64 logical)
2322 {
2323 int stripe_offset;
2324 int index;
2325
2326 ASSERT(logical >= rbio->bbio->raid_map[0]);
2327 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2328 rbio->stripe_len * rbio->nr_data);
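/* index the page by its offset (in pages) from the start of the full stripe */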
2329 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2330 index = stripe_offset >> PAGE_SHIFT;
2331 rbio->bio_pages[index] = page;
2332 }
2333
2334 /*
2335 * We only scrub the parity for horizontal stripes where we have
2336 * correct data, so we needn't allocate pages for all the stripes.
2337 */
2338 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2339 {
2340 int i;
2341 int bit;
2342 int index;
2343 struct page *page;
2344
2345 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2346 for (i = 0; i < rbio->real_stripes; i++) {
2347 index = i * rbio->stripe_npages + bit;
2348 if (rbio->stripe_pages[index])
2349 continue;
2350
2351 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2352 if (!page)
2353 return -ENOMEM;
2354 rbio->stripe_pages[index] = page;
2355 }
2356 }
2357 return 0;
2358 }
2359
2360 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2361 int need_check)
2362 {
2363 struct btrfs_bio *bbio = rbio->bbio;
2364 void **pointers = rbio->finish_pointers;
2365 unsigned long *pbitmap = rbio->finish_pbitmap;
2366 int nr_data = rbio->nr_data;
2367 int stripe;
2368 int pagenr;
2369 bool has_qstripe;
2370 struct page *p_page = NULL;
2371 struct page *q_page = NULL;
2372 struct bio_list bio_list;
2373 struct bio *bio;
2374 int is_replace = 0;
2375 int ret;
2376
2377 bio_list_init(&bio_list);
2378
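/* one parity stripe means raid5 (P only), two means raid6 (P and Q) */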
2379 if (rbio->real_stripes - rbio->nr_data == 1)
2380 has_qstripe = false;
2381 else if (rbio->real_stripes - rbio->nr_data == 2)
2382 has_qstripe = true;
2383 else
2384 BUG();
2385
2386 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2387 is_replace = 1;
2388 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2389 }
2390
2391 /*
2392 * The higher layers (the scrubber) are unlikely to
2393 * use this area of the disk again soon, so don't
2394 * cache it.
2395 */
2396 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2397
2398 if (!need_check)
2399 goto writeback;
2400
2401 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2402 if (!p_page)
2403 goto cleanup;
2404 SetPageUptodate(p_page);
2405
2406 if (has_qstripe) {
2407 /* RAID6, allocate and map temp space for the Q stripe */
2408 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2409 if (!q_page) {
2410 __free_page(p_page);
2411 goto cleanup;
2412 }
2413 SetPageUptodate(q_page);
2414 pointers[rbio->real_stripes - 1] = kmap(q_page);
2415 }
2416
2417 atomic_set(&rbio->error, 0);
2418
2419 /* Map the parity stripe just once */
2420 pointers[nr_data] = kmap(p_page);
2421
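/*
 * For every page number with data, pointers[0..nr_data-1] hold the
 * data pages, pointers[nr_data] the freshly computed P and, for
 * raid6, pointers[real_stripes - 1] the computed Q. The result is
 * then compared against the parity page read from disk.
 */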
2422 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2423 struct page *p;
2424 void *parity;
2425 /* first collect one page from each data stripe */
2426 for (stripe = 0; stripe < nr_data; stripe++) {
2427 p = page_in_rbio(rbio, stripe, pagenr, 0);
2428 pointers[stripe] = kmap(p);
2429 }
2430
2431 if (has_qstripe) {
2432 /* RAID6, call the library function to fill in our P/Q */
2433 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2434 pointers);
2435 } else {
2436 /* raid5 */
2437 copy_page(pointers[nr_data], pointers[0]);
2438 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2439 }
2440
2441 /* Check scrubbing parity and repair it */
2442 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2443 parity = kmap(p);
2444 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2445 copy_page(parity, pointers[rbio->scrubp]);
2446 else
2447 /* Parity is correct, no need to write it back */
2448 bitmap_clear(rbio->dbitmap, pagenr, 1);
2449 kunmap(p);
2450
2451 for (stripe = 0; stripe < nr_data; stripe++)
2452 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2453 }
2454
2455 kunmap(p_page);
2456 __free_page(p_page);
2457 if (q_page) {
2458 kunmap(q_page);
2459 __free_page(q_page);
2460 }
2461
2462 writeback:
2463 /*
2464 * time to start writing. Make bios for the pages of the scrubbed
2465 * parity stripe that still need a rewrite, i.e. the bits left set
2466 * in dbitmap. Ignore everything else.
2467 */
2468 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2469 struct page *page;
2470
2471 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2472 ret = rbio_add_io_page(rbio, &bio_list,
2473 page, rbio->scrubp, pagenr, rbio->stripe_len);
2474 if (ret)
2475 goto cleanup;
2476 }
2477
2478 if (!is_replace)
2479 goto submit_write;
2480
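/*
 * For dev-replace, also queue writes of the scrubbed parity to the
 * replacement target. pbitmap was copied before the check loop, so
 * the target gets every parity page, even ones that were already
 * correct on the source device.
 */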
2481 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2482 struct page *page;
2483
2484 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2485 ret = rbio_add_io_page(rbio, &bio_list, page,
2486 bbio->tgtdev_map[rbio->scrubp],
2487 pagenr, rbio->stripe_len);
2488 if (ret)
2489 goto cleanup;
2490 }
2491
2492 submit_write:
2493 nr_data = bio_list_size(&bio_list);
2494 if (!nr_data) {
2495 /* Every parity is right */
2496 rbio_orig_end_io(rbio, BLK_STS_OK);
2497 return;
2498 }
2499
2500 atomic_set(&rbio->stripes_pending, nr_data);
2501
2502 while ((bio = bio_list_pop(&bio_list))) {
2503 bio->bi_private = rbio;
2504 bio->bi_end_io = raid_write_end_io;
2505 bio->bi_opf = REQ_OP_WRITE;
2506
2507 submit_bio(bio);
2508 }
2509 return;
2510
2511 cleanup:
2512 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2513
2514 while ((bio = bio_list_pop(&bio_list)))
2515 bio_put(bio);
2516 }
2517
2518 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2519 {
2520 if (stripe >= 0 && stripe < rbio->nr_data)
2521 return 1;
2522 return 0;
2523 }
2524
2525 /*
2526 * While we're doing the parity check and repair, we could have errors
2527 * in reading pages off the disk. This checks for errors and if we're
2528 * not able to read the page it'll trigger parity reconstruction. The
2529 * parity scrub will be finished after we've reconstructed the failed
2530 * stripes
2531 */
2532 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2533 {
2534 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2535 goto cleanup;
2536
2537 if (rbio->faila >= 0 || rbio->failb >= 0) {
2538 int dfail = 0, failp = -1;
2539
2540 if (is_data_stripe(rbio, rbio->faila))
2541 dfail++;
2542 else if (is_parity_stripe(rbio->faila))
2543 failp = rbio->faila;
2544
2545 if (is_data_stripe(rbio, rbio->failb))
2546 dfail++;
2547 else if (is_parity_stripe(rbio->failb))
2548 failp = rbio->failb;
2549
2550 /*
2551 * We cannot use the parity that is being scrubbed to repair
2552 * data, so our repair capability is reduced by one.
2553 * (In the case of RAID5, we cannot repair anything.)
2554 */
2555 if (dfail > rbio->bbio->max_errors - 1)
2556 goto cleanup;
2557
2558 /*
2559 * If all the data is good and only the parity is bad,
2560 * just repair the parity.
2561 */
2562 if (dfail == 0) {
2563 finish_parity_scrub(rbio, 0);
2564 return;
2565 }
2566
2567 /*
2568 * Here we have one corrupted data stripe and one corrupted
2569 * parity on RAID6. If the corrupted parity is the one being
2570 * scrubbed, we can use the other parity to repair the data;
2571 * otherwise we cannot repair the data stripe.
2572 */
2573 if (failp != rbio->scrubp)
2574 goto cleanup;
2575
2576 __raid_recover_end_io(rbio);
2577 } else {
2578 finish_parity_scrub(rbio, 1);
2579 }
2580 return;
2581
2582 cleanup:
2583 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2584 }
2585
2586 /*
2587 * end io for the read phase of the scrub cycle. All the bios here
2588 * are physical stripe bios we've read from the disk so we can
2589 * recalculate the parity of the stripe.
2590 *
2591 * This will usually kick off finish_parity_scrub once all the bios
2592 * are read in, but it may trigger parity reconstruction on errors
2593 */
2594 static void raid56_parity_scrub_end_io(struct bio *bio)
2595 {
2596 struct btrfs_raid_bio *rbio = bio->bi_private;
2597
2598 if (bio->bi_status)
2599 fail_bio_stripe(rbio, bio);
2600 else
2601 set_bio_pages_uptodate(bio);
2602
2603 bio_put(bio);
2604
2605 if (!atomic_dec_and_test(&rbio->stripes_pending))
2606 return;
2607
2608 /*
2609 * this will normally call finish_parity_scrub to start our
2610 * write but if there are any failed stripes we'll reconstruct
2611 * from parity first
2612 */
2613 validate_rbio_for_parity_scrub(rbio);
2614 }
2615
2616 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2617 {
2618 int bios_to_read = 0;
2619 struct bio_list bio_list;
2620 int ret;
2621 int pagenr;
2622 int stripe;
2623 struct bio *bio;
2624
2625 bio_list_init(&bio_list);
2626
2627 ret = alloc_rbio_essential_pages(rbio);
2628 if (ret)
2629 goto cleanup;
2630
2631 atomic_set(&rbio->error, 0);
2632 /*
2633 * build a list of bios to read all the missing parts of this
2634 * stripe
2635 */
2636 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2637 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2638 struct page *page;
2639 /*
2640 * we want to find all the pages missing from
2641 * the rbio and read them from the disk. If
2642 * page_in_rbio finds a page in the bio list
2643 * we don't need to read it off the stripe.
2644 */
2645 page = page_in_rbio(rbio, stripe, pagenr, 1);
2646 if (page)
2647 continue;
2648
2649 page = rbio_stripe_page(rbio, stripe, pagenr);
2650 /*
2651 * the bio cache may have handed us an uptodate
2652 * page. If so, be happy and use it
2653 */
2654 if (PageUptodate(page))
2655 continue;
2656
2657 ret = rbio_add_io_page(rbio, &bio_list, page,
2658 stripe, pagenr, rbio->stripe_len);
2659 if (ret)
2660 goto cleanup;
2661 }
2662 }
2663
2664 bios_to_read = bio_list_size(&bio_list);
2665 if (!bios_to_read) {
2666 /*
2667 * this can happen if others have merged with
2668 * us, it means there is nothing left to read.
2669 * But if there are missing devices it may not
2670 * be safe to start the parity write yet.
2671 */
2672 goto finish;
2673 }
2674
2675 /*
2676 * the bbio may be freed once we submit the last bio. Make sure
2677 * not to touch it after that
2678 */
2679 atomic_set(&rbio->stripes_pending, bios_to_read);
2680 while ((bio = bio_list_pop(&bio_list))) {
2681 bio->bi_private = rbio;
2682 bio->bi_end_io = raid56_parity_scrub_end_io;
2683 bio->bi_opf = REQ_OP_READ;
2684
2685 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2686
2687 submit_bio(bio);
2688 }
2689 /* the actual write will happen once the reads are done */
2690 return;
2691
2692 cleanup:
2693 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2694
2695 while ((bio = bio_list_pop(&bio_list)))
2696 bio_put(bio);
2697
2698 return;
2699
2700 finish:
2701 validate_rbio_for_parity_scrub(rbio);
2702 }
2703
2704 static void scrub_parity_work(struct btrfs_work *work)
2705 {
2706 struct btrfs_raid_bio *rbio;
2707
2708 rbio = container_of(work, struct btrfs_raid_bio, work);
2709 raid56_parity_scrub_stripe(rbio);
2710 }
2711
2712 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2713 {
2714 if (!lock_stripe_add(rbio))
2715 start_async_work(rbio, scrub_parity_work);
2716 }
2717
2718 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2719
2720 struct btrfs_raid_bio *
2721 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2722 struct btrfs_bio *bbio, u64 length)
2723 {
2724 struct btrfs_raid_bio *rbio;
2725
2726 rbio = alloc_rbio(fs_info, bbio, length);
2727 if (IS_ERR(rbio))
2728 return NULL;
2729
2730 rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2731 bio_list_add(&rbio->bio_list, bio);
2732 /*
2733 * This is a special bio which is only used to hold the completion
2734 * handler and make this rbio look like the other rbio types
2735 */
2736 ASSERT(!bio->bi_iter.bi_size);
2737
2738 rbio->faila = find_logical_bio_stripe(rbio, bio);
2739 if (rbio->faila == -1) {
2740 BUG();
2741 kfree(rbio);
2742 return NULL;
2743 }
2744
2745 /*
2746 * When we get bbio, we have already increased bio_counter, record it
2747 * so we can drop the counter at rbio_orig_end_io()
2748 */
2749 rbio->generic_bio_cnt = 1;
2750
2751 return rbio;
2752 }
2753
2754 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2755 {
2756 if (!lock_stripe_add(rbio))
2757 start_async_work(rbio, read_rebuild_work);
2758 }
2759