1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2012 Fusion-io All rights reserved.
4 * Copyright (C) 2012 Intel Corp. All rights reserved.
5 */
6
7 #include <linux/sched.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/raid/pq.h>
12 #include <linux/hash.h>
13 #include <linux/list_sort.h>
14 #include <linux/raid/xor.h>
15 #include <linux/mm.h>
16 #include "ctree.h"
17 #include "disk-io.h"
18 #include "volumes.h"
19 #include "raid56.h"
20 #include "async-thread.h"
21
22 /* set when additional merges to this rbio are not allowed */
23 #define RBIO_RMW_LOCKED_BIT 1
24
25 /*
26 * set when this rbio is sitting in the hash, but it is just a cache
27 * of past RMW
28 */
29 #define RBIO_CACHE_BIT 2
30
31 /*
32 * set when it is safe to trust the stripe_pages for caching
33 */
34 #define RBIO_CACHE_READY_BIT 3
35
36 #define RBIO_CACHE_SIZE 1024
37
38 #define BTRFS_STRIPE_HASH_TABLE_BITS 11
39
40 /* Used by the raid56 code to lock stripes for read/modify/write */
41 struct btrfs_stripe_hash {
42 struct list_head hash_list;
43 spinlock_t lock;
44 };
45
46 /* Used by the raid56 code to lock stripes for read/modify/write */
47 struct btrfs_stripe_hash_table {
48 struct list_head stripe_cache;
49 spinlock_t cache_lock;
50 int cache_size;
51 struct btrfs_stripe_hash table[];
52 };
53
54 enum btrfs_rbio_ops {
55 BTRFS_RBIO_WRITE,
56 BTRFS_RBIO_READ_REBUILD,
57 BTRFS_RBIO_PARITY_SCRUB,
58 BTRFS_RBIO_REBUILD_MISSING,
59 };
60
61 struct btrfs_raid_bio {
62 struct btrfs_fs_info *fs_info;
63 struct btrfs_bio *bbio;
64
65 /* while we're doing rmw on a stripe
66 * we put it into a hash table so we can
67 * lock the stripe and merge more rbios
68 * into it.
69 */
70 struct list_head hash_list;
71
72 /*
73 * LRU list for the stripe cache
74 */
75 struct list_head stripe_cache;
76
77 /*
78 * for scheduling work in the helper threads
79 */
80 struct btrfs_work work;
81
82 /*
83 * bio list and bio_list_lock are used
84 * to add more bios into the stripe
85 * in hopes of avoiding the full rmw
86 */
87 struct bio_list bio_list;
88 spinlock_t bio_list_lock;
89
90 /* also protected by the bio_list_lock, the
91 * plug list is used by the plugging code
92 * to collect partial bios while plugged. The
93 * stripe locking code also uses it to hand off
94 * the stripe lock to the next pending IO
95 */
96 struct list_head plug_list;
97
98 /*
99 * flags that tell us if it is safe to
100 * merge with this bio
101 */
102 unsigned long flags;
103
104 /* size of each individual stripe on disk */
105 int stripe_len;
106
107 /* number of data stripes (no p/q) */
108 int nr_data;
109
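/*
 * total number of stripes on disk, including parity
 * (bbio->num_stripes minus any replace target devices)
 */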
110 int real_stripes;
111
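/* number of pages in each on-disk stripe (stripe_len / PAGE_SIZE, rounded up) */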
112 int stripe_npages;
113 /*
114 * which operation we're performing. A parity rebuild
115 * for a read from higher up is handled
116 * differently from a parity rebuild as part of
117 * rmw.
118 */
119 enum btrfs_rbio_ops operation;
120
121 /* first bad stripe */
122 int faila;
123
124 /* second bad stripe (for raid6 use) */
125 int failb;
126
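/* stripe number of the parity stripe being checked; only used by the parity scrub path */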
127 int scrubp;
128 /*
129 * number of pages needed to represent the full
130 * stripe
131 */
132 int nr_pages;
133
134 /*
135 * size of all the bios in the bio_list. This
136 * helps us decide if the rbio maps to a full
137 * stripe or not
138 */
139 int bio_list_bytes;
140
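/* number of fs_info bio counter references we hold; dropped in rbio_orig_end_io() */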
141 int generic_bio_cnt;
142
143 refcount_t refs;
144
145 atomic_t stripes_pending;
146
147 atomic_t error;
148 /*
149 * these are two arrays of pointers. We allocate the
150 * rbio big enough to hold them both and set up their
151 * locations when the rbio is allocated.
152 */
153
154 /* pointers to pages that we allocated for
155 * reading/writing stripes directly from the disk (including P/Q)
156 */
157 struct page **stripe_pages;
158
159 /*
160 * pointers to the pages in the bio_list. Stored
161 * here for faster lookup
162 */
163 struct page **bio_pages;
164
165 /*
166 * bitmap to record which horizontal stripe has data
167 */
168 unsigned long *dbitmap;
169
170 /* allocated with real_stripes-many pointers for finish_*() calls */
171 void **finish_pointers;
172
173 /* allocated with stripe_npages-many bits for finish_*() calls */
174 unsigned long *finish_pbitmap;
175 };
176
177 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
178 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
179 static void rmw_work(struct btrfs_work *work);
180 static void read_rebuild_work(struct btrfs_work *work);
181 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
182 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
183 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
184 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
185 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
186
187 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
188 int need_check);
189 static void scrub_parity_work(struct btrfs_work *work);
190
191 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
192 {
193 btrfs_init_work(&rbio->work, work_func, NULL, NULL);
194 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
195 }
196
197 /*
198 * the stripe hash table is used for locking, and to collect
199 * bios in hopes of making a full stripe
200 */
201 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
202 {
203 struct btrfs_stripe_hash_table *table;
204 struct btrfs_stripe_hash_table *x;
205 struct btrfs_stripe_hash *cur;
206 struct btrfs_stripe_hash *h;
207 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
208 int i;
209 int table_size;
210
211 if (info->stripe_hash_table)
212 return 0;
213
214 /*
215 * The table is large, starting with order 4 and can go as high as
216 * order 7 in case lock debugging is turned on.
217 *
218 * Try harder to allocate and fall back to vmalloc to lower the chance
219 * of a failing mount.
220 */
221 table_size = sizeof(*table) + sizeof(*h) * num_entries;
222 table = kvzalloc(table_size, GFP_KERNEL);
223 if (!table)
224 return -ENOMEM;
225
226 spin_lock_init(&table->cache_lock);
227 INIT_LIST_HEAD(&table->stripe_cache);
228
229 h = table->table;
230
231 for (i = 0; i < num_entries; i++) {
232 cur = h + i;
233 INIT_LIST_HEAD(&cur->hash_list);
234 spin_lock_init(&cur->lock);
235 }
236
237 x = cmpxchg(&info->stripe_hash_table, NULL, table);
238 if (x)
239 kvfree(x);
240 return 0;
241 }
242
243 /*
244 * caching an rbio means to copy anything from the
245 * bio_pages array into the stripe_pages array. We
246 * use the page uptodate bit in the stripe cache array
247 * to indicate if it has valid data
248 *
249 * once the caching is done, we set the cache ready
250 * bit.
251 */
252 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
253 {
254 int i;
255 char *s;
256 char *d;
257 int ret;
258
259 ret = alloc_rbio_pages(rbio);
260 if (ret)
261 return;
262
263 for (i = 0; i < rbio->nr_pages; i++) {
264 if (!rbio->bio_pages[i])
265 continue;
266
267 s = kmap(rbio->bio_pages[i]);
268 d = kmap(rbio->stripe_pages[i]);
269
270 copy_page(d, s);
271
272 kunmap(rbio->bio_pages[i]);
273 kunmap(rbio->stripe_pages[i]);
274 SetPageUptodate(rbio->stripe_pages[i]);
275 }
276 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
277 }
278
279 /*
280 * we hash on the first logical address of the stripe
281 */
282 static int rbio_bucket(struct btrfs_raid_bio *rbio)
283 {
284 u64 num = rbio->bbio->raid_map[0];
285
286 /*
287 * we shift down quite a bit. We're using byte
288 * addressing, and most of the lower bits are zeros.
289 * This tends to upset hash_64, and it consistently
290 * returns just one or two different values.
291 *
292 * shifting off the lower bits fixes things.
293 */
294 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
295 }
296
297 /*
298 * stealing an rbio means taking all the uptodate pages from the stripe
299 * array in the source rbio and putting them into the destination rbio
300 */
301 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
302 {
303 int i;
304 struct page *s;
305 struct page *d;
306
307 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
308 return;
309
310 for (i = 0; i < dest->nr_pages; i++) {
311 s = src->stripe_pages[i];
312 if (!s || !PageUptodate(s)) {
313 continue;
314 }
315
316 d = dest->stripe_pages[i];
317 if (d)
318 __free_page(d);
319
320 dest->stripe_pages[i] = s;
321 src->stripe_pages[i] = NULL;
322 }
323 }
324
325 /*
326 * merging means we take the bio_list from the victim and
327 * splice it into the destination. The victim should
328 * be discarded afterwards.
329 *
330 * must be called with dest->bio_list_lock held
331 */
332 static void merge_rbio(struct btrfs_raid_bio *dest,
333 struct btrfs_raid_bio *victim)
334 {
335 bio_list_merge(&dest->bio_list, &victim->bio_list);
336 dest->bio_list_bytes += victim->bio_list_bytes;
337 /* Also inherit the bitmaps from @victim. */
338 bitmap_or(dest->dbitmap, victim->dbitmap, dest->dbitmap,
339 dest->stripe_npages);
340 dest->generic_bio_cnt += victim->generic_bio_cnt;
341 bio_list_init(&victim->bio_list);
342 }
343
344 /*
345 * used to prune items that are in the cache. The caller
346 * must hold the hash table lock.
347 */
348 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
349 {
350 int bucket = rbio_bucket(rbio);
351 struct btrfs_stripe_hash_table *table;
352 struct btrfs_stripe_hash *h;
353 int freeit = 0;
354
355 /*
356 * check the bit again under the hash table lock.
357 */
358 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
359 return;
360
361 table = rbio->fs_info->stripe_hash_table;
362 h = table->table + bucket;
363
364 /* hold the lock for the bucket because we may be
365 * removing it from the hash table
366 */
367 spin_lock(&h->lock);
368
369 /*
370 * hold the lock for the bio list because we need
371 * to make sure the bio list is empty
372 */
373 spin_lock(&rbio->bio_list_lock);
374
375 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
376 list_del_init(&rbio->stripe_cache);
377 table->cache_size -= 1;
378 freeit = 1;
379
380 /* if the bio list isn't empty, this rbio is
381 * still involved in an IO. We take it out
382 * of the cache list, and drop the ref that
383 * was held for the list.
384 *
385 * If the bio_list was empty, we also remove
386 * the rbio from the hash_table, and drop
387 * the corresponding ref
388 */
389 if (bio_list_empty(&rbio->bio_list)) {
390 if (!list_empty(&rbio->hash_list)) {
391 list_del_init(&rbio->hash_list);
392 refcount_dec(&rbio->refs);
393 BUG_ON(!list_empty(&rbio->plug_list));
394 }
395 }
396 }
397
398 spin_unlock(&rbio->bio_list_lock);
399 spin_unlock(&h->lock);
400
401 if (freeit)
402 __free_raid_bio(rbio);
403 }
404
405 /*
406 * prune a given rbio from the cache
407 */
408 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
409 {
410 struct btrfs_stripe_hash_table *table;
411 unsigned long flags;
412
413 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
414 return;
415
416 table = rbio->fs_info->stripe_hash_table;
417
418 spin_lock_irqsave(&table->cache_lock, flags);
419 __remove_rbio_from_cache(rbio);
420 spin_unlock_irqrestore(&table->cache_lock, flags);
421 }
422
423 /*
424 * remove everything in the cache
425 */
426 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
427 {
428 struct btrfs_stripe_hash_table *table;
429 unsigned long flags;
430 struct btrfs_raid_bio *rbio;
431
432 table = info->stripe_hash_table;
433
434 spin_lock_irqsave(&table->cache_lock, flags);
435 while (!list_empty(&table->stripe_cache)) {
436 rbio = list_entry(table->stripe_cache.next,
437 struct btrfs_raid_bio,
438 stripe_cache);
439 __remove_rbio_from_cache(rbio);
440 }
441 spin_unlock_irqrestore(&table->cache_lock, flags);
442 }
443
444 /*
445 * remove all cached entries and free the hash table
446 * used by unmount
447 */
448 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
449 {
450 if (!info->stripe_hash_table)
451 return;
452 btrfs_clear_rbio_cache(info);
453 kvfree(info->stripe_hash_table);
454 info->stripe_hash_table = NULL;
455 }
456
457 /*
458 * insert an rbio into the stripe cache. It
459 * must have already been prepared by calling
460 * cache_rbio_pages
461 *
462 * If this rbio was already cached, it gets
463 * moved to the front of the lru.
464 *
465 * If the size of the rbio cache is too big, we
466 * prune an item.
467 */
468 static void cache_rbio(struct btrfs_raid_bio *rbio)
469 {
470 struct btrfs_stripe_hash_table *table;
471 unsigned long flags;
472
473 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
474 return;
475
476 table = rbio->fs_info->stripe_hash_table;
477
478 spin_lock_irqsave(&table->cache_lock, flags);
479 spin_lock(&rbio->bio_list_lock);
480
481 /* bump our ref if we were not in the list before */
482 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
483 refcount_inc(&rbio->refs);
484
485 if (!list_empty(&rbio->stripe_cache)) {
486 list_move(&rbio->stripe_cache, &table->stripe_cache);
487 } else {
488 list_add(&rbio->stripe_cache, &table->stripe_cache);
489 table->cache_size += 1;
490 }
491
492 spin_unlock(&rbio->bio_list_lock);
493
494 if (table->cache_size > RBIO_CACHE_SIZE) {
495 struct btrfs_raid_bio *found;
496
497 found = list_entry(table->stripe_cache.prev,
498 struct btrfs_raid_bio,
499 stripe_cache);
500
501 if (found != rbio)
502 __remove_rbio_from_cache(found);
503 }
504
505 spin_unlock_irqrestore(&table->cache_lock, flags);
506 }
507
508 /*
509 * helper function to run the xor_blocks api. It is only
510 * able to do MAX_XOR_BLOCKS at a time, so we need to
511 * loop through.
512 */
513 static void run_xor(void **pages, int src_cnt, ssize_t len)
514 {
515 int src_off = 0;
516 int xor_src_cnt = 0;
517 void *dest = pages[src_cnt];
518
519 while (src_cnt > 0) {
520 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
521 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
522
523 src_cnt -= xor_src_cnt;
524 src_off += xor_src_cnt;
525 }
526 }
527
528 /*
529 * Returns true if the bio list inside this rbio covers an entire stripe (no
530 * rmw required).
531 */
532 static int rbio_is_full(struct btrfs_raid_bio *rbio)
533 {
534 unsigned long flags;
535 unsigned long size = rbio->bio_list_bytes;
536 int ret = 1;
537
538 spin_lock_irqsave(&rbio->bio_list_lock, flags);
539 if (size != rbio->nr_data * rbio->stripe_len)
540 ret = 0;
541 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
542 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
543
544 return ret;
545 }
546
547 /*
548 * returns 1 if it is safe to merge two rbios together.
549 * The merging is safe if the two rbios correspond to
550 * the same stripe and if they are both going in the same
551 * direction (read vs write), and if neither one is
552 * locked for final IO
553 *
554 * The caller is responsible for locking such that
555 * rmw_locked is safe to test
556 */
557 static int rbio_can_merge(struct btrfs_raid_bio *last,
558 struct btrfs_raid_bio *cur)
559 {
560 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
561 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
562 return 0;
563
564 /*
565 * we can't merge with cached rbios, since the
566 * idea is that when we merge the destination
567 * rbio is going to run our IO for us. We can
568 * steal from cached rbios though, other functions
569 * handle that.
570 */
571 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
572 test_bit(RBIO_CACHE_BIT, &cur->flags))
573 return 0;
574
575 if (last->bbio->raid_map[0] !=
576 cur->bbio->raid_map[0])
577 return 0;
578
579 /* we can't merge with different operations */
580 if (last->operation != cur->operation)
581 return 0;
582 /*
583 * A parity scrub needs to read the full stripe from the drive,
584 * then check and repair the parity and write the new results.
585 *
586 * We're not allowed to add any new bios to the
587 * bio list here, anyone else that wants to
588 * change this stripe needs to do their own rmw.
589 */
590 if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
591 return 0;
592
593 if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
594 return 0;
595
596 if (last->operation == BTRFS_RBIO_READ_REBUILD) {
597 int fa = last->faila;
598 int fb = last->failb;
599 int cur_fa = cur->faila;
600 int cur_fb = cur->failb;
601
602 if (last->faila >= last->failb) {
603 fa = last->failb;
604 fb = last->faila;
605 }
606
607 if (cur->faila >= cur->failb) {
608 cur_fa = cur->failb;
609 cur_fb = cur->faila;
610 }
611
612 if (fa != cur_fa || fb != cur_fb)
613 return 0;
614 }
615 return 1;
616 }
617
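/* index into the flat stripe_pages array for page 'index' of the given stripe */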
618 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
619 int index)
620 {
621 return stripe * rbio->stripe_npages + index;
622 }
623
624 /*
625 * these are just the pages from the rbio array, not from anything
626 * the FS sent down to us
627 */
628 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
629 int index)
630 {
631 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
632 }
633
634 /*
635 * helper to index into the pstripe
636 */
637 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
638 {
639 return rbio_stripe_page(rbio, rbio->nr_data, index);
640 }
641
642 /*
643 * helper to index into the qstripe, returns null
644 * if there is no qstripe
645 */
646 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
647 {
648 if (rbio->nr_data + 1 == rbio->real_stripes)
649 return NULL;
650 return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
651 }
652
653 /*
654 * The first stripe in the table for a logical address
655 * has the lock. rbios are added in one of three ways:
656 *
657 * 1) Nobody has the stripe locked yet. The rbio is given
658 * the lock and 0 is returned. The caller must start the IO
659 * themselves.
660 *
661 * 2) Someone has the stripe locked, but we're able to merge
662 * with the lock owner. The rbio is freed and the IO will
663 * start automatically along with the existing rbio. 1 is returned.
664 *
665 * 3) Someone has the stripe locked, but we're not able to merge.
666 * The rbio is added to the lock owner's plug list, or merged into
667 * an rbio already on the plug list. When the lock owner unlocks,
668 * the next rbio on the list is run and the IO is started automatically.
669 * 1 is returned
670 *
671 * If we return 0, the caller still owns the rbio and must continue with
672 * IO submission. If we return 1, the caller must assume the rbio has
673 * already been freed.
674 */
675 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
676 {
677 int bucket = rbio_bucket(rbio);
678 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
679 struct btrfs_raid_bio *cur;
680 struct btrfs_raid_bio *pending;
681 unsigned long flags;
682 struct btrfs_raid_bio *freeit = NULL;
683 struct btrfs_raid_bio *cache_drop = NULL;
684 int ret = 0;
685
686 spin_lock_irqsave(&h->lock, flags);
687 list_for_each_entry(cur, &h->hash_list, hash_list) {
688 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
689 spin_lock(&cur->bio_list_lock);
690
691 /* can we steal this cached rbio's pages? */
692 if (bio_list_empty(&cur->bio_list) &&
693 list_empty(&cur->plug_list) &&
694 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
695 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
696 list_del_init(&cur->hash_list);
697 refcount_dec(&cur->refs);
698
699 steal_rbio(cur, rbio);
700 cache_drop = cur;
701 spin_unlock(&cur->bio_list_lock);
702
703 goto lockit;
704 }
705
706 /* can we merge into the lock owner? */
707 if (rbio_can_merge(cur, rbio)) {
708 merge_rbio(cur, rbio);
709 spin_unlock(&cur->bio_list_lock);
710 freeit = rbio;
711 ret = 1;
712 goto out;
713 }
714
715
716 /*
717 * we couldn't merge with the running
718 * rbio, see if we can merge with the
719 * pending ones. We don't have to
720 * check for rmw_locked because there
721 * is no way they are inside finish_rmw
722 * right now
723 */
724 list_for_each_entry(pending, &cur->plug_list,
725 plug_list) {
726 if (rbio_can_merge(pending, rbio)) {
727 merge_rbio(pending, rbio);
728 spin_unlock(&cur->bio_list_lock);
729 freeit = rbio;
730 ret = 1;
731 goto out;
732 }
733 }
734
735 /* no merging, put us on the tail of the plug list,
736 * our rbio will be started when the currently
737 * running rbio unlocks
738 */
739 list_add_tail(&rbio->plug_list, &cur->plug_list);
740 spin_unlock(&cur->bio_list_lock);
741 ret = 1;
742 goto out;
743 }
744 }
745 lockit:
746 refcount_inc(&rbio->refs);
747 list_add(&rbio->hash_list, &h->hash_list);
748 out:
749 spin_unlock_irqrestore(&h->lock, flags);
750 if (cache_drop)
751 remove_rbio_from_cache(cache_drop);
752 if (freeit)
753 __free_raid_bio(freeit);
754 return ret;
755 }
756
757 /*
758 * called when rmw or parity rebuild is completed. If the plug list has more
759 * rbios waiting for this stripe, the next one on the list will be started
760 */
761 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
762 {
763 int bucket;
764 struct btrfs_stripe_hash *h;
765 unsigned long flags;
766 int keep_cache = 0;
767
768 bucket = rbio_bucket(rbio);
769 h = rbio->fs_info->stripe_hash_table->table + bucket;
770
771 if (list_empty(&rbio->plug_list))
772 cache_rbio(rbio);
773
774 spin_lock_irqsave(&h->lock, flags);
775 spin_lock(&rbio->bio_list_lock);
776
777 if (!list_empty(&rbio->hash_list)) {
778 /*
779 * if we're still cached and there is no other IO
780 * to perform, just leave this rbio here for others
781 * to steal from later
782 */
783 if (list_empty(&rbio->plug_list) &&
784 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
785 keep_cache = 1;
786 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
787 BUG_ON(!bio_list_empty(&rbio->bio_list));
788 goto done;
789 }
790
791 list_del_init(&rbio->hash_list);
792 refcount_dec(&rbio->refs);
793
794 /*
795 * we use the plug list to hold all the rbios
796 * waiting for the chance to lock this stripe.
797 * hand the lock over to one of them.
798 */
799 if (!list_empty(&rbio->plug_list)) {
800 struct btrfs_raid_bio *next;
801 struct list_head *head = rbio->plug_list.next;
802
803 next = list_entry(head, struct btrfs_raid_bio,
804 plug_list);
805
806 list_del_init(&rbio->plug_list);
807
808 list_add(&next->hash_list, &h->hash_list);
809 refcount_inc(&next->refs);
810 spin_unlock(&rbio->bio_list_lock);
811 spin_unlock_irqrestore(&h->lock, flags);
812
813 if (next->operation == BTRFS_RBIO_READ_REBUILD)
814 start_async_work(next, read_rebuild_work);
815 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
816 steal_rbio(rbio, next);
817 start_async_work(next, read_rebuild_work);
818 } else if (next->operation == BTRFS_RBIO_WRITE) {
819 steal_rbio(rbio, next);
820 start_async_work(next, rmw_work);
821 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
822 steal_rbio(rbio, next);
823 start_async_work(next, scrub_parity_work);
824 }
825
826 goto done_nolock;
827 }
828 }
829 done:
830 spin_unlock(&rbio->bio_list_lock);
831 spin_unlock_irqrestore(&h->lock, flags);
832
833 done_nolock:
834 if (!keep_cache)
835 remove_rbio_from_cache(rbio);
836 }
837
838 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
839 {
840 int i;
841
842 if (!refcount_dec_and_test(&rbio->refs))
843 return;
844
845 WARN_ON(!list_empty(&rbio->stripe_cache));
846 WARN_ON(!list_empty(&rbio->hash_list));
847 WARN_ON(!bio_list_empty(&rbio->bio_list));
848
849 for (i = 0; i < rbio->nr_pages; i++) {
850 if (rbio->stripe_pages[i]) {
851 __free_page(rbio->stripe_pages[i]);
852 rbio->stripe_pages[i] = NULL;
853 }
854 }
855
856 btrfs_put_bbio(rbio->bbio);
857 kfree(rbio);
858 }
859
860 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
861 {
862 struct bio *next;
863
864 while (cur) {
865 next = cur->bi_next;
866 cur->bi_next = NULL;
867 cur->bi_status = err;
868 bio_endio(cur);
869 cur = next;
870 }
871 }
872
873 /*
874 * this frees the rbio and runs through all the bios in the
875 * bio_list and calls end_io on them
876 */
877 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
878 {
879 struct bio *cur = bio_list_get(&rbio->bio_list);
880 struct bio *extra;
881
882 if (rbio->generic_bio_cnt)
883 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
884 /*
885 * Clear the data bitmap, as the rbio may be cached for later usage.
886 * Do this before unlock_stripe() so there will be no new bio
887 * added to this rbio.
888 */
889 bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages);
890
891 /*
892 * At this moment, rbio->bio_list is empty, however since rbio does not
893 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
894 * hash list, rbio may be merged with others so that rbio->bio_list
895 * becomes non-empty.
896 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
897 * more and we can call bio_endio() on all queued bios.
898 */
899 unlock_stripe(rbio);
900 extra = bio_list_get(&rbio->bio_list);
901 __free_raid_bio(rbio);
902
903 rbio_endio_bio_list(cur, err);
904 if (extra)
905 rbio_endio_bio_list(extra, err);
906 }
907
908 /*
909 * end io function used by finish_rmw. When we finally
910 * get here, we've written a full stripe
911 */
912 static void raid_write_end_io(struct bio *bio)
913 {
914 struct btrfs_raid_bio *rbio = bio->bi_private;
915 blk_status_t err = bio->bi_status;
916 int max_errors;
917
918 if (err)
919 fail_bio_stripe(rbio, bio);
920
921 bio_put(bio);
922
923 if (!atomic_dec_and_test(&rbio->stripes_pending))
924 return;
925
926 err = BLK_STS_OK;
927
928 /* OK, we have written all the stripes we need to. */
929 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
930 0 : rbio->bbio->max_errors;
931 if (atomic_read(&rbio->error) > max_errors)
932 err = BLK_STS_IOERR;
933
934 rbio_orig_end_io(rbio, err);
935 }
936
937 /*
938 * the read/modify/write code wants to use the original bio for
939 * any pages it included, and then use the rbio for everything
940 * else. This function decides if a given index (stripe number)
941 * and page number in that stripe fall inside the original bio
942 * or the rbio.
943 *
944 * if you set bio_list_only, you'll get a NULL back for any ranges
945 * that are outside the bio_list
946 *
947 * This doesn't take any refs on anything, you get a bare page pointer
948 * and the caller must bump refs as required.
949 *
950 * You must call index_rbio_pages once before you can trust
951 * the answers from this function.
952 */
953 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
954 int index, int pagenr, int bio_list_only)
955 {
956 int chunk_page;
957 struct page *p = NULL;
958
959 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
960
961 spin_lock_irq(&rbio->bio_list_lock);
962 p = rbio->bio_pages[chunk_page];
963 spin_unlock_irq(&rbio->bio_list_lock);
964
965 if (p || bio_list_only)
966 return p;
967
968 return rbio->stripe_pages[chunk_page];
969 }
970
971 /*
972 * number of pages we need for the entire stripe across all the
973 * drives
974 */
975 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
976 {
977 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
978 }
979
980 /*
981 * allocation and initial setup for the btrfs_raid_bio. Note that
982 * this does not allocate any pages for rbio->stripe_pages.
983 */
984 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
985 struct btrfs_bio *bbio,
986 u64 stripe_len)
987 {
988 struct btrfs_raid_bio *rbio;
989 int nr_data = 0;
990 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
991 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
992 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
993 void *p;
994
995 rbio = kzalloc(sizeof(*rbio) +
996 sizeof(*rbio->stripe_pages) * num_pages +
997 sizeof(*rbio->bio_pages) * num_pages +
998 sizeof(*rbio->finish_pointers) * real_stripes +
999 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
1000 sizeof(*rbio->finish_pbitmap) *
1001 BITS_TO_LONGS(stripe_npages),
1002 GFP_NOFS);
1003 if (!rbio)
1004 return ERR_PTR(-ENOMEM);
1005
1006 bio_list_init(&rbio->bio_list);
1007 INIT_LIST_HEAD(&rbio->plug_list);
1008 spin_lock_init(&rbio->bio_list_lock);
1009 INIT_LIST_HEAD(&rbio->stripe_cache);
1010 INIT_LIST_HEAD(&rbio->hash_list);
1011 rbio->bbio = bbio;
1012 rbio->fs_info = fs_info;
1013 rbio->stripe_len = stripe_len;
1014 rbio->nr_pages = num_pages;
1015 rbio->real_stripes = real_stripes;
1016 rbio->stripe_npages = stripe_npages;
1017 rbio->faila = -1;
1018 rbio->failb = -1;
1019 refcount_set(&rbio->refs, 1);
1020 atomic_set(&rbio->error, 0);
1021 atomic_set(&rbio->stripes_pending, 0);
1022
1023 /*
1024 * the stripe_pages, bio_pages, etc arrays point to the extra
1025 * memory we allocated past the end of the rbio
1026 */
1027 p = rbio + 1;
1028 #define CONSUME_ALLOC(ptr, count) do { \
1029 ptr = p; \
1030 p = (unsigned char *)p + sizeof(*(ptr)) * (count); \
1031 } while (0)
1032 CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1033 CONSUME_ALLOC(rbio->bio_pages, num_pages);
1034 CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1035 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1036 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1037 #undef CONSUME_ALLOC
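/*
 * Resulting layout of the single allocation, with every array
 * placed directly after the struct:
 *
 *   struct btrfs_raid_bio
 *   stripe_pages:    num_pages pointers
 *   bio_pages:       num_pages pointers
 *   finish_pointers: real_stripes pointers
 *   dbitmap:         BITS_TO_LONGS(stripe_npages) longs
 *   finish_pbitmap:  BITS_TO_LONGS(stripe_npages) longs
 */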
1038
1039 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1040 nr_data = real_stripes - 1;
1041 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1042 nr_data = real_stripes - 2;
1043 else
1044 BUG();
1045
1046 rbio->nr_data = nr_data;
1047 return rbio;
1048 }
1049
1050 /* allocate pages for all the stripes in the bio, including parity */
1051 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1052 {
1053 int i;
1054 struct page *page;
1055
1056 for (i = 0; i < rbio->nr_pages; i++) {
1057 if (rbio->stripe_pages[i])
1058 continue;
1059 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1060 if (!page)
1061 return -ENOMEM;
1062 rbio->stripe_pages[i] = page;
1063 }
1064 return 0;
1065 }
1066
1067 /* only allocate pages for p/q stripes */
1068 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1069 {
1070 int i;
1071 struct page *page;
1072
1073 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1074
1075 for (; i < rbio->nr_pages; i++) {
1076 if (rbio->stripe_pages[i])
1077 continue;
1078 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1079 if (!page)
1080 return -ENOMEM;
1081 rbio->stripe_pages[i] = page;
1082 }
1083 return 0;
1084 }
1085
1086 /*
1087 * add a single page from a specific stripe into our list of bios for IO
1088 * this will try to merge into existing bios if possible, and returns
1089 * zero if all went well.
1090 */
1091 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1092 struct bio_list *bio_list,
1093 struct page *page,
1094 int stripe_nr,
1095 unsigned long page_index,
1096 unsigned long bio_max_len)
1097 {
1098 struct bio *last = bio_list->tail;
1099 u64 last_end = 0;
1100 int ret;
1101 struct bio *bio;
1102 struct btrfs_bio_stripe *stripe;
1103 u64 disk_start;
1104
1105 stripe = &rbio->bbio->stripes[stripe_nr];
1106 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1107
1108 /* if the device is missing, just fail this stripe */
1109 if (!stripe->dev->bdev)
1110 return fail_rbio_index(rbio, stripe_nr);
1111
1112 /* see if we can add this page onto our existing bio */
1113 if (last) {
1114 last_end = (u64)last->bi_iter.bi_sector << 9;
1115 last_end += last->bi_iter.bi_size;
1116
1117 /*
1118 * we can't merge these if they are from different
1119 * devices or if they are not contiguous
1120 */
1121 if (last_end == disk_start && stripe->dev->bdev &&
1122 !last->bi_status &&
1123 last->bi_disk == stripe->dev->bdev->bd_disk &&
1124 last->bi_partno == stripe->dev->bdev->bd_partno) {
1125 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1126 if (ret == PAGE_SIZE)
1127 return 0;
1128 }
1129 }
1130
1131 /* put a new bio on the list */
1132 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1133 bio->bi_iter.bi_size = 0;
1134 bio_set_dev(bio, stripe->dev->bdev);
1135 bio->bi_iter.bi_sector = disk_start >> 9;
1136
1137 bio_add_page(bio, page, PAGE_SIZE, 0);
1138 bio_list_add(bio_list, bio);
1139 return 0;
1140 }
1141
1142 /*
1143 * while we're doing the read/modify/write cycle, we could
1144 * have errors in reading pages off the disk. This checks
1145 * for errors and if we're not able to read the page it'll
1146 * trigger parity reconstruction. The rmw will be finished
1147 * after we've reconstructed the failed stripes
1148 */
1149 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1150 {
1151 if (rbio->faila >= 0 || rbio->failb >= 0) {
1152 BUG_ON(rbio->faila == rbio->real_stripes - 1);
1153 __raid56_parity_recover(rbio);
1154 } else {
1155 finish_rmw(rbio);
1156 }
1157 }
1158
1159 /*
1160 * helper function to walk our bio list and populate the bio_pages array with
1161 * the result. This seems expensive, but it is faster than constantly
1162 * searching through the bio list as we setup the IO in finish_rmw or stripe
1163 * reconstruction.
1164 *
1165 * This must be called before you trust the answers from page_in_rbio
1166 */
1167 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1168 {
1169 struct bio *bio;
1170 u64 start;
1171 unsigned long stripe_offset;
1172 unsigned long page_index;
1173
1174 spin_lock_irq(&rbio->bio_list_lock);
1175 bio_list_for_each(bio, &rbio->bio_list) {
1176 struct bio_vec bvec;
1177 struct bvec_iter iter;
1178 int i = 0;
1179
1180 start = (u64)bio->bi_iter.bi_sector << 9;
1181 stripe_offset = start - rbio->bbio->raid_map[0];
1182 page_index = stripe_offset >> PAGE_SHIFT;
1183
1184 if (bio_flagged(bio, BIO_CLONED))
1185 bio->bi_iter = btrfs_io_bio(bio)->iter;
1186
1187 bio_for_each_segment(bvec, bio, iter) {
1188 rbio->bio_pages[page_index + i] = bvec.bv_page;
1189 i++;
1190 }
1191 }
1192 spin_unlock_irq(&rbio->bio_list_lock);
1193 }
1194
1195 /*
1196 * this is called from one of two situations. We either
1197 * have a full stripe from the higher layers, or we've read all
1198 * the missing bits off disk.
1199 *
1200 * This will calculate the parity and then send down any
1201 * changed blocks.
1202 */
1203 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1204 {
1205 struct btrfs_bio *bbio = rbio->bbio;
1206 void **pointers = rbio->finish_pointers;
1207 int nr_data = rbio->nr_data;
1208 int stripe;
1209 int pagenr;
1210 bool has_qstripe;
1211 struct bio_list bio_list;
1212 struct bio *bio;
1213 int ret;
1214
1215 bio_list_init(&bio_list);
1216
1217 if (rbio->real_stripes - rbio->nr_data == 1)
1218 has_qstripe = false;
1219 else if (rbio->real_stripes - rbio->nr_data == 2)
1220 has_qstripe = true;
1221 else
1222 BUG();
1223
1224 /* We should have at least one data sector. */
1225 ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages));
1226
1227 /* at this point we either have a full stripe,
1228 * or we've read the full stripe from the drive.
1229 * recalculate the parity and write the new results.
1230 *
1231 * We're not allowed to add any new bios to the
1232 * bio list here, anyone else that wants to
1233 * change this stripe needs to do their own rmw.
1234 */
1235 spin_lock_irq(&rbio->bio_list_lock);
1236 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1237 spin_unlock_irq(&rbio->bio_list_lock);
1238
1239 atomic_set(&rbio->error, 0);
1240
1241 /*
1242 * now that we've set rmw_locked, run through the
1243 * bio list one last time and map the page pointers
1244 *
1245 * We don't cache full rbios because we're assuming
1246 * the higher layers are unlikely to use this area of
1247 * the disk again soon. If they do use it again,
1248 * hopefully they will send another full bio.
1249 */
1250 index_rbio_pages(rbio);
1251 if (!rbio_is_full(rbio))
1252 cache_rbio_pages(rbio);
1253 else
1254 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1255
1256 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1257 struct page *p;
1258 /* first collect one page from each data stripe */
1259 for (stripe = 0; stripe < nr_data; stripe++) {
1260 p = page_in_rbio(rbio, stripe, pagenr, 0);
1261 pointers[stripe] = kmap(p);
1262 }
1263
1264 /* then add the parity stripe */
1265 p = rbio_pstripe_page(rbio, pagenr);
1266 SetPageUptodate(p);
1267 pointers[stripe++] = kmap(p);
1268
1269 if (has_qstripe) {
1270
1271 /*
1272 * raid6, add the qstripe and call the
1273 * library function to fill in our p/q
1274 */
1275 p = rbio_qstripe_page(rbio, pagenr);
1276 SetPageUptodate(p);
1277 pointers[stripe++] = kmap(p);
1278
1279 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1280 pointers);
1281 } else {
1282 /* raid5 */
1283 copy_page(pointers[nr_data], pointers[0]);
1284 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1285 }
1286
1287
1288 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1289 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1290 }
1291
1292 /*
1293 * time to start writing. Make bios for everything from the
1294 * higher layers (the bio_list in our rbio) and our p/q. Ignore
1295 * everything else.
1296 */
1297 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1298 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1299 struct page *page;
1300
1301 /* This vertical stripe has no data, skip it. */
1302 if (!test_bit(pagenr, rbio->dbitmap))
1303 continue;
1304
1305 if (stripe < rbio->nr_data) {
1306 page = page_in_rbio(rbio, stripe, pagenr, 1);
1307 if (!page)
1308 continue;
1309 } else {
1310 page = rbio_stripe_page(rbio, stripe, pagenr);
1311 }
1312
1313 ret = rbio_add_io_page(rbio, &bio_list,
1314 page, stripe, pagenr, rbio->stripe_len);
1315 if (ret)
1316 goto cleanup;
1317 }
1318 }
1319
1320 if (likely(!bbio->num_tgtdevs))
1321 goto write_data;
1322
1323 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1324 if (!bbio->tgtdev_map[stripe])
1325 continue;
1326
1327 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1328 struct page *page;
1329
1330 /* This vertical stripe has no data, skip it. */
1331 if (!test_bit(pagenr, rbio->dbitmap))
1332 continue;
1333
1334 if (stripe < rbio->nr_data) {
1335 page = page_in_rbio(rbio, stripe, pagenr, 1);
1336 if (!page)
1337 continue;
1338 } else {
1339 page = rbio_stripe_page(rbio, stripe, pagenr);
1340 }
1341
1342 ret = rbio_add_io_page(rbio, &bio_list, page,
1343 rbio->bbio->tgtdev_map[stripe],
1344 pagenr, rbio->stripe_len);
1345 if (ret)
1346 goto cleanup;
1347 }
1348 }
1349
1350 write_data:
1351 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1352 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1353
1354 while (1) {
1355 bio = bio_list_pop(&bio_list);
1356 if (!bio)
1357 break;
1358
1359 bio->bi_private = rbio;
1360 bio->bi_end_io = raid_write_end_io;
1361 bio->bi_opf = REQ_OP_WRITE;
1362
1363 submit_bio(bio);
1364 }
1365 return;
1366
1367 cleanup:
1368 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1369
1370 while ((bio = bio_list_pop(&bio_list)))
1371 bio_put(bio);
1372 }
1373
1374 /*
1375 * helper to find the stripe number for a given bio. Used to figure out which
1376 * stripe has failed. This expects the bio to correspond to a physical disk,
1377 * so it looks up based on physical sector numbers.
1378 */
1379 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1380 struct bio *bio)
1381 {
1382 u64 physical = bio->bi_iter.bi_sector;
1383 u64 stripe_start;
1384 int i;
1385 struct btrfs_bio_stripe *stripe;
1386
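/* bi_sector is in 512 byte sectors, convert it to a byte offset */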
1387 physical <<= 9;
1388
1389 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1390 stripe = &rbio->bbio->stripes[i];
1391 stripe_start = stripe->physical;
1392 if (physical >= stripe_start &&
1393 physical < stripe_start + rbio->stripe_len &&
1394 stripe->dev->bdev &&
1395 bio->bi_disk == stripe->dev->bdev->bd_disk &&
1396 bio->bi_partno == stripe->dev->bdev->bd_partno) {
1397 return i;
1398 }
1399 }
1400 return -1;
1401 }
1402
1403 /*
1404 * helper to find the stripe number for a given
1405 * bio (before mapping). Used to figure out which stripe has
1406 * failed. This looks up based on logical block numbers.
1407 */
1408 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1409 struct bio *bio)
1410 {
1411 u64 logical = bio->bi_iter.bi_sector;
1412 u64 stripe_start;
1413 int i;
1414
1415 logical <<= 9;
1416
1417 for (i = 0; i < rbio->nr_data; i++) {
1418 stripe_start = rbio->bbio->raid_map[i];
1419 if (logical >= stripe_start &&
1420 logical < stripe_start + rbio->stripe_len) {
1421 return i;
1422 }
1423 }
1424 return -1;
1425 }
1426
1427 /*
1428 * returns -EIO if we had too many failures
1429 */
1430 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1431 {
1432 unsigned long flags;
1433 int ret = 0;
1434
1435 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1436
1437 /* we already know this stripe is bad, move on */
1438 if (rbio->faila == failed || rbio->failb == failed)
1439 goto out;
1440
1441 if (rbio->faila == -1) {
1442 /* first failure on this rbio */
1443 rbio->faila = failed;
1444 atomic_inc(&rbio->error);
1445 } else if (rbio->failb == -1) {
1446 /* second failure on this rbio */
1447 rbio->failb = failed;
1448 atomic_inc(&rbio->error);
1449 } else {
1450 ret = -EIO;
1451 }
1452 out:
1453 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1454
1455 return ret;
1456 }
1457
1458 /*
1459 * helper to fail a stripe based on a physical disk
1460 * bio.
1461 */
1462 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1463 struct bio *bio)
1464 {
1465 int failed = find_bio_stripe(rbio, bio);
1466
1467 if (failed < 0)
1468 return -EIO;
1469
1470 return fail_rbio_index(rbio, failed);
1471 }
1472
1473 /*
1474 * this sets each page in the bio uptodate. It should only be used on private
1475 * rbio pages, nothing that comes in from the higher layers
1476 */
1477 static void set_bio_pages_uptodate(struct bio *bio)
1478 {
1479 struct bio_vec *bvec;
1480 struct bvec_iter_all iter_all;
1481
1482 ASSERT(!bio_flagged(bio, BIO_CLONED));
1483
1484 bio_for_each_segment_all(bvec, bio, iter_all)
1485 SetPageUptodate(bvec->bv_page);
1486 }
1487
1488 /*
1489 * end io for the read phase of the rmw cycle. All the bios here are physical
1490 * stripe bios we've read from the disk so we can recalculate the parity of the
1491 * stripe.
1492 *
1493 * This will usually kick off finish_rmw once all the bios are read in, but it
1494 * may trigger parity reconstruction if we had any errors along the way
1495 */
1496 static void raid_rmw_end_io(struct bio *bio)
1497 {
1498 struct btrfs_raid_bio *rbio = bio->bi_private;
1499
1500 if (bio->bi_status)
1501 fail_bio_stripe(rbio, bio);
1502 else
1503 set_bio_pages_uptodate(bio);
1504
1505 bio_put(bio);
1506
1507 if (!atomic_dec_and_test(&rbio->stripes_pending))
1508 return;
1509
1510 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1511 goto cleanup;
1512
1513 /*
1514 * this will normally call finish_rmw to start our write
1515 * but if there are any failed stripes we'll reconstruct
1516 * from parity first
1517 */
1518 validate_rbio_for_rmw(rbio);
1519 return;
1520
1521 cleanup:
1522
1523 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1524 }
1525
1526 /*
1527 * the stripe must be locked by the caller. It will
1528 * unlock after all the writes are done
1529 */
1530 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1531 {
1532 int bios_to_read = 0;
1533 struct bio_list bio_list;
1534 int ret;
1535 int pagenr;
1536 int stripe;
1537 struct bio *bio;
1538
1539 bio_list_init(&bio_list);
1540
1541 ret = alloc_rbio_pages(rbio);
1542 if (ret)
1543 goto cleanup;
1544
1545 index_rbio_pages(rbio);
1546
1547 atomic_set(&rbio->error, 0);
1548 /*
1549 * build a list of bios to read all the missing parts of this
1550 * stripe
1551 */
1552 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1553 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1554 struct page *page;
1555 /*
1556 * we want to find all the pages missing from
1557 * the rbio and read them from the disk. If
1558 * page_in_rbio finds a page in the bio list
1559 * we don't need to read it off the stripe.
1560 */
1561 page = page_in_rbio(rbio, stripe, pagenr, 1);
1562 if (page)
1563 continue;
1564
1565 page = rbio_stripe_page(rbio, stripe, pagenr);
1566 /*
1567 * the bio cache may have handed us an uptodate
1568 * page. If so, be happy and use it
1569 */
1570 if (PageUptodate(page))
1571 continue;
1572
1573 ret = rbio_add_io_page(rbio, &bio_list, page,
1574 stripe, pagenr, rbio->stripe_len);
1575 if (ret)
1576 goto cleanup;
1577 }
1578 }
1579
1580 bios_to_read = bio_list_size(&bio_list);
1581 if (!bios_to_read) {
1582 /*
1583 * this can happen if others have merged with
1584 * us, it means there is nothing left to read.
1585 * But if there are missing devices it may not be
1586 * safe to do the full stripe write yet.
1587 */
1588 goto finish;
1589 }
1590
1591 /*
1592 * the bbio may be freed once we submit the last bio. Make sure
1593 * not to touch it after that
1594 */
1595 atomic_set(&rbio->stripes_pending, bios_to_read);
1596 while (1) {
1597 bio = bio_list_pop(&bio_list);
1598 if (!bio)
1599 break;
1600
1601 bio->bi_private = rbio;
1602 bio->bi_end_io = raid_rmw_end_io;
1603 bio->bi_opf = REQ_OP_READ;
1604
1605 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1606
1607 submit_bio(bio);
1608 }
1609 /* the actual write will happen once the reads are done */
1610 return 0;
1611
1612 cleanup:
1613 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1614
1615 while ((bio = bio_list_pop(&bio_list)))
1616 bio_put(bio);
1617
1618 return -EIO;
1619
1620 finish:
1621 validate_rbio_for_rmw(rbio);
1622 return 0;
1623 }
1624
1625 /*
1626 * if the upper layers pass in a full stripe, we thank them by only allocating
1627 * enough pages to hold the parity, and sending it all down quickly.
1628 */
1629 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1630 {
1631 int ret;
1632
1633 ret = alloc_rbio_parity_pages(rbio);
1634 if (ret) {
1635 __free_raid_bio(rbio);
1636 return ret;
1637 }
1638
1639 ret = lock_stripe_add(rbio);
1640 if (ret == 0)
1641 finish_rmw(rbio);
1642 return 0;
1643 }
1644
1645 /*
1646 * partial stripe writes get handed over to async helpers.
1647 * We're really hoping to merge a few more writes into this
1648 * rbio before calculating new parity
1649 */
1650 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1651 {
1652 int ret;
1653
1654 ret = lock_stripe_add(rbio);
1655 if (ret == 0)
1656 start_async_work(rbio, rmw_work);
1657 return 0;
1658 }
1659
1660 /*
1661 * sometimes while we're reading from the drive to
1662 * recalculate parity, enough new bios come in to create
1663 * a full stripe. So we do a check here to see if we can
1664 * go directly to finish_rmw
1665 */
1666 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1667 {
1668 /* head off into rmw land if we don't have a full stripe */
1669 if (!rbio_is_full(rbio))
1670 return partial_stripe_write(rbio);
1671 return full_stripe_write(rbio);
1672 }
1673
1674 /*
1675 * We use plugging call backs to collect full stripes.
1676 * Any time we get a partial stripe write while plugged
1677 * we collect it into a list. When the unplug comes down,
1678 * we sort the list by logical block number and merge
1679 * everything we can into the same rbios
1680 */
1681 struct btrfs_plug_cb {
1682 struct blk_plug_cb cb;
1683 struct btrfs_fs_info *info;
1684 struct list_head rbio_list;
1685 struct btrfs_work work;
1686 };
1687
1688 /*
1689 * rbios on the plug list are sorted for easier merging.
1690 */
1691 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1692 {
1693 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1694 plug_list);
1695 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1696 plug_list);
1697 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1698 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1699
1700 if (a_sector < b_sector)
1701 return -1;
1702 if (a_sector > b_sector)
1703 return 1;
1704 return 0;
1705 }
1706
1707 static void run_plug(struct btrfs_plug_cb *plug)
1708 {
1709 struct btrfs_raid_bio *cur;
1710 struct btrfs_raid_bio *last = NULL;
1711
1712 /*
1713 * sort our plug list then try to merge
1714 * everything we can in hopes of creating full
1715 * stripes.
1716 */
1717 list_sort(NULL, &plug->rbio_list, plug_cmp);
1718 while (!list_empty(&plug->rbio_list)) {
1719 cur = list_entry(plug->rbio_list.next,
1720 struct btrfs_raid_bio, plug_list);
1721 list_del_init(&cur->plug_list);
1722
1723 if (rbio_is_full(cur)) {
1724 int ret;
1725
1726 /* we have a full stripe, send it down */
1727 ret = full_stripe_write(cur);
1728 BUG_ON(ret);
1729 continue;
1730 }
1731 if (last) {
1732 if (rbio_can_merge(last, cur)) {
1733 merge_rbio(last, cur);
1734 __free_raid_bio(cur);
1735 continue;
1736
1737 }
1738 __raid56_parity_write(last);
1739 }
1740 last = cur;
1741 }
1742 if (last) {
1743 __raid56_parity_write(last);
1744 }
1745 kfree(plug);
1746 }
1747
1748 /*
1749 * if the unplug comes from schedule, we have to push the
1750 * work off to a helper thread
1751 */
1752 static void unplug_work(struct btrfs_work *work)
1753 {
1754 struct btrfs_plug_cb *plug;
1755 plug = container_of(work, struct btrfs_plug_cb, work);
1756 run_plug(plug);
1757 }
1758
1759 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1760 {
1761 struct btrfs_plug_cb *plug;
1762 plug = container_of(cb, struct btrfs_plug_cb, cb);
1763
1764 if (from_schedule) {
1765 btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
1766 btrfs_queue_work(plug->info->rmw_workers,
1767 &plug->work);
1768 return;
1769 }
1770 run_plug(plug);
1771 }
1772
1773 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1774 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1775 {
1776 const struct btrfs_fs_info *fs_info = rbio->fs_info;
1777 const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1778 const u64 full_stripe_start = rbio->bbio->raid_map[0];
1779 const u32 orig_len = orig_bio->bi_iter.bi_size;
1780 const u32 sectorsize = fs_info->sectorsize;
1781 u64 cur_logical;
1782
1783 ASSERT(orig_logical >= full_stripe_start &&
1784 orig_logical + orig_len <= full_stripe_start +
1785 rbio->nr_data * rbio->stripe_len);
1786
1787 bio_list_add(&rbio->bio_list, orig_bio);
1788 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1789
1790 /* Update the dbitmap. */
1791 for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1792 cur_logical += sectorsize) {
1793 int bit = ((u32)(cur_logical - full_stripe_start) >>
1794 PAGE_SHIFT) % rbio->stripe_npages;
1795
1796 set_bit(bit, rbio->dbitmap);
1797 }
1798 }
1799
1800 /*
1801 * our main entry point for writes from the rest of the FS.
1802 */
1803 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1804 struct btrfs_bio *bbio, u64 stripe_len)
1805 {
1806 struct btrfs_raid_bio *rbio;
1807 struct btrfs_plug_cb *plug = NULL;
1808 struct blk_plug_cb *cb;
1809 int ret;
1810
1811 rbio = alloc_rbio(fs_info, bbio, stripe_len);
1812 if (IS_ERR(rbio)) {
1813 btrfs_put_bbio(bbio);
1814 return PTR_ERR(rbio);
1815 }
1816 rbio->operation = BTRFS_RBIO_WRITE;
1817 rbio_add_bio(rbio, bio);
1818
1819 btrfs_bio_counter_inc_noblocked(fs_info);
1820 rbio->generic_bio_cnt = 1;
1821
1822 /*
1823 * don't plug on full rbios, just get them out the door
1824 * as quickly as we can
1825 */
1826 if (rbio_is_full(rbio)) {
1827 ret = full_stripe_write(rbio);
1828 if (ret)
1829 btrfs_bio_counter_dec(fs_info);
1830 return ret;
1831 }
1832
1833 cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1834 if (cb) {
1835 plug = container_of(cb, struct btrfs_plug_cb, cb);
1836 if (!plug->info) {
1837 plug->info = fs_info;
1838 INIT_LIST_HEAD(&plug->rbio_list);
1839 }
1840 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1841 ret = 0;
1842 } else {
1843 ret = __raid56_parity_write(rbio);
1844 if (ret)
1845 btrfs_bio_counter_dec(fs_info);
1846 }
1847 return ret;
1848 }
1849
1850 /*
1851 * all parity reconstruction happens here. We've read in everything
1852 * we can find from the drives and this does the heavy lifting of
1853 * sorting the good from the bad.
1854 */
1855 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1856 {
1857 int pagenr, stripe;
1858 void **pointers;
1859 int faila = -1, failb = -1;
1860 struct page *page;
1861 blk_status_t err;
1862 int i;
1863
1864 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1865 if (!pointers) {
1866 err = BLK_STS_RESOURCE;
1867 goto cleanup_io;
1868 }
1869
1870 faila = rbio->faila;
1871 failb = rbio->failb;
1872
1873 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1874 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1875 spin_lock_irq(&rbio->bio_list_lock);
1876 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1877 spin_unlock_irq(&rbio->bio_list_lock);
1878 }
1879
1880 index_rbio_pages(rbio);
1881
1882 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1883 /*
1884 * Now we just use bitmap to mark the horizontal stripes in
1885 * which we have data when doing parity scrub.
1886 */
1887 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1888 !test_bit(pagenr, rbio->dbitmap))
1889 continue;
1890
1891 /* setup our array of pointers with pages
1892 * from each stripe
1893 */
1894 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1895 /*
1896 * if we're rebuilding a read, we have to use
1897 * pages from the bio list
1898 */
1899 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1900 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1901 (stripe == faila || stripe == failb)) {
1902 page = page_in_rbio(rbio, stripe, pagenr, 0);
1903 } else {
1904 page = rbio_stripe_page(rbio, stripe, pagenr);
1905 }
1906 pointers[stripe] = kmap(page);
1907 }
1908
1909 /* all raid6 handling here */
1910 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1911 /*
1912 * single failure, rebuild from parity raid5
1913 * style
1914 */
1915 if (failb < 0) {
1916 if (faila == rbio->nr_data) {
1917 /*
1918 * Just the P stripe has failed, without
1919 * a bad data or Q stripe.
1920 * TODO, we should redo the xor here.
1921 */
1922 err = BLK_STS_IOERR;
1923 goto cleanup;
1924 }
1925 /*
1926 * a single failure in raid6 is rebuilt
1927 * in the pstripe code below
1928 */
1929 goto pstripe;
1930 }
1931
1932 /* make sure our ps and qs are in order */
1933 if (faila > failb) {
1934 int tmp = failb;
1935 failb = faila;
1936 faila = tmp;
1937 }
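/*
 * Worked example (illustrative): assume 4 data stripes plus P and Q, so
 * real_stripes = 6, nr_data = 4, raid_map[4] == RAID5_P_STRIPE and
 * raid_map[5] == RAID6_Q_STRIPE.  With faila < failb (ensured by the swap
 * above), the branches below pick the recovery routine as follows:
 *
 *   faila = 1, failb = 5 (data + Q)    -> Q is expendable, P alone can
 *                                         rebuild D1: goto pstripe.
 *   faila = 1, failb = 4 (data + P)    -> raid6_datap_recov() rebuilds
 *                                         D1 and P from Q.
 *   faila = 1, failb = 2 (data + data) -> raid6_2data_recov() rebuilds
 *                                         both from P and Q.
 */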
1938
1939 /* If the Q stripe failed, do a P-stripe reconstruction from
1940 * the xors.
1941 * If both the Q stripe and the P stripe failed, we're here
1942 * because of a crc mismatch and we can't give the caller the
1943 * data it wants.
1944 */
1945 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1946 if (rbio->bbio->raid_map[faila] ==
1947 RAID5_P_STRIPE) {
1948 err = BLK_STS_IOERR;
1949 goto cleanup;
1950 }
1951 /*
1952 * otherwise we have one bad data stripe and
1953 * a good P stripe. raid5!
1954 */
1955 goto pstripe;
1956 }
1957
1958 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1959 raid6_datap_recov(rbio->real_stripes,
1960 PAGE_SIZE, faila, pointers);
1961 } else {
1962 raid6_2data_recov(rbio->real_stripes,
1963 PAGE_SIZE, faila, failb,
1964 pointers);
1965 }
1966 } else {
1967 void *p;
1968
1969 /* rebuild from P stripe here (raid5 or raid6) */
1970 BUG_ON(failb != -1);
1971 pstripe:
1972 /* Copy parity block into failed block to start with */
1973 copy_page(pointers[faila], pointers[rbio->nr_data]);
1974
1975 /* rearrange the pointer array */
1976 p = pointers[faila];
1977 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1978 pointers[stripe] = pointers[stripe + 1];
1979 pointers[rbio->nr_data - 1] = p;
1980
1981 /* xor in the rest */
1982 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1983 }
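/*
 * Worked example of the rotation above (illustrative): nr_data = 4 and
 * faila = 1.  After copy_page(), pointers[1] holds a copy of P.  The
 * rotation turns [D0, P-copy, D2, D3] into [D0, D2, D3, P-copy], and
 * run_xor(pointers, 3, PAGE_SIZE) xors pointers[0..2] into pointers[3],
 * leaving the faila page holding P ^ D0 ^ D2 ^ D3, which is exactly the
 * missing D1.
 */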
1984 /* if we're doing this rebuild as part of an rmw, go through
1985 * and set all of our private rbio pages in the
1986 * failed stripes as uptodate. This way finish_rmw will
1987 * know they can be trusted. If this was a read reconstruction,
1988 * other endio functions will fiddle the uptodate bits
1989 */
1990 if (rbio->operation == BTRFS_RBIO_WRITE) {
1991 for (i = 0; i < rbio->stripe_npages; i++) {
1992 if (faila != -1) {
1993 page = rbio_stripe_page(rbio, faila, i);
1994 SetPageUptodate(page);
1995 }
1996 if (failb != -1) {
1997 page = rbio_stripe_page(rbio, failb, i);
1998 SetPageUptodate(page);
1999 }
2000 }
2001 }
2002 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2003 /*
2004 * if we're rebuilding a read, we have to use
2005 * pages from the bio list
2006 */
2007 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2008 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
2009 (stripe == faila || stripe == failb)) {
2010 page = page_in_rbio(rbio, stripe, pagenr, 0);
2011 } else {
2012 page = rbio_stripe_page(rbio, stripe, pagenr);
2013 }
2014 kunmap(page);
2015 }
2016 }
2017
2018 err = BLK_STS_OK;
2019 cleanup:
2020 kfree(pointers);
2021
2022 cleanup_io:
2023 /*
2024 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
2025 * valid rbio which is consistent with ondisk content, thus such a
2026 * valid rbio can be cached to avoid further disk reads.
2027 */
2028 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2029 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
2030 /*
2031 * - In case of two failures, where rbio->failb != -1:
2032 *
2033 * Do not cache this rbio since the above read reconstruction
2034 * (raid6_datap_recov() or raid6_2data_recov()) may have
2035 * changed some content of stripes which are not identical to
2036 * on-disk content any more, otherwise, a later write/recover
2037 * may steal stripe_pages from this rbio and end up with
2038 * corruptions or rebuild failures.
2039 *
2040 * - In case of single failure, where rbio->failb == -1:
2041 *
2042 * Cache this rbio iff the above read reconstruction is
2043 * executed without problems.
2044 */
2045 if (err == BLK_STS_OK && rbio->failb < 0)
2046 cache_rbio_pages(rbio);
2047 else
2048 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2049
2050 rbio_orig_end_io(rbio, err);
2051 } else if (err == BLK_STS_OK) {
2052 rbio->faila = -1;
2053 rbio->failb = -1;
2054
2055 if (rbio->operation == BTRFS_RBIO_WRITE)
2056 finish_rmw(rbio);
2057 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2058 finish_parity_scrub(rbio, 0);
2059 else
2060 BUG();
2061 } else {
2062 rbio_orig_end_io(rbio, err);
2063 }
2064 }
2065
2066 /*
2067 * This is called only for stripes we've read from disk to
2068 * reconstruct the parity.
2069 */
2070 static void raid_recover_end_io(struct bio *bio)
2071 {
2072 struct btrfs_raid_bio *rbio = bio->bi_private;
2073
2074 /*
2075 * we only read stripe pages off the disk, set them
2076 * up to date if there were no errors
2077 */
2078 if (bio->bi_status)
2079 fail_bio_stripe(rbio, bio);
2080 else
2081 set_bio_pages_uptodate(bio);
2082 bio_put(bio);
2083
2084 if (!atomic_dec_and_test(&rbio->stripes_pending))
2085 return;
2086
2087 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2088 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2089 else
2090 __raid_recover_end_io(rbio);
2091 }
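/*
 * Note on the max_errors check above (illustrative): for a RAID5 bbio
 * max_errors is typically 1 and for RAID6 it is 2, so e.g. a RAID6 rebuild
 * that already has faila and failb set (two counted errors) plus one more
 * failed read pushes rbio->error to 3 > 2 and the whole rbio is ended
 * with an I/O error instead of attempting reconstruction.
 */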
2092
2093 /*
2094 * reads everything we need off the disk to reconstruct
2095 * the parity. endio handlers trigger final reconstruction
2096 * when the IO is done.
2097 *
2098 * This is used both for reads from the higher layers and for
2099 * parity construction required to finish a rmw cycle.
2100 */
2101 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2102 {
2103 int bios_to_read = 0;
2104 struct bio_list bio_list;
2105 int ret;
2106 int pagenr;
2107 int stripe;
2108 struct bio *bio;
2109
2110 bio_list_init(&bio_list);
2111
2112 ret = alloc_rbio_pages(rbio);
2113 if (ret)
2114 goto cleanup;
2115
2116 atomic_set(&rbio->error, 0);
2117
2118 /*
2119 * Read everything that hasn't failed. This time we do not trust
2120 * any cached sector: the cache may hold data that is stale with
2121 * respect to disk, which was harmless for the higher layer since
2122 * it never read those sectors, but recovery needs every surviving
2123 * sector to match what is actually on disk.
2124 * So in the recovery path we always re-read everything.
2125 */
2126 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2127 if (rbio->faila == stripe || rbio->failb == stripe) {
2128 atomic_inc(&rbio->error);
2129 continue;
2130 }
2131
2132 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2133 ret = rbio_add_io_page(rbio, &bio_list,
2134 rbio_stripe_page(rbio, stripe, pagenr),
2135 stripe, pagenr, rbio->stripe_len);
2136 if (ret < 0)
2137 goto cleanup;
2138 }
2139 }
2140
2141 bios_to_read = bio_list_size(&bio_list);
2142 if (!bios_to_read) {
2143 /*
2144 * we might have no bios to read either because the pages were
2145 * already up to date, or because the devices holding them were
2146 * gone.
2147 */
2148 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2149 __raid_recover_end_io(rbio);
2150 goto out;
2151 } else {
2152 goto cleanup;
2153 }
2154 }
2155
2156 /*
2157 * the bbio may be freed once we submit the last bio. Make sure
2158 * not to touch it after that
2159 */
2160 atomic_set(&rbio->stripes_pending, bios_to_read);
2161 while (1) {
2162 bio = bio_list_pop(&bio_list);
2163 if (!bio)
2164 break;
2165
2166 bio->bi_private = rbio;
2167 bio->bi_end_io = raid_recover_end_io;
2168 bio->bi_opf = REQ_OP_READ;
2169
2170 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2171
2172 submit_bio(bio);
2173 }
2174 out:
2175 return 0;
2176
2177 cleanup:
2178 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2179 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2180 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2181
2182 while ((bio = bio_list_pop(&bio_list)))
2183 bio_put(bio);
2184
2185 return -EIO;
2186 }
2187
2188 /*
2189 * the main entry point for reads from the higher layers. This
2190 * is really only called when the normal read path had a failure,
2191 * so we assume the bio they send down corresponds to a failed part
2192 * of the drive.
2193 */
2194 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2195 struct btrfs_bio *bbio, u64 stripe_len,
2196 int mirror_num, int generic_io)
2197 {
2198 struct btrfs_raid_bio *rbio;
2199 int ret;
2200
2201 if (generic_io) {
2202 ASSERT(bbio->mirror_num == mirror_num);
2203 btrfs_io_bio(bio)->mirror_num = mirror_num;
2204 }
2205
2206 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2207 if (IS_ERR(rbio)) {
2208 if (generic_io)
2209 btrfs_put_bbio(bbio);
2210 return PTR_ERR(rbio);
2211 }
2212
2213 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2214 rbio_add_bio(rbio, bio);
2215
2216 rbio->faila = find_logical_bio_stripe(rbio, bio);
2217 if (rbio->faila == -1) {
2218 btrfs_warn(fs_info,
2219 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2220 __func__, (u64)bio->bi_iter.bi_sector << 9,
2221 (u64)bio->bi_iter.bi_size, bbio->map_type);
2222 if (generic_io)
2223 btrfs_put_bbio(bbio);
2224 kfree(rbio);
2225 return -EIO;
2226 }
2227
2228 if (generic_io) {
2229 btrfs_bio_counter_inc_noblocked(fs_info);
2230 rbio->generic_bio_cnt = 1;
2231 } else {
2232 btrfs_get_bbio(bbio);
2233 }
2234
2235 /*
2236 * Loop retry:
2237 * for 'mirror_num == 2', reconstruct from all other stripes.
2238 * for 'mirror_num > 2', select a different stripe to fail on each retry.
2239 */
2240 if (mirror_num > 2) {
2241 /*
2242 * 'mirror == 3' is to fail the p stripe and
2243 * reconstruct from the q stripe. 'mirror > 3' is to
2244 * fail a data stripe and reconstruct from p+q stripe.
2245 */
2246 rbio->failb = rbio->real_stripes - (mirror_num - 1);
2247 ASSERT(rbio->failb > 0);
2248 if (rbio->failb <= rbio->faila)
2249 rbio->failb--;
2250 }
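/*
 * Worked example (illustrative): RAID6 with real_stripes = 6
 * (4 data + P + Q) and the read failing on data stripe faila = 2:
 *
 *   mirror_num = 3 -> failb = 6 - 2 = 4, the P stripe, so the rebuild
 *                     is forced to use Q.
 *   mirror_num = 4 -> failb = 6 - 3 = 3, an extra data stripe is
 *                     failed and reconstruction uses both P and Q.
 *
 * The failb-- above keeps the computed failb from landing on faila
 * itself; when they would collide, the stripe below faila is failed
 * instead.
 */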
2251
2252 ret = lock_stripe_add(rbio);
2253
2254 /*
2255 * __raid56_parity_recover will end the bio with
2256 * any errors it hits. We don't want to return
2257 * its error value up the stack because our caller
2258 * will end up calling bio_endio with any nonzero
2259 * return
2260 */
2261 if (ret == 0)
2262 __raid56_parity_recover(rbio);
2263 /*
2264 * our rbio has been added to the list of
2265 * rbios that will be handled after the
2266 * current lock owner is done
2267 */
2268 return 0;
2269
2270 }
2271
2272 static void rmw_work(struct btrfs_work *work)
2273 {
2274 struct btrfs_raid_bio *rbio;
2275
2276 rbio = container_of(work, struct btrfs_raid_bio, work);
2277 raid56_rmw_stripe(rbio);
2278 }
2279
2280 static void read_rebuild_work(struct btrfs_work *work)
2281 {
2282 struct btrfs_raid_bio *rbio;
2283
2284 rbio = container_of(work, struct btrfs_raid_bio, work);
2285 __raid56_parity_recover(rbio);
2286 }
2287
2288 /*
2289 * The following code is used to scrub/replace the parity stripe
2290 *
2291 * Caller must have already increased bio_counter for getting @bbio.
2292 *
2293 * Note: We must make sure all the pages added to the scrub/replace
2294 * raid bio are correct and do not change during the scrub/replace, i.e.
2295 * they hold only metadata or file data protected by checksums.
2296 */
2297
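/*
 * Rough usage sketch (simplified and hedged; the real caller lives in
 * scrub.c, and the sparity/spage names below are illustrative only): the
 * scrub code allocates a zero-length bio just to carry its completion
 * handler, builds the scrub rbio, feeds it the data pages it has already
 * verified, and submits it:
 *
 *	bio = btrfs_io_bio_alloc(0);
 *	bio->bi_iter.bi_sector = logic_start >> 9;
 *	bio->bi_private = sparity;
 *	bio->bi_end_io = scrub_parity_bio_endio;
 *
 *	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio, length,
 *					      scrub_dev, sparity->dbitmap,
 *					      sparity->nsectors);
 *	if (!rbio)
 *		goto out;
 *	list_for_each_entry(spage, &sparity->spages, list)
 *		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
 *	raid56_parity_submit_scrub_rbio(rbio);
 */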
2298 struct btrfs_raid_bio *
2299 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2300 struct btrfs_bio *bbio, u64 stripe_len,
2301 struct btrfs_device *scrub_dev,
2302 unsigned long *dbitmap, int stripe_nsectors)
2303 {
2304 struct btrfs_raid_bio *rbio;
2305 int i;
2306
2307 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2308 if (IS_ERR(rbio))
2309 return NULL;
2310 bio_list_add(&rbio->bio_list, bio);
2311 /*
2312 * This is a special bio which is used to hold the completion handler
2313 * and make the scrub rbio look like the other rbio types
2314 */
2315 ASSERT(!bio->bi_iter.bi_size);
2316 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2317
2318 /*
2319 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2320 * to the end position, so this search can start from the first parity
2321 * stripe.
2322 */
2323 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2324 if (bbio->stripes[i].dev == scrub_dev) {
2325 rbio->scrubp = i;
2326 break;
2327 }
2328 }
2329 ASSERT(i < rbio->real_stripes);
2330
2331 /* Currently we only support sectorsize == PAGE_SIZE */
2332 ASSERT(fs_info->sectorsize == PAGE_SIZE);
2333 ASSERT(rbio->stripe_npages == stripe_nsectors);
2334 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2335
2336 /*
2337 * We have already increased bio_counter when getting bbio, record it
2338 * so we can free it at rbio_orig_end_io().
2339 */
2340 rbio->generic_bio_cnt = 1;
2341
2342 return rbio;
2343 }
2344
2345 /* Used for both parity scrub and missing. */
2346 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2347 u64 logical)
2348 {
2349 int stripe_offset;
2350 int index;
2351
2352 ASSERT(logical >= rbio->bbio->raid_map[0]);
2353 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2354 rbio->stripe_len * rbio->nr_data);
2355 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2356 index = stripe_offset >> PAGE_SHIFT;
2357 rbio->bio_pages[index] = page;
2358 }
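/*
 * Worked example (illustrative, assuming 4K pages and 64K stripes so that
 * stripe_npages = 16): a page at logical raid_map[0] + 68K gives
 * stripe_offset = 68K and index = 17, i.e. page 1 of data stripe 1 in the
 * stripe-major bio_pages[] layout (stripe * stripe_npages + pagenr), which
 * is where the page will be looked up later.
 */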
2359
2360 /*
2361 * We only scrub the parity of horizontal stripes for which we have
2362 * correct data, so we don't need to allocate pages for every stripe.
2363 */
2364 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2365 {
2366 int i;
2367 int bit;
2368 int index;
2369 struct page *page;
2370
2371 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2372 for (i = 0; i < rbio->real_stripes; i++) {
2373 index = i * rbio->stripe_npages + bit;
2374 if (rbio->stripe_pages[index])
2375 continue;
2376
2377 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2378 if (!page)
2379 return -ENOMEM;
2380 rbio->stripe_pages[index] = page;
2381 }
2382 }
2383 return 0;
2384 }
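/*
 * Illustration of the layout used above: stripe_pages[] is stripe-major,
 * index = stripe * stripe_npages + pagenr.  With stripe_npages = 16,
 * horizontal stripe bit = 5 and stripe i = 2 this allocates
 * stripe_pages[37]; pages for bits that are clear in dbitmap are never
 * needed because those horizontal stripes are not scrubbed.
 */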
2385
2386 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2387 int need_check)
2388 {
2389 struct btrfs_bio *bbio = rbio->bbio;
2390 void **pointers = rbio->finish_pointers;
2391 unsigned long *pbitmap = rbio->finish_pbitmap;
2392 int nr_data = rbio->nr_data;
2393 int stripe;
2394 int pagenr;
2395 bool has_qstripe;
2396 struct page *p_page = NULL;
2397 struct page *q_page = NULL;
2398 struct bio_list bio_list;
2399 struct bio *bio;
2400 int is_replace = 0;
2401 int ret;
2402
2403 bio_list_init(&bio_list);
2404
2405 if (rbio->real_stripes - rbio->nr_data == 1)
2406 has_qstripe = false;
2407 else if (rbio->real_stripes - rbio->nr_data == 2)
2408 has_qstripe = true;
2409 else
2410 BUG();
2411
2412 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2413 is_replace = 1;
2414 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2415 }
2416
2417 /*
2418 * The higher layers (the scrubber) are unlikely to use
2419 * this area of the disk again soon, so don't cache
2420 * it.
2421 */
2422 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2423
2424 if (!need_check)
2425 goto writeback;
2426
2427 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2428 if (!p_page)
2429 goto cleanup;
2430 SetPageUptodate(p_page);
2431
2432 if (has_qstripe) {
2433 /* RAID6, allocate and map temp space for the Q stripe */
2434 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2435 if (!q_page) {
2436 __free_page(p_page);
2437 goto cleanup;
2438 }
2439 SetPageUptodate(q_page);
2440 pointers[rbio->real_stripes - 1] = kmap(q_page);
2441 }
2442
2443 atomic_set(&rbio->error, 0);
2444
2445 /* Map the parity stripe just once */
2446 pointers[nr_data] = kmap(p_page);
2447
2448 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2449 struct page *p;
2450 void *parity;
2451 /* first collect one page from each data stripe */
2452 for (stripe = 0; stripe < nr_data; stripe++) {
2453 p = page_in_rbio(rbio, stripe, pagenr, 0);
2454 pointers[stripe] = kmap(p);
2455 }
2456
2457 if (has_qstripe) {
2458 /* RAID6, call the library function to fill in our P/Q */
2459 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2460 pointers);
2461 } else {
2462 /* raid5 */
2463 copy_page(pointers[nr_data], pointers[0]);
2464 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2465 }
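/*
 * In the raid5 branch above the parity ends up as the xor of all the data
 * pages: copy_page() seeds pointers[nr_data] with D0, and
 * run_xor(pointers + 1, nr_data - 1, ...) xors D1..D(nr_data-1) into that
 * same page (run_xor treats its last pointer as the destination).  E.g.
 * with nr_data = 3 the result is P = D0 ^ D1 ^ D2.
 */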
2466
2467 /* Check scrubbing parity and repair it */
2468 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2469 parity = kmap(p);
2470 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2471 copy_page(parity, pointers[rbio->scrubp]);
2472 else
2473 /* Parity is right, no need to write it back */
2474 bitmap_clear(rbio->dbitmap, pagenr, 1);
2475 kunmap(p);
2476
2477 for (stripe = 0; stripe < nr_data; stripe++)
2478 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2479 }
2480
2481 kunmap(p_page);
2482 __free_page(p_page);
2483 if (q_page) {
2484 kunmap(q_page);
2485 __free_page(q_page);
2486 }
2487
2488 writeback:
2489 /*
2490 * time to start writing. Make bios for the scrubbed parity pages we
2491 * repaired above (the bits still set in dbitmap) and, for dev replace,
2492 * for the replacement target. Ignore everything else.
2493 */
2494 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2495 struct page *page;
2496
2497 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2498 ret = rbio_add_io_page(rbio, &bio_list,
2499 page, rbio->scrubp, pagenr, rbio->stripe_len);
2500 if (ret)
2501 goto cleanup;
2502 }
2503
2504 if (!is_replace)
2505 goto submit_write;
2506
2507 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2508 struct page *page;
2509
2510 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2511 ret = rbio_add_io_page(rbio, &bio_list, page,
2512 bbio->tgtdev_map[rbio->scrubp],
2513 pagenr, rbio->stripe_len);
2514 if (ret)
2515 goto cleanup;
2516 }
2517
2518 submit_write:
2519 nr_data = bio_list_size(&bio_list);
2520 if (!nr_data) {
2521 /* Every parity is right */
2522 rbio_orig_end_io(rbio, BLK_STS_OK);
2523 return;
2524 }
2525
2526 atomic_set(&rbio->stripes_pending, nr_data);
2527
2528 while (1) {
2529 bio = bio_list_pop(&bio_list);
2530 if (!bio)
2531 break;
2532
2533 bio->bi_private = rbio;
2534 bio->bi_end_io = raid_write_end_io;
2535 bio->bi_opf = REQ_OP_WRITE;
2536
2537 submit_bio(bio);
2538 }
2539 return;
2540
2541 cleanup:
2542 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2543
2544 while ((bio = bio_list_pop(&bio_list)))
2545 bio_put(bio);
2546 }
2547
2548 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2549 {
2550 if (stripe >= 0 && stripe < rbio->nr_data)
2551 return 1;
2552 return 0;
2553 }
2554
2555 /*
2556 * While we're doing the parity check and repair, we could have errors
2557 * in reading pages off the disk. This checks for errors and if we're
2558 * not able to read the page it'll trigger parity reconstruction. The
2559 * parity scrub will be finished after we've reconstructed the failed
2560 * stripes
2561 */
2562 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2563 {
2564 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2565 goto cleanup;
2566
2567 if (rbio->faila >= 0 || rbio->failb >= 0) {
2568 int dfail = 0, failp = -1;
2569
2570 if (is_data_stripe(rbio, rbio->faila))
2571 dfail++;
2572 else if (is_parity_stripe(rbio->faila))
2573 failp = rbio->faila;
2574
2575 if (is_data_stripe(rbio, rbio->failb))
2576 dfail++;
2577 else if (is_parity_stripe(rbio->failb))
2578 failp = rbio->failb;
2579
2580 /*
2581 * Because we cannot use the parity being scrubbed to repair
2582 * data, our repair capability is reduced by one failure.
2583 * (In the RAID5 case we cannot repair anything.)
2584 */
2585 if (dfail > rbio->bbio->max_errors - 1)
2586 goto cleanup;
2587
2588 /*
2589 * If all the data stripes are good and only the parity failed,
2590 * just rewrite the parity.
2591 */
2592 if (dfail == 0) {
2593 finish_parity_scrub(rbio, 0);
2594 return;
2595 }
2596
2597 /*
2598 * At this point we have one corrupted data stripe and one
2599 * corrupted parity on RAID6. If the corrupted parity is the one
2600 * being scrubbed, we can use the other (good) parity to repair
2601 * the data; otherwise the data stripe cannot be repaired.
2602 */
2603 if (failp != rbio->scrubp)
2604 goto cleanup;
2605
2606 __raid_recover_end_io(rbio);
2607 } else {
2608 finish_parity_scrub(rbio, 1);
2609 }
2610 return;
2611
2612 cleanup:
2613 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2614 }
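/*
 * Summary of the decisions above (illustrative):
 *
 *   no failed stripes             -> finish_parity_scrub(rbio, 1), i.e.
 *                                    recompute and compare the parity.
 *   only parity stripe(s) failed  -> finish_parity_scrub(rbio, 0), just
 *                                    rewrite the parity.
 *   one data stripe failed and the only failed parity (if any) is the
 *   one being scrubbed            -> __raid_recover_end_io() rebuilds the
 *                                    data and then finishes the scrub.
 *   anything worse                -> end the rbio with an I/O error.
 */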
2615
2616 /*
2617 * end io for the read phase of a parity scrub. All the bios here are
2618 * physical stripe bios we've read from the disk so we can recheck the
2619 * parity of the stripe.
2620 *
2621 * This will usually kick off finish_parity_scrub once all the bios are read
2622 * in, but it may trigger parity reconstruction if we had any errors along the way
2623 */
2624 static void raid56_parity_scrub_end_io(struct bio *bio)
2625 {
2626 struct btrfs_raid_bio *rbio = bio->bi_private;
2627
2628 if (bio->bi_status)
2629 fail_bio_stripe(rbio, bio);
2630 else
2631 set_bio_pages_uptodate(bio);
2632
2633 bio_put(bio);
2634
2635 if (!atomic_dec_and_test(&rbio->stripes_pending))
2636 return;
2637
2638 /*
2639 * this will normally call finish_parity_scrub to start our write
2640 * but if there are any failed stripes we'll reconstruct
2641 * from parity first
2642 */
2643 validate_rbio_for_parity_scrub(rbio);
2644 }
2645
2646 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2647 {
2648 int bios_to_read = 0;
2649 struct bio_list bio_list;
2650 int ret;
2651 int pagenr;
2652 int stripe;
2653 struct bio *bio;
2654
2655 bio_list_init(&bio_list);
2656
2657 ret = alloc_rbio_essential_pages(rbio);
2658 if (ret)
2659 goto cleanup;
2660
2661 atomic_set(&rbio->error, 0);
2662 /*
2663 * build a list of bios to read all the missing parts of this
2664 * stripe
2665 */
2666 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2667 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2668 struct page *page;
2669 /*
2670 * we want to find all the pages missing from
2671 * the rbio and read them from the disk. If
2672 * page_in_rbio finds a page in the bio list
2673 * we don't need to read it off the stripe.
2674 */
2675 page = page_in_rbio(rbio, stripe, pagenr, 1);
2676 if (page)
2677 continue;
2678
2679 page = rbio_stripe_page(rbio, stripe, pagenr);
2680 /*
2681 * the bio cache may have handed us an uptodate
2682 * page. If so, be happy and use it
2683 */
2684 if (PageUptodate(page))
2685 continue;
2686
2687 ret = rbio_add_io_page(rbio, &bio_list, page,
2688 stripe, pagenr, rbio->stripe_len);
2689 if (ret)
2690 goto cleanup;
2691 }
2692 }
2693
2694 bios_to_read = bio_list_size(&bio_list);
2695 if (!bios_to_read) {
2696 /*
2697 * this can happen if others have merged with
2698 * us; it means there is nothing left to read.
2699 * But if there are missing devices it may not be
2700 * safe to do the full stripe write yet.
2701 */
2702 goto finish;
2703 }
2704
2705 /*
2706 * the bbio may be freed once we submit the last bio. Make sure
2707 * not to touch it after that
2708 */
2709 atomic_set(&rbio->stripes_pending, bios_to_read);
2710 while (1) {
2711 bio = bio_list_pop(&bio_list);
2712 if (!bio)
2713 break;
2714
2715 bio->bi_private = rbio;
2716 bio->bi_end_io = raid56_parity_scrub_end_io;
2717 bio->bi_opf = REQ_OP_READ;
2718
2719 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2720
2721 submit_bio(bio);
2722 }
2723 /* the actual write will happen once the reads are done */
2724 return;
2725
2726 cleanup:
2727 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2728
2729 while ((bio = bio_list_pop(&bio_list)))
2730 bio_put(bio);
2731
2732 return;
2733
2734 finish:
2735 validate_rbio_for_parity_scrub(rbio);
2736 }
2737
2738 static void scrub_parity_work(struct btrfs_work *work)
2739 {
2740 struct btrfs_raid_bio *rbio;
2741
2742 rbio = container_of(work, struct btrfs_raid_bio, work);
2743 raid56_parity_scrub_stripe(rbio);
2744 }
2745
2746 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2747 {
2748 if (!lock_stripe_add(rbio))
2749 start_async_work(rbio, scrub_parity_work);
2750 }
2751
2752 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2753
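/*
 * Rough usage sketch (simplified and hedged; the real caller is the
 * scrub/dev-replace code, and the page loop below is illustrative): the
 * caller passes a zero-length bio to carry its completion handler, lets
 * find_logical_bio_stripe() mark the missing stripe via faila, then adds
 * its own destination pages so the reconstructed data lands directly in
 * them, and submits:
 *
 *	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
 *	if (!rbio)
 *		goto out;
 *	// for each page of the missing block the caller holds:
 *	//	raid56_add_scrub_pages(rbio, page, page_logical);
 *	raid56_submit_missing_rbio(rbio);
 */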
2754 struct btrfs_raid_bio *
2755 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2756 struct btrfs_bio *bbio, u64 length)
2757 {
2758 struct btrfs_raid_bio *rbio;
2759
2760 rbio = alloc_rbio(fs_info, bbio, length);
2761 if (IS_ERR(rbio))
2762 return NULL;
2763
2764 rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2765 bio_list_add(&rbio->bio_list, bio);
2766 /*
2767 * This is a special bio which is used to hold the completion handler
2768 * and make this missing-rebuild rbio look like the other rbio types
2769 */
2770 ASSERT(!bio->bi_iter.bi_size);
2771
2772 rbio->faila = find_logical_bio_stripe(rbio, bio);
2773 if (rbio->faila == -1) {
2774 BUG();
2775 kfree(rbio);
2776 return NULL;
2777 }
2778
2779 /*
2780 * When we get bbio, we have already increased bio_counter, record it
2781 * so we can free it at rbio_orig_end_io()
2782 */
2783 rbio->generic_bio_cnt = 1;
2784
2785 return rbio;
2786 }
2787
2788 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2789 {
2790 if (!lock_stripe_add(rbio))
2791 start_async_work(rbio, read_rebuild_work);
2792 }
2793