// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

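/* the most rbios we keep on the stripe cache LRU before evicting one */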
#define RBIO_CACHE_SIZE 1024

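/* gives 1 << 11 == 2048 buckets in the stripe hash table */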
#define BTRFS_STRIPE_HASH_TABLE_BITS 11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged. The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

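	/* number of stripes in the bbio, counting P/Q but not replace targets */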
	int real_stripes;

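	/* number of pages per stripe, i.e. stripe_len / PAGE_SIZE rounded up */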
	int stripe_npages;
	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

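	/* stripe index of the parity device being checked by a scrub */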
	int scrubp;
	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list. This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

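	/* bios counted against the fs-wide bio counter, dropped at end_io */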
	int generic_bio_cnt;

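	/* the rbio is freed once this refcount drops to zero */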
	refcount_t refs;

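	/* number of stripe bios still in flight */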
	atomic_t stripes_pending;

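	/* number of stripe bios that completed with an error */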
	atomic_t error;
	/*
	 * these are two arrays of pointers. We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list. Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;

	/* allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;

	/* allocated with stripe_npages-many bits for finish_*() calls */
	unsigned long *finish_pbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct btrfs_work *work);

static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
	btrfs_init_work(&rbio->work, work_func, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array. We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		copy_page(d, s);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit. We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination. The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache. The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO. We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache. It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api. It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
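 *
 * e.g. MAX_XOR_BLOCKS is currently 4, so xoring 6 source blocks into
 * the destination takes two calls: one with 4 sources, one with 2.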
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us. We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * a parity scrub needs to read the full stripe from the drive,
	 * then check and repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}

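/*
 * stripe_pages is laid out stripe by stripe: stripe_npages pages for
 * stripe 0, then stripe 1, and so on, with P (and Q on raid6) last.
 */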
static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}

/*
 * The first stripe in the table for a logical address
 * has the lock. rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet. The rbio is given
 * the lock and 0 is returned. The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner. The rbio is freed and the IO will
 * start automatically along with the existing rbio. 1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list. When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission. If we return 1, the caller must assume the rbio has
 * already been freed.
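 *
 * e.g. full_stripe_write() and partial_stripe_write() below follow
 * exactly this pattern around the return value.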
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones. We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}

/*
 * called as rmw or parity rebuild is completed. If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * end io function used by finish_rmw. When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have written all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else. This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
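 *
 * e.g. a 64KiB stripe across a 4 device array with 4KiB pages needs
 * 16 * 4 = 64 pages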
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}

/*
 * allocation and initial setup for the btrfs_raid_bio. Note that
 * this does not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) +
		       sizeof(*rbio->stripe_pages) * num_pages +
		       sizeof(*rbio->bio_pages) * num_pages +
		       sizeof(*rbio->finish_pointers) * real_stripes +
		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
		       sizeof(*rbio->finish_pbitmap) *
				BITS_TO_LONGS(stripe_npages),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages, bio_pages, etc arrays point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_pages, num_pages);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
#undef CONSUME_ALLOC

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	btrfs_io_bio(bio)->device = stripe->dev;
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk. This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction. The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result. This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}

/*
 * this is called from one of two situations. We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	bool has_qstripe;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon. If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (has_qstripe) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing. Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q. Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}

/*
 * helper to find the stripe number for a given bio. Used to figure out which
 * stripe has failed. This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	int i;
	struct btrfs_bio_stripe *stripe;

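	/* bi_sector counts 512 byte sectors; convert to a byte address */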
	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		if (in_range(physical, stripe->physical, rbio->stripe_len) &&
		    stripe->dev->bdev &&
		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping). Used to figure out which stripe has
 * failed. This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	int i;

	for (i = 0; i < rbio->nr_data; i++) {
		u64 stripe_start = rbio->bbio->raid_map[i];

		if (in_range(logical, stripe_start, rbio->stripe_len))
			return i;
	}
	return -1;
}

/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}

/*
 * this sets each page in the bio uptodate. It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, iter_all)
		SetPageUptodate(bvec->bv_page);
}

/*
 * end io for the read phase of the rmw cycle. All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:

	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*
 * the stripe must be locked by the caller. It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk. If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page. If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio. Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}

/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		start_async_work(rbio, rmw_work);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe. So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list. When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			int ret;

			/* we have a full stripe, send it down */
			ret = full_stripe_write(cur);
			BUG_ON(ret);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last)
		__raid56_parity_write(last);
	kfree(plug);
}

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}

1783 /*
1784 * all parity reconstruction happens here. We've read in everything
1785 * we can find from the drives and this does the heavy lifting of
1786 * sorting the good from the bad.
1787 */
__raid_recover_end_io(struct btrfs_raid_bio * rbio)1788 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1789 {
1790 int pagenr, stripe;
1791 void **pointers;
1792 int faila = -1, failb = -1;
1793 struct page *page;
1794 blk_status_t err;
1795 int i;
1796
1797 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1798 if (!pointers) {
1799 err = BLK_STS_RESOURCE;
1800 goto cleanup_io;
1801 }
1802
1803 faila = rbio->faila;
1804 failb = rbio->failb;
1805
1806 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1807 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1808 spin_lock_irq(&rbio->bio_list_lock);
1809 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1810 spin_unlock_irq(&rbio->bio_list_lock);
1811 }
1812
1813 index_rbio_pages(rbio);
1814
1815 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1816 /*
1817 * Now we just use bitmap to mark the horizontal stripes in
1818 * which we have data when doing parity scrub.
1819 */
1820 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1821 !test_bit(pagenr, rbio->dbitmap))
1822 continue;
1823
1824 /* setup our array of pointers with pages
1825 * from each stripe
1826 */
1827 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1828 /*
1829 * if we're rebuilding a read, we have to use
1830 * pages from the bio list
1831 */
1832 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1833 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1834 (stripe == faila || stripe == failb)) {
1835 page = page_in_rbio(rbio, stripe, pagenr, 0);
1836 } else {
1837 page = rbio_stripe_page(rbio, stripe, pagenr);
1838 }
1839 pointers[stripe] = kmap(page);
1840 }
1841
1842 /* all raid6 handling here */
1843 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1844 /*
1845 * single failure, rebuild from parity raid5
1846 * style
1847 */
1848 if (failb < 0) {
1849 if (faila == rbio->nr_data) {
1850 /*
1851 * Just the P stripe has failed, without
1852 * a bad data or Q stripe.
1853 * TODO, we should redo the xor here.
1854 */
1855 err = BLK_STS_IOERR;
1856 goto cleanup;
1857 }
1858 /*
1859 * a single failure in raid6 is rebuilt
1860 * in the pstripe code below
1861 */
1862 goto pstripe;
1863 }
1864
1865 /* make sure our ps and qs are in order */
1866 if (faila > failb)
1867 swap(faila, failb);
1868
1869 /* if the q stripe is failed, do a pstripe reconstruction
1870 * from the xors.
1871 * If both the q stripe and the P stripe are failed, we're
1872 * here due to a crc mismatch and we can't give them the
1873 * data they want
1874 */
1875 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1876 if (rbio->bbio->raid_map[faila] ==
1877 RAID5_P_STRIPE) {
1878 err = BLK_STS_IOERR;
1879 goto cleanup;
1880 }
1881 /*
1882 * otherwise we have one bad data stripe and
1883 * a good P stripe. raid5!
1884 */
1885 goto pstripe;
1886 }
1887
1888 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1889 raid6_datap_recov(rbio->real_stripes,
1890 PAGE_SIZE, faila, pointers);
1891 } else {
1892 raid6_2data_recov(rbio->real_stripes,
1893 PAGE_SIZE, faila, failb,
1894 pointers);
1895 }
1896 } else {
1897 void *p;
1898
1899 /* rebuild from P stripe here (raid5 or raid6) */
1900 BUG_ON(failb != -1);
1901 pstripe:
1902 /* Copy parity block into failed block to start with */
1903 copy_page(pointers[faila], pointers[rbio->nr_data]);
1904
1905 /* rearrange the pointer array */
1906 p = pointers[faila];
1907 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1908 pointers[stripe] = pointers[stripe + 1];
1909 pointers[rbio->nr_data - 1] = p;
1910
1911 /* xor in the rest */
1912 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1913 }
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate. This way finish_rmw will
		 * know they can be trusted. If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	/*
	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
	 * valid rbio which is consistent with ondisk content, thus such a
	 * valid rbio can be cached to avoid further disk reads.
	 */
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		/*
		 * - In case of two failures, where rbio->failb != -1:
		 *
		 *   Do not cache this rbio since the above read reconstruction
		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
		 *   changed some content of stripes which are not identical to
		 *   on-disk content any more, otherwise, a later write/recover
		 *   may steal stripe_pages from this rbio and end up with
		 *   corruptions or rebuild failures.
		 *
		 * - In case of single failure, where rbio->failb == -1:
		 *
		 *   Cache this rbio iff the above read reconstruction is
		 *   executed without problems.
		 */
		if (err == BLK_STS_OK && rbio->failb < 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
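
/*
 * Illustrative sketch of the RAID5 XOR rebuild above (not part of the
 * kernel build): the failed buffer is seeded with the parity, moved to
 * the end of the pointer array, and the surviving data buffers are
 * XORed into it.  All names and sizes here are made up for the example;
 * the real code uses copy_page() and run_xor().
 */
#if 0
#include <string.h>

#define EX_STRIPE_SIZE 4096

/* XOR @src_cnt source buffers into @ptrs[src_cnt], like run_xor() */
static void example_xor(void **ptrs, int src_cnt, size_t len)
{
	unsigned char *dest = ptrs[src_cnt];
	size_t i;
	int s;

	for (s = 0; s < src_cnt; s++) {
		unsigned char *src = ptrs[s];

		for (i = 0; i < len; i++)
			dest[i] ^= src[i];
	}
}

/*
 * Rebuild data stripe @faila from parity: @ptrs holds nr_data data
 * buffers followed by the parity buffer at ptrs[nr_data].
 */
static void example_raid5_rebuild(void **ptrs, int nr_data, int faila)
{
	void *p;
	int s;

	/* seed the failed buffer with the parity block */
	memcpy(ptrs[faila], ptrs[nr_data], EX_STRIPE_SIZE);

	/* rotate so the buffer being rebuilt becomes the xor target */
	p = ptrs[faila];
	for (s = faila; s < nr_data - 1; s++)
		ptrs[s] = ptrs[s + 1];
	ptrs[nr_data - 1] = p;

	/* parity ^ all surviving data == the missing data */
	example_xor(ptrs, nr_data - 1, EX_STRIPE_SIZE);
}
#endif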

/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

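	/*
	 * Note: bbio->max_errors is the redundancy of the profile (1 for
	 * RAID5, 2 for RAID6), so e.g. a RAID6 rbio with three failed
	 * stripe reads is beyond repair and is failed here.
	 */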
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}

/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed. Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			return 0;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio. Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}

	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}

/*
 * the main entry point for reads from the higher layers. This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
	"%s could not find the bad stripe in raid56, so the data cannot be recovered (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror_num == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror_num == 3' is to fail the p stripe and
		 * reconstruct from the q stripe. 'mirror_num > 3' is to
		 * fail a data stripe and reconstruct from the p+q stripes.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}
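
	/*
	 * Example (hypothetical 4-device RAID6, so real_stripes == 4 with
	 * P at index 2 and Q at index 3): mirror_num == 3 gives
	 * failb = 4 - 2 = 2, failing P and rebuilding from Q;
	 * mirror_num == 4 gives failb = 4 - 3 = 1, failing a data stripe
	 * and rebuilding from P+Q.
	 */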

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits. We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure that all the pages added to the scrub/replace
 * raid bio are correct and will not be changed during the scrub/replace,
 * i.e. those pages only hold metadata or file data covered by a checksum.
 */
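
/*
 * Roughly, the caller (scrub.c) drives this in three steps (a sketch,
 * not the literal caller code):
 *
 *	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
 *					      stripe_len, scrub_dev,
 *					      dbitmap, stripe_nsectors);
 *	// for each checksummed data page held for the full stripe:
 *	raid56_add_scrub_pages(rbio, page, logical);
 *	raid56_parity_submit_scrub_rbio(rbio);
 */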

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* For now we only support the case where sectorsize equals PAGE_SIZE */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
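
/*
 * Example with made-up numbers: if raid_map[0] (the logical start of the
 * full stripe) is 1M and @logical is 1M + 12K, then stripe_offset is 12K
 * and, with 4K pages, the page lands in bio_pages[3].
 */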

/*
 * We only scrub the parity for the horizontal stripes where we have correct
 * data, so we don't need to allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	bool has_qstripe;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this area of
	 * the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (has_qstripe) {
		/* RAID6, allocate and map temp space for the Q stripe */
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
		pointers[rbio->real_stripes - 1] = kmap(q_page);
	}

	atomic_set(&rbio->error, 0);

	/* Map the parity stripe just once */
	pointers[nr_data] = kmap(p_page);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		if (has_qstripe) {
			/* RAID6, call the library function to fill in our P/Q */
			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			copy_page(parity, pointers[rbio->scrubp]);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < nr_data; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	kunmap(p_page);
	__free_page(p_page);
	if (q_page) {
		kunmap(q_page);
		__free_page(q_page);
	}

writeback:
	/*
	 * time to start writing. Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q. Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
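
/*
 * Illustrative sketch of the check-and-repair step above (not part of
 * the kernel build): recompute the expected parity from the data
 * buffers, compare it with what is on disk, and rewrite it only on a
 * mismatch.  All names here are made up for the example; the real code
 * uses raid6_call.gen_syndrome()/run_xor() and clears dbitmap bits for
 * pages that don't need writeback.
 */
#if 0
#include <stdbool.h>
#include <string.h>

#define EX_PAGE_SIZE 4096

/* Return true if @disk_parity had to be repaired. */
static bool example_scrub_parity(unsigned char *const *data, int nr_data,
				 unsigned char *disk_parity)
{
	unsigned char expected[EX_PAGE_SIZE];
	size_t i;
	int s;

	/* recompute the expected RAID5 parity: XOR of all data pages */
	memcpy(expected, data[0], EX_PAGE_SIZE);
	for (s = 1; s < nr_data; s++)
		for (i = 0; i < EX_PAGE_SIZE; i++)
			expected[i] ^= data[s][i];

	if (memcmp(disk_parity, expected, EX_PAGE_SIZE) == 0)
		return false;	/* parity is right, no writeback needed */

	memcpy(disk_parity, expected, EX_PAGE_SIZE);
	return true;		/* caller must write the page back */
}
#endif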

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk. This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction. The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we cannot use the parity that is being scrubbed
		 * to repair data, our repair capability is reduced by one.
		 * (In the RAID5 case we cannot repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;
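
		/*
		 * E.g. on RAID6 max_errors is 2, so with the scrub parity
		 * unusable we can tolerate at most one failed data stripe;
		 * two failed data stripes are beyond repair here.
		 */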

		/*
		 * If all the data is good, it means only the parity is bad;
		 * just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Getting here means we have one corrupted data stripe and
		 * one corrupted parity on RAID6. If the corrupted parity is
		 * the one being scrubbed, we can luckily use the other
		 * parity to repair the data; otherwise the data stripe
		 * cannot be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*
 * end io for the read phase of the parity scrub. All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate the
 * parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk. If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page. If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio. Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_parity_work);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io()
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, read_rebuild_work);
}