1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2012 Fusion-io  All rights reserved.
4  * Copyright (C) 2012 Intel Corp. All rights reserved.
5  */
6 
7 #include <linux/sched.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/raid/pq.h>
12 #include <linux/hash.h>
13 #include <linux/list_sort.h>
14 #include <linux/raid/xor.h>
15 #include <linux/mm.h>
16 #include "misc.h"
17 #include "ctree.h"
18 #include "disk-io.h"
19 #include "volumes.h"
20 #include "raid56.h"
21 #include "async-thread.h"
22 
23 /* set when additional merges to this rbio are not allowed */
24 #define RBIO_RMW_LOCKED_BIT	1
25 
26 /*
27  * set when this rbio is sitting in the hash, but it is just a cache
28  * of past RMW
29  */
30 #define RBIO_CACHE_BIT		2
31 
32 /*
33  * set when it is safe to trust the stripe_pages for caching
34  */
35 #define RBIO_CACHE_READY_BIT	3
36 
37 #define RBIO_CACHE_SIZE 1024
38 
39 #define BTRFS_STRIPE_HASH_TABLE_BITS				11
40 
41 /* Used by the raid56 code to lock stripes for read/modify/write */
42 struct btrfs_stripe_hash {
43 	struct list_head hash_list;
44 	spinlock_t lock;
45 };
46 
47 /* Used by the raid56 code to lock stripes for read/modify/write */
48 struct btrfs_stripe_hash_table {
49 	struct list_head stripe_cache;
50 	spinlock_t cache_lock;
51 	int cache_size;
52 	struct btrfs_stripe_hash table[];
53 };
54 
55 enum btrfs_rbio_ops {
56 	BTRFS_RBIO_WRITE,
57 	BTRFS_RBIO_READ_REBUILD,
58 	BTRFS_RBIO_PARITY_SCRUB,
59 	BTRFS_RBIO_REBUILD_MISSING,
60 };
61 
62 struct btrfs_raid_bio {
63 	struct btrfs_fs_info *fs_info;
64 	struct btrfs_io_context *bioc;
65 
66 	/* while we're doing rmw on a stripe
67 	 * we put it into a hash table so we can
68 	 * lock the stripe and merge more rbios
69 	 * into it.
70 	 */
71 	struct list_head hash_list;
72 
73 	/*
74 	 * LRU list for the stripe cache
75 	 */
76 	struct list_head stripe_cache;
77 
78 	/*
79 	 * for scheduling work in the helper threads
80 	 */
81 	struct btrfs_work work;
82 
83 	/*
84 	 * bio list and bio_list_lock are used
85 	 * to add more bios into the stripe
86 	 * in hopes of avoiding the full rmw
87 	 */
88 	struct bio_list bio_list;
89 	spinlock_t bio_list_lock;
90 
91 	/* also protected by the bio_list_lock, the
92 	 * plug list is used by the plugging code
93 	 * to collect partial bios while plugged.  The
94 	 * stripe locking code also uses it to hand off
95 	 * the stripe lock to the next pending IO
96 	 */
97 	struct list_head plug_list;
98 
99 	/*
100 	 * flags that tell us if it is safe to
101 	 * merge with this bio
102 	 */
103 	unsigned long flags;
104 
105 	/* size of each individual stripe on disk */
106 	int stripe_len;
107 
108 	/* number of data stripes (no p/q) */
109 	int nr_data;
110 
111 	int real_stripes;
112 
113 	int stripe_npages;
114 	/*
115 	 * set if we're doing a parity rebuild
116 	 * for a read from higher up, which is handled
117 	 * differently from a parity rebuild as part of
118 	 * rmw
119 	 */
120 	enum btrfs_rbio_ops operation;
121 
122 	/* first bad stripe */
123 	int faila;
124 
125 	/* second bad stripe (for raid6 use) */
126 	int failb;
127 
128 	int scrubp;
129 	/*
130 	 * number of pages needed to represent the full
131 	 * stripe
132 	 */
133 	int nr_pages;
134 
135 	/*
136 	 * size of all the bios in the bio_list.  This
137 	 * helps us decide if the rbio maps to a full
138 	 * stripe or not
139 	 */
140 	int bio_list_bytes;
141 
142 	int generic_bio_cnt;
143 
144 	refcount_t refs;
145 
146 	atomic_t stripes_pending;
147 
148 	atomic_t error;
149 	/*
150 	 * these are two arrays of pointers.  We allocate the
151 	 * rbio big enough to hold them both and set up their
152 	 * locations when the rbio is allocated
153 	 */
154 
155 	/* pointers to pages that we allocated for
156 	 * reading/writing stripes directly from the disk (including P/Q)
157 	 */
158 	struct page **stripe_pages;
159 
160 	/*
161 	 * pointers to the pages in the bio_list.  Stored
162 	 * here for faster lookup
163 	 */
164 	struct page **bio_pages;
165 
166 	/*
167 	 * bitmap to record which horizontal stripe has data
168 	 */
169 	unsigned long *dbitmap;
170 
171 	/* allocated with real_stripes-many pointers for finish_*() calls */
172 	void **finish_pointers;
173 
174 	/* allocated with stripe_npages-many bits for finish_*() calls */
175 	unsigned long *finish_pbitmap;
176 };
177 
178 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
179 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
180 static void rmw_work(struct btrfs_work *work);
181 static void read_rebuild_work(struct btrfs_work *work);
182 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
183 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
184 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
185 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
186 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
187 
188 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
189 					 int need_check);
190 static void scrub_parity_work(struct btrfs_work *work);
191 
192 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
193 {
194 	btrfs_init_work(&rbio->work, work_func, NULL, NULL);
195 	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
196 }
197 
198 /*
199  * the stripe hash table is used for locking, and to collect
200  * bios in hopes of making a full stripe
201  */
202 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
203 {
204 	struct btrfs_stripe_hash_table *table;
205 	struct btrfs_stripe_hash_table *x;
206 	struct btrfs_stripe_hash *cur;
207 	struct btrfs_stripe_hash *h;
208 	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
209 	int i;
210 
211 	if (info->stripe_hash_table)
212 		return 0;
213 
214 	/*
215 	 * The table is large, starting with order 4 and can go as high as
216 	 * order 7 in case lock debugging is turned on.
217 	 *
218 	 * Try harder to allocate and fallback to vmalloc to lower the chance
219 	 * of a failing mount.
220 	 */
221 	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
222 	if (!table)
223 		return -ENOMEM;
224 
225 	spin_lock_init(&table->cache_lock);
226 	INIT_LIST_HEAD(&table->stripe_cache);
227 
228 	h = table->table;
229 
230 	for (i = 0; i < num_entries; i++) {
231 		cur = h + i;
232 		INIT_LIST_HEAD(&cur->hash_list);
233 		spin_lock_init(&cur->lock);
234 	}
235 
236 	x = cmpxchg(&info->stripe_hash_table, NULL, table);
237 	kvfree(x);
238 	return 0;
239 }
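
/*
 * A rough size check on the comment above (illustrative, 64-bit,
 * lockdep off): 1 << 11 = 2048 buckets, each holding a list_head
 * (16 bytes) plus a spinlock_t, so the flexible array alone is on
 * the order of 48K and the allocation rounds up to order 4 (64K).
 * Lockdep inflates spinlock_t enough to push this toward order 7.
 */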
240 
241 /*
242  * caching an rbio means to copy anything from the
243  * bio_pages array into the stripe_pages array.  We
244  * use the page uptodate bit in the stripe cache array
245  * to indicate if it has valid data
246  *
247  * once the caching is done, we set the cache ready
248  * bit.
249  */
250 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
251 {
252 	int i;
253 	int ret;
254 
255 	ret = alloc_rbio_pages(rbio);
256 	if (ret)
257 		return;
258 
259 	for (i = 0; i < rbio->nr_pages; i++) {
260 		if (!rbio->bio_pages[i])
261 			continue;
262 
263 		copy_highpage(rbio->stripe_pages[i], rbio->bio_pages[i]);
264 		SetPageUptodate(rbio->stripe_pages[i]);
265 	}
266 	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
267 }
268 
269 /*
270  * we hash on the first logical address of the stripe
271  */
272 static int rbio_bucket(struct btrfs_raid_bio *rbio)
273 {
274 	u64 num = rbio->bioc->raid_map[0];
275 
276 	/*
277 	 * we shift down quite a bit.  We're using byte
278 	 * addressing, and most of the lower bits are zeros.
279 	 * This tends to upset hash_64, and it consistently
280 	 * returns just one or two different values.
281 	 *
282 	 * shifting off the lower bits fixes things.
283 	 */
284 	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
285 }
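
/*
 * Worked example (illustrative numbers): a full stripe starting at
 * logical byte 0x2400000 has at least 16 low zero bits, so hashing
 * the raw address would feed hash_64() mostly-zero input.  After the
 * ">> 16" we hash 0x240 instead, spreading stripes across all
 * 1 << BTRFS_STRIPE_HASH_TABLE_BITS = 2048 buckets.
 */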
286 
287 /*
288  * stealing an rbio means taking all the uptodate pages from the stripe
289  * array in the source rbio and putting them into the destination rbio
290  */
291 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
292 {
293 	int i;
294 	struct page *s;
295 	struct page *d;
296 
297 	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
298 		return;
299 
300 	for (i = 0; i < dest->nr_pages; i++) {
301 		s = src->stripe_pages[i];
302 		if (!s || !PageUptodate(s)) {
303 			continue;
304 		}
305 
306 		d = dest->stripe_pages[i];
307 		if (d)
308 			__free_page(d);
309 
310 		dest->stripe_pages[i] = s;
311 		src->stripe_pages[i] = NULL;
312 	}
313 }
314 
315 /*
316  * merging means we take the bio_list from the victim and
317  * splice it into the destination.  The victim should
318  * be discarded afterwards.
319  *
320  * must be called with dest->bio_list_lock held
321  */
322 static void merge_rbio(struct btrfs_raid_bio *dest,
323 		       struct btrfs_raid_bio *victim)
324 {
325 	bio_list_merge(&dest->bio_list, &victim->bio_list);
326 	dest->bio_list_bytes += victim->bio_list_bytes;
327 	/* Also inherit the bitmaps from @victim. */
328 	bitmap_or(dest->dbitmap, victim->dbitmap, dest->dbitmap,
329 		  dest->stripe_npages);
330 	dest->generic_bio_cnt += victim->generic_bio_cnt;
331 	bio_list_init(&victim->bio_list);
332 }
333 
334 /*
335  * used to prune items that are in the cache.  The caller
336  * must hold the hash table lock.
337  */
338 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
339 {
340 	int bucket = rbio_bucket(rbio);
341 	struct btrfs_stripe_hash_table *table;
342 	struct btrfs_stripe_hash *h;
343 	int freeit = 0;
344 
345 	/*
346 	 * check the bit again under the hash table lock.
347 	 */
348 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
349 		return;
350 
351 	table = rbio->fs_info->stripe_hash_table;
352 	h = table->table + bucket;
353 
354 	/* hold the lock for the bucket because we may be
355 	 * removing it from the hash table
356 	 */
357 	spin_lock(&h->lock);
358 
359 	/*
360 	 * hold the lock for the bio list because we need
361 	 * to make sure the bio list is empty
362 	 */
363 	spin_lock(&rbio->bio_list_lock);
364 
365 	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
366 		list_del_init(&rbio->stripe_cache);
367 		table->cache_size -= 1;
368 		freeit = 1;
369 
370 		/* if the bio list isn't empty, this rbio is
371 		 * still involved in an IO.  We take it out
372 		 * of the cache list, and drop the ref that
373 		 * was held for the list.
374 		 *
375 		 * If the bio_list was empty, we also remove
376 		 * the rbio from the hash_table, and drop
377 		 * the corresponding ref
378 		 */
379 		if (bio_list_empty(&rbio->bio_list)) {
380 			if (!list_empty(&rbio->hash_list)) {
381 				list_del_init(&rbio->hash_list);
382 				refcount_dec(&rbio->refs);
383 				BUG_ON(!list_empty(&rbio->plug_list));
384 			}
385 		}
386 	}
387 
388 	spin_unlock(&rbio->bio_list_lock);
389 	spin_unlock(&h->lock);
390 
391 	if (freeit)
392 		__free_raid_bio(rbio);
393 }
394 
395 /*
396  * prune a given rbio from the cache
397  */
398 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
399 {
400 	struct btrfs_stripe_hash_table *table;
401 	unsigned long flags;
402 
403 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
404 		return;
405 
406 	table = rbio->fs_info->stripe_hash_table;
407 
408 	spin_lock_irqsave(&table->cache_lock, flags);
409 	__remove_rbio_from_cache(rbio);
410 	spin_unlock_irqrestore(&table->cache_lock, flags);
411 }
412 
413 /*
414  * remove everything in the cache
415  */
416 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
417 {
418 	struct btrfs_stripe_hash_table *table;
419 	unsigned long flags;
420 	struct btrfs_raid_bio *rbio;
421 
422 	table = info->stripe_hash_table;
423 
424 	spin_lock_irqsave(&table->cache_lock, flags);
425 	while (!list_empty(&table->stripe_cache)) {
426 		rbio = list_entry(table->stripe_cache.next,
427 				  struct btrfs_raid_bio,
428 				  stripe_cache);
429 		__remove_rbio_from_cache(rbio);
430 	}
431 	spin_unlock_irqrestore(&table->cache_lock, flags);
432 }
433 
434 /*
435  * remove all cached entries and free the hash table
436  * used by unmount
437  */
438 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
439 {
440 	if (!info->stripe_hash_table)
441 		return;
442 	btrfs_clear_rbio_cache(info);
443 	kvfree(info->stripe_hash_table);
444 	info->stripe_hash_table = NULL;
445 }
446 
447 /*
448  * insert an rbio into the stripe cache.  It
449  * must have already been prepared by calling
450  * cache_rbio_pages
451  *
452  * If this rbio was already cached, it gets
453  * moved to the front of the lru.
454  *
455  * If the size of the rbio cache is too big, we
456  * prune an item.
457  */
458 static void cache_rbio(struct btrfs_raid_bio *rbio)
459 {
460 	struct btrfs_stripe_hash_table *table;
461 	unsigned long flags;
462 
463 	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
464 		return;
465 
466 	table = rbio->fs_info->stripe_hash_table;
467 
468 	spin_lock_irqsave(&table->cache_lock, flags);
469 	spin_lock(&rbio->bio_list_lock);
470 
471 	/* bump our ref if we were not in the list before */
472 	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
473 		refcount_inc(&rbio->refs);
474 
475 	if (!list_empty(&rbio->stripe_cache)) {
476 		list_move(&rbio->stripe_cache, &table->stripe_cache);
477 	} else {
478 		list_add(&rbio->stripe_cache, &table->stripe_cache);
479 		table->cache_size += 1;
480 	}
481 
482 	spin_unlock(&rbio->bio_list_lock);
483 
484 	if (table->cache_size > RBIO_CACHE_SIZE) {
485 		struct btrfs_raid_bio *found;
486 
487 		found = list_entry(table->stripe_cache.prev,
488 				  struct btrfs_raid_bio,
489 				  stripe_cache);
490 
491 		if (found != rbio)
492 			__remove_rbio_from_cache(found);
493 	}
494 
495 	spin_unlock_irqrestore(&table->cache_lock, flags);
496 }
497 
498 /*
499  * helper function to run the xor_blocks api.  It is only
500  * able to do MAX_XOR_BLOCKS at a time, so we need to
501  * loop through.
502  */
503 static void run_xor(void **pages, int src_cnt, ssize_t len)
504 {
505 	int src_off = 0;
506 	int xor_src_cnt = 0;
507 	void *dest = pages[src_cnt];
508 
509 	while (src_cnt > 0) {
510 		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
511 		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
512 
513 		src_cnt -= xor_src_cnt;
514 		src_off += xor_src_cnt;
515 	}
516 }
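
/*
 * Typical call pattern (this is the raid5 path in finish_rmw()
 * below): the destination page sits at pages[src_cnt], seeded with
 * the first source, and the remaining sources are xored in:
 *
 *	copy_page(pointers[nr_data], pointers[0]);
 *	run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 */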
517 
518 /*
519  * Returns true if the bio list inside this rbio covers an entire stripe (no
520  * rmw required).
521  */
522 static int rbio_is_full(struct btrfs_raid_bio *rbio)
523 {
524 	unsigned long flags;
525 	unsigned long size = rbio->bio_list_bytes;
526 	int ret = 1;
527 
528 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
529 	if (size != rbio->nr_data * rbio->stripe_len)
530 		ret = 0;
531 	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
532 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
533 
534 	return ret;
535 }
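
/*
 * Example (illustrative sizes): with 2 data stripes of 64K each, the
 * rbio is full once the queued bios cover exactly 128K; anything
 * larger would trip the BUG_ON above.
 */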
536 
537 /*
538  * returns 1 if it is safe to merge two rbios together.
539  * The merging is safe if the two rbios correspond to
540  * the same stripe and if they are both going in the same
541  * direction (read vs write), and if neither one is
542  * locked for final IO
543  *
544  * The caller is responsible for locking such that
545  * rmw_locked is safe to test
546  */
547 static int rbio_can_merge(struct btrfs_raid_bio *last,
548 			  struct btrfs_raid_bio *cur)
549 {
550 	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
551 	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
552 		return 0;
553 
554 	/*
555 	 * we can't merge with cached rbios, since the
556 	 * idea is that when we merge the destination
557 	 * rbio is going to run our IO for us.  We can
558 	 * steal from cached rbios though, other functions
559 	 * handle that.
560 	 */
561 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
562 	    test_bit(RBIO_CACHE_BIT, &cur->flags))
563 		return 0;
564 
565 	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
566 		return 0;
567 
568 	/* we can't merge with different operations */
569 	if (last->operation != cur->operation)
570 		return 0;
571 	/*
572 	 * A parity scrub needs to read the full stripe from the drive,
573 	 * then check and repair the parity and write the new results.
574 	 *
575 	 * We're not allowed to add any new bios to the
576 	 * bio list here, anyone else that wants to
577 	 * change this stripe needs to do their own rmw.
578 	 */
579 	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
580 		return 0;
581 
582 	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
583 		return 0;
584 
585 	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
586 		int fa = last->faila;
587 		int fb = last->failb;
588 		int cur_fa = cur->faila;
589 		int cur_fb = cur->failb;
590 
591 		if (last->faila >= last->failb) {
592 			fa = last->failb;
593 			fb = last->faila;
594 		}
595 
596 		if (cur->faila >= cur->failb) {
597 			cur_fa = cur->failb;
598 			cur_fb = cur->faila;
599 		}
600 
601 		if (fa != cur_fa || fb != cur_fb)
602 			return 0;
603 	}
604 	return 1;
605 }
606 
607 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
608 				  int index)
609 {
610 	return stripe * rbio->stripe_npages + index;
611 }
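
/*
 * Example (illustrative sizes): with a 64K stripe and 4K pages,
 * stripe_npages is 16, so page 3 of stripe 2 sits at index
 * 2 * 16 + 3 = 35 in the flat stripe_pages array.
 */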
612 
613 /*
614  * these are just the pages from the rbio array, not from anything
615  * the FS sent down to us
616  */
617 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
618 				     int index)
619 {
620 	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
621 }
622 
623 /*
624  * helper to index into the pstripe
625  */
626 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
627 {
628 	return rbio_stripe_page(rbio, rbio->nr_data, index);
629 }
630 
631 /*
632  * helper to index into the qstripe, returns null
633  * if there is no qstripe
634  */
635 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
636 {
637 	if (rbio->nr_data + 1 == rbio->real_stripes)
638 		return NULL;
639 	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
640 }
641 
642 /*
643  * The first stripe in the table for a logical address
644  * has the lock.  rbios are added in one of three ways:
645  *
646  * 1) Nobody has the stripe locked yet.  The rbio is given
647  * the lock and 0 is returned.  The caller must start the IO
648  * themselves.
649  *
650  * 2) Someone has the stripe locked, but we're able to merge
651  * with the lock owner.  The rbio is freed and the IO will
652  * start automatically along with the existing rbio.  1 is returned.
653  *
654  * 3) Someone has the stripe locked, but we're not able to merge.
655  * The rbio is added to the lock owner's plug list, or merged into
656  * an rbio already on the plug list.  When the lock owner unlocks,
657  * the next rbio on the list is run and the IO is started automatically.
658  * 1 is returned
659  *
660  * If we return 0, the caller still owns the rbio and must continue with
661  * IO submission.  If we return 1, the caller must assume the rbio has
662  * already been freed.
663  */
664 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
665 {
666 	struct btrfs_stripe_hash *h;
667 	struct btrfs_raid_bio *cur;
668 	struct btrfs_raid_bio *pending;
669 	unsigned long flags;
670 	struct btrfs_raid_bio *freeit = NULL;
671 	struct btrfs_raid_bio *cache_drop = NULL;
672 	int ret = 0;
673 
674 	h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
675 
676 	spin_lock_irqsave(&h->lock, flags);
677 	list_for_each_entry(cur, &h->hash_list, hash_list) {
678 		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
679 			continue;
680 
681 		spin_lock(&cur->bio_list_lock);
682 
683 		/* Can we steal this cached rbio's pages? */
684 		if (bio_list_empty(&cur->bio_list) &&
685 		    list_empty(&cur->plug_list) &&
686 		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
687 		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
688 			list_del_init(&cur->hash_list);
689 			refcount_dec(&cur->refs);
690 
691 			steal_rbio(cur, rbio);
692 			cache_drop = cur;
693 			spin_unlock(&cur->bio_list_lock);
694 
695 			goto lockit;
696 		}
697 
698 		/* Can we merge into the lock owner? */
699 		if (rbio_can_merge(cur, rbio)) {
700 			merge_rbio(cur, rbio);
701 			spin_unlock(&cur->bio_list_lock);
702 			freeit = rbio;
703 			ret = 1;
704 			goto out;
705 		}
706 
707 
708 		/*
709 		 * We couldn't merge with the running rbio, see if we can merge
710 		 * with the pending ones.  We don't have to check for rmw_locked
711 		 * because there is no way they are inside finish_rmw right now
712 		 */
713 		list_for_each_entry(pending, &cur->plug_list, plug_list) {
714 			if (rbio_can_merge(pending, rbio)) {
715 				merge_rbio(pending, rbio);
716 				spin_unlock(&cur->bio_list_lock);
717 				freeit = rbio;
718 				ret = 1;
719 				goto out;
720 			}
721 		}
722 
723 		/*
724 		 * No merging, put us on the tail of the plug list, our rbio
725 		 * will be started when the currently running rbio unlocks
726 		 */
727 		list_add_tail(&rbio->plug_list, &cur->plug_list);
728 		spin_unlock(&cur->bio_list_lock);
729 		ret = 1;
730 		goto out;
731 	}
732 lockit:
733 	refcount_inc(&rbio->refs);
734 	list_add(&rbio->hash_list, &h->hash_list);
735 out:
736 	spin_unlock_irqrestore(&h->lock, flags);
737 	if (cache_drop)
738 		remove_rbio_from_cache(cache_drop);
739 	if (freeit)
740 		__free_raid_bio(freeit);
741 	return ret;
742 }
743 
744 /*
745  * called as rmw or parity rebuild is completed.  If the plug list has more
746  * rbios waiting for this stripe, the next one on the list will be started
747  */
748 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
749 {
750 	int bucket;
751 	struct btrfs_stripe_hash *h;
752 	unsigned long flags;
753 	int keep_cache = 0;
754 
755 	bucket = rbio_bucket(rbio);
756 	h = rbio->fs_info->stripe_hash_table->table + bucket;
757 
758 	if (list_empty(&rbio->plug_list))
759 		cache_rbio(rbio);
760 
761 	spin_lock_irqsave(&h->lock, flags);
762 	spin_lock(&rbio->bio_list_lock);
763 
764 	if (!list_empty(&rbio->hash_list)) {
765 		/*
766 		 * if we're still cached and there is no other IO
767 		 * to perform, just leave this rbio here for others
768 		 * to steal from later
769 		 */
770 		if (list_empty(&rbio->plug_list) &&
771 		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
772 			keep_cache = 1;
773 			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
774 			BUG_ON(!bio_list_empty(&rbio->bio_list));
775 			goto done;
776 		}
777 
778 		list_del_init(&rbio->hash_list);
779 		refcount_dec(&rbio->refs);
780 
781 		/*
782 		 * we use the plug list to hold all the rbios
783 		 * waiting for the chance to lock this stripe.
784 		 * hand the lock over to one of them.
785 		 */
786 		if (!list_empty(&rbio->plug_list)) {
787 			struct btrfs_raid_bio *next;
788 			struct list_head *head = rbio->plug_list.next;
789 
790 			next = list_entry(head, struct btrfs_raid_bio,
791 					  plug_list);
792 
793 			list_del_init(&rbio->plug_list);
794 
795 			list_add(&next->hash_list, &h->hash_list);
796 			refcount_inc(&next->refs);
797 			spin_unlock(&rbio->bio_list_lock);
798 			spin_unlock_irqrestore(&h->lock, flags);
799 
800 			if (next->operation == BTRFS_RBIO_READ_REBUILD)
801 				start_async_work(next, read_rebuild_work);
802 			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
803 				steal_rbio(rbio, next);
804 				start_async_work(next, read_rebuild_work);
805 			} else if (next->operation == BTRFS_RBIO_WRITE) {
806 				steal_rbio(rbio, next);
807 				start_async_work(next, rmw_work);
808 			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
809 				steal_rbio(rbio, next);
810 				start_async_work(next, scrub_parity_work);
811 			}
812 
813 			goto done_nolock;
814 		}
815 	}
816 done:
817 	spin_unlock(&rbio->bio_list_lock);
818 	spin_unlock_irqrestore(&h->lock, flags);
819 
820 done_nolock:
821 	if (!keep_cache)
822 		remove_rbio_from_cache(rbio);
823 }
824 
825 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
826 {
827 	int i;
828 
829 	if (!refcount_dec_and_test(&rbio->refs))
830 		return;
831 
832 	WARN_ON(!list_empty(&rbio->stripe_cache));
833 	WARN_ON(!list_empty(&rbio->hash_list));
834 	WARN_ON(!bio_list_empty(&rbio->bio_list));
835 
836 	for (i = 0; i < rbio->nr_pages; i++) {
837 		if (rbio->stripe_pages[i]) {
838 			__free_page(rbio->stripe_pages[i]);
839 			rbio->stripe_pages[i] = NULL;
840 		}
841 	}
842 
843 	btrfs_put_bioc(rbio->bioc);
844 	kfree(rbio);
845 }
846 
847 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
848 {
849 	struct bio *next;
850 
851 	while (cur) {
852 		next = cur->bi_next;
853 		cur->bi_next = NULL;
854 		cur->bi_status = err;
855 		bio_endio(cur);
856 		cur = next;
857 	}
858 }
859 
860 /*
861  * this frees the rbio and runs through all the bios in the
862  * bio_list and calls end_io on them
863  */
864 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
865 {
866 	struct bio *cur = bio_list_get(&rbio->bio_list);
867 	struct bio *extra;
868 
869 	if (rbio->generic_bio_cnt)
870 		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
871 	/*
872 	 * Clear the data bitmap, as the rbio may be cached for later usage.
873 	 * Do this before unlock_stripe() so there will be no new bio
874 	 * for this rbio.
875 	 */
876 	bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages);
877 
878 	/*
879 	 * At this moment, rbio->bio_list is empty, however since rbio does not
880 	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
881 	 * hash list, rbio may be merged with others so that rbio->bio_list
882 	 * becomes non-empty.
883 	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
884 	 * more and we can call bio_endio() on all queued bios.
885 	 */
886 	unlock_stripe(rbio);
887 	extra = bio_list_get(&rbio->bio_list);
888 	__free_raid_bio(rbio);
889 
890 	rbio_endio_bio_list(cur, err);
891 	if (extra)
892 		rbio_endio_bio_list(extra, err);
893 }
894 
895 /*
896  * end io function used by finish_rmw.  When we finally
897  * get here, we've written a full stripe
898  */
899 static void raid_write_end_io(struct bio *bio)
900 {
901 	struct btrfs_raid_bio *rbio = bio->bi_private;
902 	blk_status_t err = bio->bi_status;
903 	int max_errors;
904 
905 	if (err)
906 		fail_bio_stripe(rbio, bio);
907 
908 	bio_put(bio);
909 
910 	if (!atomic_dec_and_test(&rbio->stripes_pending))
911 		return;
912 
913 	err = BLK_STS_OK;
914 
915 	/* OK, we have written all the stripes we need to. */
916 	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
917 		     0 : rbio->bioc->max_errors;
918 	if (atomic_read(&rbio->error) > max_errors)
919 		err = BLK_STS_IOERR;
920 
921 	rbio_orig_end_io(rbio, err);
922 }
923 
924 /*
925  * the read/modify/write code wants to use the original bio for
926  * any pages it included, and then use the rbio for everything
927  * else.  This function decides if a given index (stripe number)
928  * and page number in that stripe fall inside the original bio
929  * or the rbio.
930  *
931  * if you set bio_list_only, you'll get a NULL back for any ranges
932  * that are outside the bio_list
933  *
934  * This doesn't take any refs on anything, you get a bare page pointer
935  * and the caller must bump refs as required.
936  *
937  * You must call index_rbio_pages once before you can trust
938  * the answers from this function.
939  */
940 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
941 				 int index, int pagenr, int bio_list_only)
942 {
943 	int chunk_page;
944 	struct page *p = NULL;
945 
946 	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
947 
948 	spin_lock_irq(&rbio->bio_list_lock);
949 	p = rbio->bio_pages[chunk_page];
950 	spin_unlock_irq(&rbio->bio_list_lock);
951 
952 	if (p || bio_list_only)
953 		return p;
954 
955 	return rbio->stripe_pages[chunk_page];
956 }
957 
958 /*
959  * number of pages we need for the entire stripe across all the
960  * drives
961  */
962 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
963 {
964 	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
965 }
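
/*
 * Example (illustrative sizes): a 64K stripe_len across 4 drives
 * (2 data + P + Q) needs DIV_ROUND_UP(64K, 4K) * 4 = 64 pages.
 */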
966 
967 /*
968  * allocation and initial setup for the btrfs_raid_bio.  Note that
969  * this does not allocate any pages for rbio->stripe_pages.
970  */
971 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
972 					 struct btrfs_io_context *bioc,
973 					 u64 stripe_len)
974 {
975 	struct btrfs_raid_bio *rbio;
976 	int nr_data = 0;
977 	int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
978 	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
979 	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
980 	void *p;
981 
982 	rbio = kzalloc(sizeof(*rbio) +
983 		       sizeof(*rbio->stripe_pages) * num_pages +
984 		       sizeof(*rbio->bio_pages) * num_pages +
985 		       sizeof(*rbio->finish_pointers) * real_stripes +
986 		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
987 		       sizeof(*rbio->finish_pbitmap) *
988 				BITS_TO_LONGS(stripe_npages),
989 		       GFP_NOFS);
990 	if (!rbio)
991 		return ERR_PTR(-ENOMEM);
992 
993 	bio_list_init(&rbio->bio_list);
994 	INIT_LIST_HEAD(&rbio->plug_list);
995 	spin_lock_init(&rbio->bio_list_lock);
996 	INIT_LIST_HEAD(&rbio->stripe_cache);
997 	INIT_LIST_HEAD(&rbio->hash_list);
998 	rbio->bioc = bioc;
999 	rbio->fs_info = fs_info;
1000 	rbio->stripe_len = stripe_len;
1001 	rbio->nr_pages = num_pages;
1002 	rbio->real_stripes = real_stripes;
1003 	rbio->stripe_npages = stripe_npages;
1004 	rbio->faila = -1;
1005 	rbio->failb = -1;
1006 	refcount_set(&rbio->refs, 1);
1007 	atomic_set(&rbio->error, 0);
1008 	atomic_set(&rbio->stripes_pending, 0);
1009 
1010 	/*
1011 	 * the stripe_pages, bio_pages, etc arrays point to the extra
1012 	 * memory we allocated past the end of the rbio
1013 	 */
1014 	p = rbio + 1;
1015 #define CONSUME_ALLOC(ptr, count)	do {				\
1016 		ptr = p;						\
1017 		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
1018 	} while (0)
1019 	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1020 	CONSUME_ALLOC(rbio->bio_pages, num_pages);
1021 	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1022 	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1023 	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1024 #undef  CONSUME_ALLOC
1025 
1026 	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
1027 		nr_data = real_stripes - 1;
1028 	else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
1029 		nr_data = real_stripes - 2;
1030 	else
1031 		BUG();
1032 
1033 	rbio->nr_data = nr_data;
1034 	return rbio;
1035 }
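
/*
 * Layout of the single allocation made by alloc_rbio() (sketch):
 *
 *	[struct btrfs_raid_bio]
 *	[stripe_pages:    num_pages pointers]
 *	[bio_pages:       num_pages pointers]
 *	[finish_pointers: real_stripes pointers]
 *	[dbitmap:         BITS_TO_LONGS(stripe_npages) longs]
 *	[finish_pbitmap:  BITS_TO_LONGS(stripe_npages) longs]
 *
 * CONSUME_ALLOC simply walks p through these regions in order.
 */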
1036 
1037 /* allocate pages for all the stripes in the bio, including parity */
1038 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1039 {
1040 	int i;
1041 	struct page *page;
1042 
1043 	for (i = 0; i < rbio->nr_pages; i++) {
1044 		if (rbio->stripe_pages[i])
1045 			continue;
1046 		page = alloc_page(GFP_NOFS);
1047 		if (!page)
1048 			return -ENOMEM;
1049 		rbio->stripe_pages[i] = page;
1050 	}
1051 	return 0;
1052 }
1053 
1054 /* only allocate pages for p/q stripes */
1055 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1056 {
1057 	int i;
1058 	struct page *page;
1059 
1060 	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1061 
1062 	for (; i < rbio->nr_pages; i++) {
1063 		if (rbio->stripe_pages[i])
1064 			continue;
1065 		page = alloc_page(GFP_NOFS);
1066 		if (!page)
1067 			return -ENOMEM;
1068 		rbio->stripe_pages[i] = page;
1069 	}
1070 	return 0;
1071 }
1072 
1073 /*
1074  * add a single page from a specific stripe into our list of bios for IO.
1075  * This will try to merge into existing bios if possible, and returns
1076  * zero if all went well.
1077  */
1078 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1079 			    struct bio_list *bio_list,
1080 			    struct page *page,
1081 			    int stripe_nr,
1082 			    unsigned long page_index,
1083 			    unsigned long bio_max_len)
1084 {
1085 	struct bio *last = bio_list->tail;
1086 	int ret;
1087 	struct bio *bio;
1088 	struct btrfs_io_stripe *stripe;
1089 	u64 disk_start;
1090 
1091 	stripe = &rbio->bioc->stripes[stripe_nr];
1092 	disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1093 
1094 	/* if the device is missing, just fail this stripe */
1095 	if (!stripe->dev->bdev)
1096 		return fail_rbio_index(rbio, stripe_nr);
1097 
1098 	/* see if we can add this page onto our existing bio */
1099 	if (last) {
1100 		u64 last_end = last->bi_iter.bi_sector << 9;
1101 		last_end += last->bi_iter.bi_size;
1102 
1103 		/*
1104 		 * we can't merge these if they are from different
1105 		 * devices or if they are not contiguous
1106 		 */
1107 		if (last_end == disk_start && !last->bi_status &&
1108 		    last->bi_bdev == stripe->dev->bdev) {
1109 			ret = bio_add_page(last, page, PAGE_SIZE, 0);
1110 			if (ret == PAGE_SIZE)
1111 				return 0;
1112 		}
1113 	}
1114 
1115 	/* put a new bio on the list */
1116 	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1117 	btrfs_io_bio(bio)->device = stripe->dev;
1118 	bio->bi_iter.bi_size = 0;
1119 	bio_set_dev(bio, stripe->dev->bdev);
1120 	bio->bi_iter.bi_sector = disk_start >> 9;
1121 
1122 	bio_add_page(bio, page, PAGE_SIZE, 0);
1123 	bio_list_add(bio_list, bio);
1124 	return 0;
1125 }
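
/*
 * Example: two consecutive pages of the same stripe map to contiguous
 * sectors on one device, so the second rbio_add_io_page() call
 * normally extends the tail bio via bio_add_page() rather than
 * allocating a new bio; a fresh bio is only needed on a device
 * change, a gap, or a prior error on the tail bio.
 */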
1126 
1127 /*
1128  * while we're doing the read/modify/write cycle, we could
1129  * have errors in reading pages off the disk.  This checks
1130  * for errors and if we're not able to read the page it'll
1131  * trigger parity reconstruction.  The rmw will be finished
1132  * after we've reconstructed the failed stripes
1133  */
1134 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1135 {
1136 	if (rbio->faila >= 0 || rbio->failb >= 0) {
1137 		BUG_ON(rbio->faila == rbio->real_stripes - 1);
1138 		__raid56_parity_recover(rbio);
1139 	} else {
1140 		finish_rmw(rbio);
1141 	}
1142 }
1143 
1144 /*
1145  * helper function to walk our bio list and populate the bio_pages array with
1146  * the result.  This seems expensive, but it is faster than constantly
1147  * searching through the bio list as we setup the IO in finish_rmw or stripe
1148  * reconstruction.
1149  *
1150  * This must be called before you trust the answers from page_in_rbio
1151  */
1152 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1153 {
1154 	struct bio *bio;
1155 	u64 start;
1156 	unsigned long stripe_offset;
1157 	unsigned long page_index;
1158 
1159 	spin_lock_irq(&rbio->bio_list_lock);
1160 	bio_list_for_each(bio, &rbio->bio_list) {
1161 		struct bio_vec bvec;
1162 		struct bvec_iter iter;
1163 		int i = 0;
1164 
1165 		start = bio->bi_iter.bi_sector << 9;
1166 		stripe_offset = start - rbio->bioc->raid_map[0];
1167 		page_index = stripe_offset >> PAGE_SHIFT;
1168 
1169 		if (bio_flagged(bio, BIO_CLONED))
1170 			bio->bi_iter = btrfs_io_bio(bio)->iter;
1171 
1172 		bio_for_each_segment(bvec, bio, iter) {
1173 			rbio->bio_pages[page_index + i] = bvec.bv_page;
1174 			i++;
1175 		}
1176 	}
1177 	spin_unlock_irq(&rbio->bio_list_lock);
1178 }
1179 
1180 /*
1181  * this is called from one of two situations.  We either
1182  * have a full stripe from the higher layers, or we've read all
1183  * the missing bits off disk.
1184  *
1185  * This will calculate the parity and then send down any
1186  * changed blocks.
1187  */
1188 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1189 {
1190 	struct btrfs_io_context *bioc = rbio->bioc;
1191 	void **pointers = rbio->finish_pointers;
1192 	int nr_data = rbio->nr_data;
1193 	int stripe;
1194 	int pagenr;
1195 	bool has_qstripe;
1196 	struct bio_list bio_list;
1197 	struct bio *bio;
1198 	int ret;
1199 
1200 	bio_list_init(&bio_list);
1201 
1202 	if (rbio->real_stripes - rbio->nr_data == 1)
1203 		has_qstripe = false;
1204 	else if (rbio->real_stripes - rbio->nr_data == 2)
1205 		has_qstripe = true;
1206 	else
1207 		BUG();
1208 
1209 	/* We should have at least one data sector. */
1210 	ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages));
1211 
1212 	/* at this point we either have a full stripe,
1213 	 * or we've read the full stripe from the drive.
1214 	 * recalculate the parity and write the new results.
1215 	 *
1216 	 * We're not allowed to add any new bios to the
1217 	 * bio list here, anyone else that wants to
1218 	 * change this stripe needs to do their own rmw.
1219 	 */
1220 	spin_lock_irq(&rbio->bio_list_lock);
1221 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1222 	spin_unlock_irq(&rbio->bio_list_lock);
1223 
1224 	atomic_set(&rbio->error, 0);
1225 
1226 	/*
1227 	 * now that we've set rmw_locked, run through the
1228 	 * bio list one last time and map the page pointers
1229 	 *
1230 	 * We don't cache full rbios because we're assuming
1231 	 * the higher layers are unlikely to use this area of
1232 	 * the disk again soon.  If they do use it again,
1233 	 * hopefully they will send another full bio.
1234 	 */
1235 	index_rbio_pages(rbio);
1236 	if (!rbio_is_full(rbio))
1237 		cache_rbio_pages(rbio);
1238 	else
1239 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1240 
1241 	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1242 		struct page *p;
1243 		/* first collect one page from each data stripe */
1244 		for (stripe = 0; stripe < nr_data; stripe++) {
1245 			p = page_in_rbio(rbio, stripe, pagenr, 0);
1246 			pointers[stripe] = kmap_local_page(p);
1247 		}
1248 
1249 		/* then add the parity stripe */
1250 		p = rbio_pstripe_page(rbio, pagenr);
1251 		SetPageUptodate(p);
1252 		pointers[stripe++] = kmap_local_page(p);
1253 
1254 		if (has_qstripe) {
1255 
1256 			/*
1257 			 * raid6, add the qstripe and call the
1258 			 * library function to fill in our p/q
1259 			 */
1260 			p = rbio_qstripe_page(rbio, pagenr);
1261 			SetPageUptodate(p);
1262 			pointers[stripe++] = kmap_local_page(p);
1263 
1264 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1265 						pointers);
1266 		} else {
1267 			/* raid5 */
1268 			copy_page(pointers[nr_data], pointers[0]);
1269 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1270 		}
1271 		for (stripe = stripe - 1; stripe >= 0; stripe--)
1272 			kunmap_local(pointers[stripe]);
1273 	}
1274 
1275 	/*
1276 	 * time to start writing.  Make bios for everything from the
1277 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
1278 	 * everything else.
1279 	 */
1280 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1281 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1282 			struct page *page;
1283 
1284 			/* This vertical stripe has no data, skip it. */
1285 			if (!test_bit(pagenr, rbio->dbitmap))
1286 				continue;
1287 
1288 			if (stripe < rbio->nr_data) {
1289 				page = page_in_rbio(rbio, stripe, pagenr, 1);
1290 				if (!page)
1291 					continue;
1292 			} else {
1293 			       page = rbio_stripe_page(rbio, stripe, pagenr);
1294 			}
1295 
1296 			ret = rbio_add_io_page(rbio, &bio_list,
1297 				       page, stripe, pagenr, rbio->stripe_len);
1298 			if (ret)
1299 				goto cleanup;
1300 		}
1301 	}
1302 
1303 	if (likely(!bioc->num_tgtdevs))
1304 		goto write_data;
1305 
1306 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1307 		if (!bioc->tgtdev_map[stripe])
1308 			continue;
1309 
1310 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1311 			struct page *page;
1312 
1313 			/* This vertical stripe has no data, skip it. */
1314 			if (!test_bit(pagenr, rbio->dbitmap))
1315 				continue;
1316 
1317 			if (stripe < rbio->nr_data) {
1318 				page = page_in_rbio(rbio, stripe, pagenr, 1);
1319 				if (!page)
1320 					continue;
1321 			} else {
1322 			       page = rbio_stripe_page(rbio, stripe, pagenr);
1323 			}
1324 
1325 			ret = rbio_add_io_page(rbio, &bio_list, page,
1326 					       rbio->bioc->tgtdev_map[stripe],
1327 					       pagenr, rbio->stripe_len);
1328 			if (ret)
1329 				goto cleanup;
1330 		}
1331 	}
1332 
1333 write_data:
1334 	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1335 	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1336 
1337 	while ((bio = bio_list_pop(&bio_list))) {
1338 		bio->bi_private = rbio;
1339 		bio->bi_end_io = raid_write_end_io;
1340 		bio->bi_opf = REQ_OP_WRITE;
1341 
1342 		submit_bio(bio);
1343 	}
1344 	return;
1345 
1346 cleanup:
1347 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1348 
1349 	while ((bio = bio_list_pop(&bio_list)))
1350 		bio_put(bio);
1351 }
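
/*
 * A note on the unmap loop in finish_rmw(): pointers[] is filled with
 * the data stripes first, then P (and Q for raid6), and
 * kmap_local_page() mappings must be released in reverse order, which
 * is why the loop walks from stripe - 1 down to 0.
 */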
1352 
1353 /*
1354  * helper to find the stripe number for a given bio.  Used to figure out which
1355  * stripe has failed.  This expects the bio to correspond to a physical disk,
1356  * so it looks up based on physical sector numbers.
1357  */
1358 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1359 			   struct bio *bio)
1360 {
1361 	u64 physical = bio->bi_iter.bi_sector;
1362 	int i;
1363 	struct btrfs_io_stripe *stripe;
1364 
1365 	physical <<= 9;
1366 
1367 	for (i = 0; i < rbio->bioc->num_stripes; i++) {
1368 		stripe = &rbio->bioc->stripes[i];
1369 		if (in_range(physical, stripe->physical, rbio->stripe_len) &&
1370 		    stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
1371 			return i;
1372 		}
1373 	}
1374 	return -1;
1375 }
1376 
1377 /*
1378  * helper to find the stripe number for a given
1379  * bio (before mapping).  Used to figure out which stripe has
1380  * failed.  This looks up based on logical block numbers.
1381  */
1382 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1383 				   struct bio *bio)
1384 {
1385 	u64 logical = bio->bi_iter.bi_sector << 9;
1386 	int i;
1387 
1388 	for (i = 0; i < rbio->nr_data; i++) {
1389 		u64 stripe_start = rbio->bioc->raid_map[i];
1390 
1391 		if (in_range(logical, stripe_start, rbio->stripe_len))
1392 			return i;
1393 	}
1394 	return -1;
1395 }
1396 
1397 /*
1398  * returns -EIO if we had too many failures
1399  */
1400 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1401 {
1402 	unsigned long flags;
1403 	int ret = 0;
1404 
1405 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
1406 
1407 	/* we already know this stripe is bad, move on */
1408 	if (rbio->faila == failed || rbio->failb == failed)
1409 		goto out;
1410 
1411 	if (rbio->faila == -1) {
1412 		/* first failure on this rbio */
1413 		rbio->faila = failed;
1414 		atomic_inc(&rbio->error);
1415 	} else if (rbio->failb == -1) {
1416 		/* second failure on this rbio */
1417 		rbio->failb = failed;
1418 		atomic_inc(&rbio->error);
1419 	} else {
1420 		ret = -EIO;
1421 	}
1422 out:
1423 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1424 
1425 	return ret;
1426 }
1427 
1428 /*
1429  * helper to fail a stripe based on a physical disk
1430  * bio.
1431  */
1432 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1433 			   struct bio *bio)
1434 {
1435 	int failed = find_bio_stripe(rbio, bio);
1436 
1437 	if (failed < 0)
1438 		return -EIO;
1439 
1440 	return fail_rbio_index(rbio, failed);
1441 }
1442 
1443 /*
1444  * this sets each page in the bio uptodate.  It should only be used on private
1445  * rbio pages, nothing that comes in from the higher layers
1446  */
1447 static void set_bio_pages_uptodate(struct bio *bio)
1448 {
1449 	struct bio_vec *bvec;
1450 	struct bvec_iter_all iter_all;
1451 
1452 	ASSERT(!bio_flagged(bio, BIO_CLONED));
1453 
1454 	bio_for_each_segment_all(bvec, bio, iter_all)
1455 		SetPageUptodate(bvec->bv_page);
1456 }
1457 
1458 /*
1459  * end io for the read phase of the rmw cycle.  All the bios here are physical
1460  * stripe bios we've read from the disk so we can recalculate the parity of the
1461  * stripe.
1462  *
1463  * This will usually kick off finish_rmw once all the bios are read in, but it
1464  * may trigger parity reconstruction if we had any errors along the way
1465  */
1466 static void raid_rmw_end_io(struct bio *bio)
1467 {
1468 	struct btrfs_raid_bio *rbio = bio->bi_private;
1469 
1470 	if (bio->bi_status)
1471 		fail_bio_stripe(rbio, bio);
1472 	else
1473 		set_bio_pages_uptodate(bio);
1474 
1475 	bio_put(bio);
1476 
1477 	if (!atomic_dec_and_test(&rbio->stripes_pending))
1478 		return;
1479 
1480 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
1481 		goto cleanup;
1482 
1483 	/*
1484 	 * this will normally call finish_rmw to start our write
1485 	 * but if there are any failed stripes we'll reconstruct
1486 	 * from parity first
1487 	 */
1488 	validate_rbio_for_rmw(rbio);
1489 	return;
1490 
1491 cleanup:
1492 
1493 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1494 }
1495 
1496 /*
1497  * the stripe must be locked by the caller.  It will
1498  * unlock after all the writes are done
1499  */
1500 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1501 {
1502 	int bios_to_read = 0;
1503 	struct bio_list bio_list;
1504 	int ret;
1505 	int pagenr;
1506 	int stripe;
1507 	struct bio *bio;
1508 
1509 	bio_list_init(&bio_list);
1510 
1511 	ret = alloc_rbio_pages(rbio);
1512 	if (ret)
1513 		goto cleanup;
1514 
1515 	index_rbio_pages(rbio);
1516 
1517 	atomic_set(&rbio->error, 0);
1518 	/*
1519 	 * build a list of bios to read all the missing parts of this
1520 	 * stripe
1521 	 */
1522 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1523 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1524 			struct page *page;
1525 			/*
1526 			 * we want to find all the pages missing from
1527 			 * the rbio and read them from the disk.  If
1528 			 * page_in_rbio finds a page in the bio list
1529 			 * we don't need to read it off the stripe.
1530 			 */
1531 			page = page_in_rbio(rbio, stripe, pagenr, 1);
1532 			if (page)
1533 				continue;
1534 
1535 			page = rbio_stripe_page(rbio, stripe, pagenr);
1536 			/*
1537 			 * the bio cache may have handed us an uptodate
1538 			 * page.  If so, be happy and use it
1539 			 */
1540 			if (PageUptodate(page))
1541 				continue;
1542 
1543 			ret = rbio_add_io_page(rbio, &bio_list, page,
1544 				       stripe, pagenr, rbio->stripe_len);
1545 			if (ret)
1546 				goto cleanup;
1547 		}
1548 	}
1549 
1550 	bios_to_read = bio_list_size(&bio_list);
1551 	if (!bios_to_read) {
1552 		/*
1553 		 * this can happen if others have merged with
1554 		 * us, it means there is nothing left to read.
1555 		 * But if there are missing devices it may not be
1556 		 * safe to do the full stripe write yet.
1557 		 */
1558 		goto finish;
1559 	}
1560 
1561 	/*
1562 	 * The bioc may be freed once we submit the last bio. Make sure not to
1563 	 * touch it after that.
1564 	 */
1565 	atomic_set(&rbio->stripes_pending, bios_to_read);
1566 	while ((bio = bio_list_pop(&bio_list))) {
1567 		bio->bi_private = rbio;
1568 		bio->bi_end_io = raid_rmw_end_io;
1569 		bio->bi_opf = REQ_OP_READ;
1570 
1571 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1572 
1573 		submit_bio(bio);
1574 	}
1575 	/* the actual write will happen once the reads are done */
1576 	return 0;
1577 
1578 cleanup:
1579 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1580 
1581 	while ((bio = bio_list_pop(&bio_list)))
1582 		bio_put(bio);
1583 
1584 	return -EIO;
1585 
1586 finish:
1587 	validate_rbio_for_rmw(rbio);
1588 	return 0;
1589 }
1590 
1591 /*
1592  * if the upper layers pass in a full stripe, we thank them by only allocating
1593  * enough pages to hold the parity, and sending it all down quickly.
1594  */
1595 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1596 {
1597 	int ret;
1598 
1599 	ret = alloc_rbio_parity_pages(rbio);
1600 	if (ret) {
1601 		__free_raid_bio(rbio);
1602 		return ret;
1603 	}
1604 
1605 	ret = lock_stripe_add(rbio);
1606 	if (ret == 0)
1607 		finish_rmw(rbio);
1608 	return 0;
1609 }
1610 
1611 /*
1612  * partial stripe writes get handed over to async helpers.
1613  * We're really hoping to merge a few more writes into this
1614  * rbio before calculating new parity
1615  */
1616 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1617 {
1618 	int ret;
1619 
1620 	ret = lock_stripe_add(rbio);
1621 	if (ret == 0)
1622 		start_async_work(rbio, rmw_work);
1623 	return 0;
1624 }
1625 
1626 /*
1627  * sometimes while we were reading from the drive to
1628  * recalculate parity, enough new bios come in to create
1629  * a full stripe.  So we do a check here to see if we can
1630  * go directly to finish_rmw
1631  */
1632 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1633 {
1634 	/* head off into rmw land if we don't have a full stripe */
1635 	if (!rbio_is_full(rbio))
1636 		return partial_stripe_write(rbio);
1637 	return full_stripe_write(rbio);
1638 }
1639 
1640 /*
1641  * We use plugging call backs to collect full stripes.
1642  * Any time we get a partial stripe write while plugged
1643  * we collect it into a list.  When the unplug comes down,
1644  * we sort the list by logical block number and merge
1645  * everything we can into the same rbios
1646  */
1647 struct btrfs_plug_cb {
1648 	struct blk_plug_cb cb;
1649 	struct btrfs_fs_info *info;
1650 	struct list_head rbio_list;
1651 	struct btrfs_work work;
1652 };
1653 
1654 /*
1655  * rbios on the plug list are sorted for easier merging.
1656  */
1657 static int plug_cmp(void *priv, const struct list_head *a,
1658 		    const struct list_head *b)
1659 {
1660 	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1661 						       plug_list);
1662 	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1663 						       plug_list);
1664 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1665 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1666 
1667 	if (a_sector < b_sector)
1668 		return -1;
1669 	if (a_sector > b_sector)
1670 		return 1;
1671 	return 0;
1672 }
1673 
1674 static void run_plug(struct btrfs_plug_cb *plug)
1675 {
1676 	struct btrfs_raid_bio *cur;
1677 	struct btrfs_raid_bio *last = NULL;
1678 
1679 	/*
1680 	 * sort our plug list then try to merge
1681 	 * everything we can in hopes of creating full
1682 	 * stripes.
1683 	 */
1684 	list_sort(NULL, &plug->rbio_list, plug_cmp);
1685 	while (!list_empty(&plug->rbio_list)) {
1686 		cur = list_entry(plug->rbio_list.next,
1687 				 struct btrfs_raid_bio, plug_list);
1688 		list_del_init(&cur->plug_list);
1689 
1690 		if (rbio_is_full(cur)) {
1691 			int ret;
1692 
1693 			/* we have a full stripe, send it down */
1694 			ret = full_stripe_write(cur);
1695 			BUG_ON(ret);
1696 			continue;
1697 		}
1698 		if (last) {
1699 			if (rbio_can_merge(last, cur)) {
1700 				merge_rbio(last, cur);
1701 				__free_raid_bio(cur);
1702 				continue;
1703 
1704 			}
1705 			__raid56_parity_write(last);
1706 		}
1707 		last = cur;
1708 	}
1709 	if (last) {
1710 		__raid56_parity_write(last);
1711 	}
1712 	kfree(plug);
1713 }
1714 
1715 /*
1716  * if the unplug comes from schedule, we have to push the
1717  * work off to a helper thread
1718  */
1719 static void unplug_work(struct btrfs_work *work)
1720 {
1721 	struct btrfs_plug_cb *plug;
1722 	plug = container_of(work, struct btrfs_plug_cb, work);
1723 	run_plug(plug);
1724 }
1725 
1726 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1727 {
1728 	struct btrfs_plug_cb *plug;
1729 	plug = container_of(cb, struct btrfs_plug_cb, cb);
1730 
1731 	if (from_schedule) {
1732 		btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
1733 		btrfs_queue_work(plug->info->rmw_workers,
1734 				 &plug->work);
1735 		return;
1736 	}
1737 	run_plug(plug);
1738 }
1739 
1740 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1741 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1742 {
1743 	const struct btrfs_fs_info *fs_info = rbio->fs_info;
1744 	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1745 	const u64 full_stripe_start = rbio->bioc->raid_map[0];
1746 	const u32 orig_len = orig_bio->bi_iter.bi_size;
1747 	const u32 sectorsize = fs_info->sectorsize;
1748 	u64 cur_logical;
1749 
1750 	ASSERT(orig_logical >= full_stripe_start &&
1751 	       orig_logical + orig_len <= full_stripe_start +
1752 	       rbio->nr_data * rbio->stripe_len);
1753 
1754 	bio_list_add(&rbio->bio_list, orig_bio);
1755 	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1756 
1757 	/* Update the dbitmap. */
1758 	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1759 	     cur_logical += sectorsize) {
1760 		int bit = ((u32)(cur_logical - full_stripe_start) >>
1761 			   fs_info->sectorsize_bits) % rbio->stripe_npages;
1762 
1763 		set_bit(bit, rbio->dbitmap);
1764 	}
1765 }
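
/*
 * Worked example of the dbitmap math above (illustrative sizes): with
 * 4K sectors and a 64K stripe, a sector at full_stripe_start + 72K is
 * sector offset 72K >> 12 = 18, and 18 % 16 = 2, so bit 2 is set: the
 * bitmap tracks the vertical position within the stripe, independent
 * of which data stripe the sector lands on.
 */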
1766 
1767 /*
1768  * our main entry point for writes from the rest of the FS.
1769  */
1770 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1771 			struct btrfs_io_context *bioc, u64 stripe_len)
1772 {
1773 	struct btrfs_raid_bio *rbio;
1774 	struct btrfs_plug_cb *plug = NULL;
1775 	struct blk_plug_cb *cb;
1776 	int ret;
1777 
1778 	rbio = alloc_rbio(fs_info, bioc, stripe_len);
1779 	if (IS_ERR(rbio)) {
1780 		btrfs_put_bioc(bioc);
1781 		return PTR_ERR(rbio);
1782 	}
1783 	rbio->operation = BTRFS_RBIO_WRITE;
1784 	rbio_add_bio(rbio, bio);
1785 
1786 	btrfs_bio_counter_inc_noblocked(fs_info);
1787 	rbio->generic_bio_cnt = 1;
1788 
1789 	/*
1790 	 * don't plug on full rbios, just get them out the door
1791 	 * as quickly as we can
1792 	 */
1793 	if (rbio_is_full(rbio)) {
1794 		ret = full_stripe_write(rbio);
1795 		if (ret)
1796 			btrfs_bio_counter_dec(fs_info);
1797 		return ret;
1798 	}
1799 
1800 	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1801 	if (cb) {
1802 		plug = container_of(cb, struct btrfs_plug_cb, cb);
1803 		if (!plug->info) {
1804 			plug->info = fs_info;
1805 			INIT_LIST_HEAD(&plug->rbio_list);
1806 		}
1807 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
1808 		ret = 0;
1809 	} else {
1810 		ret = __raid56_parity_write(rbio);
1811 		if (ret)
1812 			btrfs_bio_counter_dec(fs_info);
1813 	}
1814 	return ret;
1815 }
1816 
1817 /*
1818  * all parity reconstruction happens here.  We've read in everything
1819  * we can find from the drives and this does the heavy lifting of
1820  * sorting the good from the bad.
1821  */
1822 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1823 {
1824 	int pagenr, stripe;
1825 	void **pointers;
1826 	void **unmap_array;
1827 	int faila = -1, failb = -1;
1828 	struct page *page;
1829 	blk_status_t err;
1830 	int i;
1831 
1832 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1833 	if (!pointers) {
1834 		err = BLK_STS_RESOURCE;
1835 		goto cleanup_io;
1836 	}
1837 
1838 	/*
1839 	 * Store a copy of the pointers that does not get reordered during
1840 	 * reconstruction so that kunmap_local works.
1841 	 */
1842 	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1843 	if (!unmap_array) {
1844 		err = BLK_STS_RESOURCE;
1845 		goto cleanup_pointers;
1846 	}
1847 
1848 	faila = rbio->faila;
1849 	failb = rbio->failb;
1850 
1851 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1852 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1853 		spin_lock_irq(&rbio->bio_list_lock);
1854 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1855 		spin_unlock_irq(&rbio->bio_list_lock);
1856 	}
1857 
1858 	index_rbio_pages(rbio);
1859 
1860 	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1861 		/*
1862 		 * When doing a parity scrub, we just use the bitmap to mark
1863 		 * the horizontal stripes in which we have data.
1864 		 */
1865 		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1866 		    !test_bit(pagenr, rbio->dbitmap))
1867 			continue;
1868 
1869 		/*
1870 		 * Setup our array of pointers with pages from each stripe
1871 		 *
1872 		 * NOTE: store a duplicate array of pointers to preserve the
1873 		 * pointer order
1874 		 */
1875 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1876 			/*
1877 			 * if we're rebuilding a read, we have to use
1878 			 * pages from the bio list
1879 			 */
1880 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1881 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1882 			    (stripe == faila || stripe == failb)) {
1883 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1884 			} else {
1885 				page = rbio_stripe_page(rbio, stripe, pagenr);
1886 			}
1887 			pointers[stripe] = kmap_local_page(page);
1888 			unmap_array[stripe] = pointers[stripe];
1889 		}
1890 
1891 		/* all raid6 handling here */
1892 		if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1893 			/*
1894 			 * single failure, rebuild from parity,
1895 			 * raid5 style
1896 			 */
1897 			if (failb < 0) {
1898 				if (faila == rbio->nr_data) {
1899 					/*
1900 					 * Just the P stripe has failed, without
1901 					 * a bad data or Q stripe.
1902 					 * TODO, we should redo the xor here.
1903 					 */
1904 					err = BLK_STS_IOERR;
1905 					goto cleanup;
1906 				}
1907 				/*
1908 				 * a single failure in raid6 is rebuilt
1909 				 * in the pstripe code below
1910 				 */
1911 				goto pstripe;
1912 			}
1913 
1914 			/* make sure our ps and qs are in order */
1915 			if (faila > failb)
1916 				swap(faila, failb);
1917 
1918 			/* if the q stripe failed, do a pstripe reconstruction
1919 			 * from the xors.
1920 			 * If both the q stripe and the P stripe failed, we're
1921 			 * here due to a crc mismatch and we can't give them the
1922 			 * data they want
1923 			 */
1924 			if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
1925 				if (rbio->bioc->raid_map[faila] ==
1926 				    RAID5_P_STRIPE) {
1927 					err = BLK_STS_IOERR;
1928 					goto cleanup;
1929 				}
1930 				/*
1931 				 * otherwise we have one bad data stripe and
1932 				 * a good P stripe.  raid5!
1933 				 */
1934 				goto pstripe;
1935 			}
1936 
1937 			if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
1938 				raid6_datap_recov(rbio->real_stripes,
1939 						  PAGE_SIZE, faila, pointers);
1940 			} else {
1941 				raid6_2data_recov(rbio->real_stripes,
1942 						  PAGE_SIZE, faila, failb,
1943 						  pointers);
1944 			}
1945 		} else {
1946 			void *p;
1947 
1948 			/* rebuild from P stripe here (raid5 or raid6) */
1949 			BUG_ON(failb != -1);
1950 pstripe:
1951 			/* Copy parity block into failed block to start with */
1952 			copy_page(pointers[faila], pointers[rbio->nr_data]);
1953 
1954 			/* rearrange the pointer array */
1955 			p = pointers[faila];
1956 			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1957 				pointers[stripe] = pointers[stripe + 1];
1958 			pointers[rbio->nr_data - 1] = p;
1959 
1960 			/* xor in the rest */
1961 			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1962 		}
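		/*
		 * Example of the rotation above: with nr_data = 3 and
		 * faila = 0, P is first copied into D0's slot, then
		 * [P, D1, D2] is rotated to [D1, D2, P] so run_xor()
		 * can xor D1 and D2 into the last slot, recovering
		 * D0 = D1 ^ D2 ^ P.
		 */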
1963 		/* if we're doing this rebuild as part of an rmw, go through
1964 		 * and set all of our private rbio pages in the
1965 		 * failed stripes as uptodate.  This way finish_rmw will
1966 		 * know they can be trusted.  If this was a read reconstruction,
1967 		 * other endio functions will fiddle the uptodate bits
1968 		 */
1969 		if (rbio->operation == BTRFS_RBIO_WRITE) {
1970 			for (i = 0;  i < rbio->stripe_npages; i++) {
1971 				if (faila != -1) {
1972 					page = rbio_stripe_page(rbio, faila, i);
1973 					SetPageUptodate(page);
1974 				}
1975 				if (failb != -1) {
1976 					page = rbio_stripe_page(rbio, failb, i);
1977 					SetPageUptodate(page);
1978 				}
1979 			}
1980 		}
1981 		for (stripe = rbio->real_stripes - 1; stripe >= 0; stripe--)
1982 			kunmap_local(unmap_array[stripe]);
1983 	}
1984 
1985 	err = BLK_STS_OK;
1986 cleanup:
1987 	kfree(unmap_array);
1988 cleanup_pointers:
1989 	kfree(pointers);
1990 
1991 cleanup_io:
1992 	/*
1993 	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
1994 	 * valid rbio which is consistent with on-disk content, thus such a
1995 	 * valid rbio can be cached to avoid further disk reads.
1996 	 */
1997 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1998 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1999 		/*
2000 		 * - In case of two failures, where rbio->failb != -1:
2001 		 *
2002 		 *   Do not cache this rbio since the above read reconstruction
2003 		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
2004 		 *   changed some content of stripes which are not identical to
2005 		 *   on-disk content any more, otherwise, a later write/recover
2006 		 *   may steal stripe_pages from this rbio and end up with
2007 		 *   corruptions or rebuild failures.
2008 		 *
2009 		 * - In case of single failure, where rbio->failb == -1:
2010 		 *
2011 		 *   Cache this rbio iff the above read reconstruction is
2012 		 *   executed without problems.
2013 		 */
2014 		if (err == BLK_STS_OK && rbio->failb < 0)
2015 			cache_rbio_pages(rbio);
2016 		else
2017 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2018 
2019 		rbio_orig_end_io(rbio, err);
2020 	} else if (err == BLK_STS_OK) {
2021 		rbio->faila = -1;
2022 		rbio->failb = -1;
2023 
2024 		if (rbio->operation == BTRFS_RBIO_WRITE)
2025 			finish_rmw(rbio);
2026 		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2027 			finish_parity_scrub(rbio, 0);
2028 		else
2029 			BUG();
2030 	} else {
2031 		rbio_orig_end_io(rbio, err);
2032 	}
2033 }
2034 
2035 /*
2036  * This is called only for stripes we've read from disk to
2037  * reconstruct the parity.
2038  */
2039 static void raid_recover_end_io(struct bio *bio)
2040 {
2041 	struct btrfs_raid_bio *rbio = bio->bi_private;
2042 
2043 	/*
2044 	 * we only read stripe pages off the disk; set them
2045 	 * up to date if there were no errors
2046 	 */
2047 	if (bio->bi_status)
2048 		fail_bio_stripe(rbio, bio);
2049 	else
2050 		set_bio_pages_uptodate(bio);
2051 	bio_put(bio);
2052 
2053 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2054 		return;
2055 
2056 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
2057 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2058 	else
2059 		__raid_recover_end_io(rbio);
2060 }
2061 
2062 /*
2063  * reads everything we need off the disk to reconstruct
2064  * the parity. endio handlers trigger final reconstruction
2065  * when the IO is done.
2066  *
2067  * This is used both for reads from the higher layers and for
2068  * parity construction required to finish a rmw cycle.
2069  */
2070 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2071 {
2072 	int bios_to_read = 0;
2073 	struct bio_list bio_list;
2074 	int ret;
2075 	int pagenr;
2076 	int stripe;
2077 	struct bio *bio;
2078 
2079 	bio_list_init(&bio_list);
2080 
2081 	ret = alloc_rbio_pages(rbio);
2082 	if (ret)
2083 		goto cleanup;
2084 
2085 	atomic_set(&rbio->error, 0);
2086 
2087 	/*
2088 	 * Read everything that hasn't failed. However, this time we will
2089 	 * not trust any cached sector:
2090 	 * a cached page may contain stale data in ranges that the higher
2091 	 * layer never reads back, so the staleness would go unnoticed.
2092 	 *
2093 	 * So here we always re-read everything in the recovery path.
2094 	 */
2095 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2096 		if (rbio->faila == stripe || rbio->failb == stripe) {
2097 			atomic_inc(&rbio->error);
2098 			continue;
2099 		}
2100 
2101 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2102 			ret = rbio_add_io_page(rbio, &bio_list,
2103 				       rbio_stripe_page(rbio, stripe, pagenr),
2104 				       stripe, pagenr, rbio->stripe_len);
2105 			if (ret < 0)
2106 				goto cleanup;
2107 		}
2108 	}
2109 
2110 	bios_to_read = bio_list_size(&bio_list);
2111 	if (!bios_to_read) {
2112 		/*
2113 		 * we might have no bios to read just because the pages
2114 		 * were up to date, or we might have no bios to read because
2115 		 * the devices were gone.
2116 		 */
2117 		if (atomic_read(&rbio->error) <= rbio->bioc->max_errors) {
2118 			__raid_recover_end_io(rbio);
2119 			return 0;
2120 		} else {
2121 			goto cleanup;
2122 		}
2123 	}
2124 
2125 	/*
2126 	 * The bioc may be freed once we submit the last bio. Make sure not to
2127 	 * touch it after that.
2128 	 */
2129 	atomic_set(&rbio->stripes_pending, bios_to_read);
2130 	while ((bio = bio_list_pop(&bio_list))) {
2131 		bio->bi_private = rbio;
2132 		bio->bi_end_io = raid_recover_end_io;
2133 		bio->bi_opf = REQ_OP_READ;
2134 
2135 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2136 
2137 		submit_bio(bio);
2138 	}
2139 
2140 	return 0;
2141 
2142 cleanup:
2143 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2144 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2145 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2146 
2147 	while ((bio = bio_list_pop(&bio_list)))
2148 		bio_put(bio);
2149 
2150 	return -EIO;
2151 }
2152 
2153 /*
2154  * the main entry point for reads from the higher layers.  This
2155  * is really only called when the normal read path had a failure,
2156  * so we assume the bio they send down corresponds to a failed part
2157  * of the drive.
2158  */
2159 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2160 			  struct btrfs_io_context *bioc, u64 stripe_len,
2161 			  int mirror_num, int generic_io)
2162 {
2163 	struct btrfs_raid_bio *rbio;
2164 	int ret;
2165 
2166 	if (generic_io) {
2167 		ASSERT(bioc->mirror_num == mirror_num);
2168 		btrfs_io_bio(bio)->mirror_num = mirror_num;
2169 	}
2170 
2171 	rbio = alloc_rbio(fs_info, bioc, stripe_len);
2172 	if (IS_ERR(rbio)) {
2173 		if (generic_io)
2174 			btrfs_put_bioc(bioc);
2175 		return PTR_ERR(rbio);
2176 	}
2177 
2178 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2179 	rbio_add_bio(rbio, bio);
2180 
2181 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2182 	if (rbio->faila == -1) {
2183 		btrfs_warn(fs_info,
2184 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bioc has map_type %llu)",
2185 			   __func__, bio->bi_iter.bi_sector << 9,
2186 			   (u64)bio->bi_iter.bi_size, bioc->map_type);
2187 		if (generic_io)
2188 			btrfs_put_bioc(bioc);
2189 		kfree(rbio);
2190 		return -EIO;
2191 	}
2192 
2193 	if (generic_io) {
2194 		btrfs_bio_counter_inc_noblocked(fs_info);
2195 		rbio->generic_bio_cnt = 1;
2196 	} else {
2197 		btrfs_get_bioc(bioc);
2198 	}
2199 
2200 	/*
2201 	 * Loop retry:
2202 	 * for 'mirror_num == 2', reconstruct from all other stripes.
2203 	 * for 'mirror_num > 2', select a stripe to fail on every retry.
2204 	 */
2205 	if (mirror_num > 2) {
2206 		/*
2207 		 * 'mirror == 3' is to fail the p stripe and
2208 		 * reconstruct from the q stripe.  'mirror > 3' is to
2209 		 * fail a data stripe and reconstruct from p+q stripe.
2210 		 */
2211 		rbio->failb = rbio->real_stripes - (mirror_num - 1);
2212 		ASSERT(rbio->failb > 0);
2213 		if (rbio->failb <= rbio->faila)
2214 			rbio->failb--;
2215 	}
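	/*
	 * Example: on a 4-stripe RAID6 (2 data + P + Q), mirror_num == 3
	 * gives failb = 4 - 2 = 2, the P stripe, forcing reconstruction
	 * from Q, while mirror_num == 4 fails a data stripe so recovery
	 * has to go through both parities.
	 */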
2216 
2217 	ret = lock_stripe_add(rbio);
2218 
2219 	/*
2220 	 * __raid56_parity_recover will end the bio with
2221 	 * any errors it hits.  We don't want to return
2222 	 * its error value up the stack because our caller
2223 	 * will end up calling bio_endio with any nonzero
2224 	 * return
2225 	 */
2226 	if (ret == 0)
2227 		__raid56_parity_recover(rbio);
2228 	/*
2229 	 * our rbio has been added to the list of
2230 	 * rbios that will be handled after the
2231 	 * current lock owner is done
2232 	 */
2233 	return 0;
2234 
2235 }
2236 
2237 static void rmw_work(struct btrfs_work *work)
2238 {
2239 	struct btrfs_raid_bio *rbio;
2240 
2241 	rbio = container_of(work, struct btrfs_raid_bio, work);
2242 	raid56_rmw_stripe(rbio);
2243 }
2244 
2245 static void read_rebuild_work(struct btrfs_work *work)
2246 {
2247 	struct btrfs_raid_bio *rbio;
2248 
2249 	rbio = container_of(work, struct btrfs_raid_bio, work);
2250 	__raid56_parity_recover(rbio);
2251 }
2252 
2253 /*
2254  * The following code is used to scrub/replace the parity stripe
2255  *
2256  * Caller must have already increased bio_counter for getting @bioc.
2257  *
2258  * Note: We need to make sure all the pages added to the scrub/replace
2259  * raid bio are correct and will not be changed during the scrub/replace.
2260  * That is, those pages hold only metadata or file data with checksums.
2261  */
2262 
2263 struct btrfs_raid_bio *
2264 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2265 			       struct btrfs_io_context *bioc, u64 stripe_len,
2266 			       struct btrfs_device *scrub_dev,
2267 			       unsigned long *dbitmap, int stripe_nsectors)
2268 {
2269 	struct btrfs_raid_bio *rbio;
2270 	int i;
2271 
2272 	rbio = alloc_rbio(fs_info, bioc, stripe_len);
2273 	if (IS_ERR(rbio))
2274 		return NULL;
2275 	bio_list_add(&rbio->bio_list, bio);
2276 	/*
2277 	 * This is a special bio which is used to hold the completion handler
2278 	 * and make the scrub rbio similar to the other types
2279 	 */
2280 	ASSERT(!bio->bi_iter.bi_size);
2281 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2282 
2283 	/*
2284 	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
2285 	 * to the end position, so this search can start from the first parity
2286 	 * stripe.
2287 	 */
2288 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2289 		if (bioc->stripes[i].dev == scrub_dev) {
2290 			rbio->scrubp = i;
2291 			break;
2292 		}
2293 	}
2294 	ASSERT(i < rbio->real_stripes);
2295 
2296 	/* For now we only support the case where sectorsize equals page size */
2297 	ASSERT(fs_info->sectorsize == PAGE_SIZE);
2298 	ASSERT(rbio->stripe_npages == stripe_nsectors);
2299 	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2300 
2301 	/*
2302 	 * We have already increased bio_counter when getting bioc, record it
2303 	 * so we can free it at rbio_orig_end_io().
2304 	 */
2305 	rbio->generic_bio_cnt = 1;
2306 
2307 	return rbio;
2308 }
2309 
2310 /* Used for both parity scrub and missing device rebuild. */
2311 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2312 			    u64 logical)
2313 {
2314 	int stripe_offset;
2315 	int index;
2316 
2317 	ASSERT(logical >= rbio->bioc->raid_map[0]);
2318 	ASSERT(logical + PAGE_SIZE <= rbio->bioc->raid_map[0] +
2319 				rbio->stripe_len * rbio->nr_data);
2320 	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
2321 	index = stripe_offset >> PAGE_SHIFT;
2322 	rbio->bio_pages[index] = page;
2323 }
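/*
 * The index math above relies on raid_map[] being sorted by logical
 * address.  With illustrative values of stripe_len = 64K and 4K pages,
 * logical = raid_map[0] + 68K gives index 17, i.e. page 1 of the
 * second data stripe in the flat bio_pages[] array.
 */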
2324 
2325 /*
2326  * We just scrub the parity that we have correct data on the same horizontal,
2327  * so we needn't allocate all pages for all the stripes.
2328  */
2329 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2330 {
2331 	int i;
2332 	int bit;
2333 	int index;
2334 	struct page *page;
2335 
2336 	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2337 		for (i = 0; i < rbio->real_stripes; i++) {
2338 			index = i * rbio->stripe_npages + bit;
2339 			if (rbio->stripe_pages[index])
2340 				continue;
2341 
2342 			page = alloc_page(GFP_NOFS);
2343 			if (!page)
2344 				return -ENOMEM;
2345 			rbio->stripe_pages[index] = page;
2346 		}
2347 	}
2348 	return 0;
2349 }
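/*
 * E.g. with stripe_npages = 16 (an illustrative value) and only bit 3
 * of dbitmap set, the loop above allocates one page at index
 * i * 16 + 3 for each stripe: just the vertical strip the scrub needs,
 * not the full stripe_pages[] array.
 */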
2350 
2351 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2352 					 int need_check)
2353 {
2354 	struct btrfs_io_context *bioc = rbio->bioc;
2355 	void **pointers = rbio->finish_pointers;
2356 	unsigned long *pbitmap = rbio->finish_pbitmap;
2357 	int nr_data = rbio->nr_data;
2358 	int stripe;
2359 	int pagenr;
2360 	bool has_qstripe;
2361 	struct page *p_page = NULL;
2362 	struct page *q_page = NULL;
2363 	struct bio_list bio_list;
2364 	struct bio *bio;
2365 	int is_replace = 0;
2366 	int ret;
2367 
2368 	bio_list_init(&bio_list);
2369 
2370 	if (rbio->real_stripes - rbio->nr_data == 1)
2371 		has_qstripe = false;
2372 	else if (rbio->real_stripes - rbio->nr_data == 2)
2373 		has_qstripe = true;
2374 	else
2375 		BUG();
2376 
2377 	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
2378 		is_replace = 1;
2379 		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2380 	}
2381 
2382 	/*
2383 	 * The higher layers (the scrubber) are unlikely to use
2384 	 * this area of the disk again soon, so don't cache
2385 	 * it.
2386 	 */
2387 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2388 
2389 	if (!need_check)
2390 		goto writeback;
2391 
2392 	p_page = alloc_page(GFP_NOFS);
2393 	if (!p_page)
2394 		goto cleanup;
2395 	SetPageUptodate(p_page);
2396 
2397 	if (has_qstripe) {
2398 		/* RAID6, allocate and map temp space for the Q stripe */
2399 		q_page = alloc_page(GFP_NOFS);
2400 		if (!q_page) {
2401 			__free_page(p_page);
2402 			goto cleanup;
2403 		}
2404 		SetPageUptodate(q_page);
2405 		pointers[rbio->real_stripes - 1] = kmap_local_page(q_page);
2406 	}
2407 
2408 	atomic_set(&rbio->error, 0);
2409 
2410 	/* Map the parity stripe just once */
2411 	pointers[nr_data] = kmap_local_page(p_page);
2412 
2413 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2414 		struct page *p;
2415 		void *parity;
2416 		/* first collect one page from each data stripe */
2417 		for (stripe = 0; stripe < nr_data; stripe++) {
2418 			p = page_in_rbio(rbio, stripe, pagenr, 0);
2419 			pointers[stripe] = kmap_local_page(p);
2420 		}
2421 
2422 		if (has_qstripe) {
2423 			/* RAID6, call the library function to fill in our P/Q */
2424 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2425 						pointers);
2426 		} else {
2427 			/* raid5 */
2428 			copy_page(pointers[nr_data], pointers[0]);
2429 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2430 		}
2431 
2432 		/* Check scrubbing parity and repair it */
2433 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2434 		parity = kmap_local_page(p);
2435 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2436 			copy_page(parity, pointers[rbio->scrubp]);
2437 		else
2438 			/* Parity is correct, no need to write it back */
2439 			bitmap_clear(rbio->dbitmap, pagenr, 1);
2440 		kunmap_local(parity);
2441 
2442 		for (stripe = nr_data - 1; stripe >= 0; stripe--)
2443 			kunmap_local(pointers[stripe]);
2444 	}
2445 
2446 	kunmap_local(pointers[nr_data]);
2447 	__free_page(p_page);
2448 	if (q_page) {
2449 		kunmap_local(pointers[rbio->real_stripes - 1]);
2450 		__free_page(q_page);
2451 	}
2452 
2453 writeback:
2454 	/*
2455 	 * time to start writing.  Make bios for everything from the
2456 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
2457 	 * everything else.
2458 	 */
2459 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2460 		struct page *page;
2461 
2462 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2463 		ret = rbio_add_io_page(rbio, &bio_list,
2464 			       page, rbio->scrubp, pagenr, rbio->stripe_len);
2465 		if (ret)
2466 			goto cleanup;
2467 	}
2468 
2469 	if (!is_replace)
2470 		goto submit_write;
2471 
2472 	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2473 		struct page *page;
2474 
2475 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2476 		ret = rbio_add_io_page(rbio, &bio_list, page,
2477 				       bioc->tgtdev_map[rbio->scrubp],
2478 				       pagenr, rbio->stripe_len);
2479 		if (ret)
2480 			goto cleanup;
2481 	}
2482 
2483 submit_write:
2484 	nr_data = bio_list_size(&bio_list);
2485 	if (!nr_data) {
2486 		/* Every parity is right */
2487 		rbio_orig_end_io(rbio, BLK_STS_OK);
2488 		return;
2489 	}
2490 
2491 	atomic_set(&rbio->stripes_pending, nr_data);
2492 
2493 	while ((bio = bio_list_pop(&bio_list))) {
2494 		bio->bi_private = rbio;
2495 		bio->bi_end_io = raid_write_end_io;
2496 		bio->bi_opf = REQ_OP_WRITE;
2497 
2498 		submit_bio(bio);
2499 	}
2500 	return;
2501 
2502 cleanup:
2503 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2504 
2505 	while ((bio = bio_list_pop(&bio_list)))
2506 		bio_put(bio);
2507 }
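/*
 * Design note on the check phase above: the expected parity is
 * generated into temporary pages and compared with the on-disk parity;
 * pages that already match are cleared from dbitmap, so the writeback
 * phase only rewrites parity pages that were actually wrong, and a
 * fully clean stripe ends the rbio without submitting any write.
 */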
2508 
2509 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2510 {
2511 	if (stripe >= 0 && stripe < rbio->nr_data)
2512 		return 1;
2513 	return 0;
2514 }
2515 
2516 /*
2517  * While we're doing the parity check and repair, we could have errors
2518  * in reading pages off the disk.  This checks for errors and if we're
2519  * not able to read the page it'll trigger parity reconstruction.  The
2520  * parity scrub will be finished after we've reconstructed the failed
2521  * stripes
2522  */
2523 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2524 {
2525 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
2526 		goto cleanup;
2527 
2528 	if (rbio->faila >= 0 || rbio->failb >= 0) {
2529 		int dfail = 0, failp = -1;
2530 
2531 		if (is_data_stripe(rbio, rbio->faila))
2532 			dfail++;
2533 		else if (is_parity_stripe(rbio->faila))
2534 			failp = rbio->faila;
2535 
2536 		if (is_data_stripe(rbio, rbio->failb))
2537 			dfail++;
2538 		else if (is_parity_stripe(rbio->failb))
2539 			failp = rbio->failb;
2540 
2541 		/*
2542 		 * Because the parity being scrubbed cannot be used to
2543 		 * repair the data, our repair capability is reduced by one.
2544 		 * (In the case of RAID5, we cannot repair anything.)
2545 		 */
2546 		if (dfail > rbio->bioc->max_errors - 1)
2547 			goto cleanup;
2548 
2549 		/*
2550 		 * If all the data is good and only the parity is bad,
2551 		 * just repair the parity.
2552 		 */
2553 		if (dfail == 0) {
2554 			finish_parity_scrub(rbio, 0);
2555 			return;
2556 		}
2557 
2558 		/*
2559 		 * Getting here means we have one corrupted data stripe and
2560 		 * one corrupted parity on RAID6.  If the corrupted parity
2561 		 * is the one being scrubbed, we can use the other parity to
2562 		 * repair the data; otherwise the data stripe is unrecoverable.
2563 		 */
2564 		if (failp != rbio->scrubp)
2565 			goto cleanup;
2566 
2567 		__raid_recover_end_io(rbio);
2568 	} else {
2569 		finish_parity_scrub(rbio, 1);
2570 	}
2571 	return;
2572 
2573 cleanup:
2574 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2575 }
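/*
 * To summarize the decisions above when some stripes have failed:
 *
 * - only parity failed (dfail == 0): rewrite the parity and skip the
 *   check phase;
 * - a data stripe failed and the failed parity is the scrubbed one:
 *   rebuild the data through __raid_recover_end_io() first;
 * - anything else (too many failed data stripes, or a failed data
 *   stripe that would have to be rebuilt from the untrusted scrub
 *   parity): give up and end the rbio with an I/O error.
 */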
2576 
2577 /*
2578  * end io for the read phase of the rmw cycle.  All the bios here are physical
2579  * stripe bios we've read from the disk so we can recalculate the parity of the
2580  * stripe.
2581  *
2582  * This will usually kick off finish_rmw once all the bios are read in, but it
2583  * may trigger parity reconstruction if we had any errors along the way
2584  */
2585 static void raid56_parity_scrub_end_io(struct bio *bio)
2586 {
2587 	struct btrfs_raid_bio *rbio = bio->bi_private;
2588 
2589 	if (bio->bi_status)
2590 		fail_bio_stripe(rbio, bio);
2591 	else
2592 		set_bio_pages_uptodate(bio);
2593 
2594 	bio_put(bio);
2595 
2596 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2597 		return;
2598 
2599 	/*
2600 	 * this will normally call finish_rmw to start our write
2601 	 * but if there are any failed stripes we'll reconstruct
2602 	 * from parity first
2603 	 */
2604 	validate_rbio_for_parity_scrub(rbio);
2605 }
2606 
2607 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2608 {
2609 	int bios_to_read = 0;
2610 	struct bio_list bio_list;
2611 	int ret;
2612 	int pagenr;
2613 	int stripe;
2614 	struct bio *bio;
2615 
2616 	bio_list_init(&bio_list);
2617 
2618 	ret = alloc_rbio_essential_pages(rbio);
2619 	if (ret)
2620 		goto cleanup;
2621 
2622 	atomic_set(&rbio->error, 0);
2623 	/*
2624 	 * build a list of bios to read all the missing parts of this
2625 	 * stripe
2626 	 */
2627 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2628 		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2629 			struct page *page;
2630 			/*
2631 			 * we want to find all the pages missing from
2632 			 * the rbio and read them from the disk.  If
2633 			 * page_in_rbio finds a page in the bio list
2634 			 * we don't need to read it off the stripe.
2635 			 */
2636 			page = page_in_rbio(rbio, stripe, pagenr, 1);
2637 			if (page)
2638 				continue;
2639 
2640 			page = rbio_stripe_page(rbio, stripe, pagenr);
2641 			/*
2642 			 * the bio cache may have handed us an uptodate
2643 			 * page.  If so, be happy and use it
2644 			 */
2645 			if (PageUptodate(page))
2646 				continue;
2647 
2648 			ret = rbio_add_io_page(rbio, &bio_list, page,
2649 				       stripe, pagenr, rbio->stripe_len);
2650 			if (ret)
2651 				goto cleanup;
2652 		}
2653 	}
2654 
2655 	bios_to_read = bio_list_size(&bio_list);
2656 	if (!bios_to_read) {
2657 		/*
2658 		 * this can happen if others have merged with
2659 		 * us, it means there is nothing left to read.
2660 		 * But if there are missing devices it may not be
2661 		 * safe to do the full stripe write yet.
2662 		 */
2663 		goto finish;
2664 	}
2665 
2666 	/*
2667 	 * The bioc may be freed once we submit the last bio. Make sure not to
2668 	 * touch it after that.
2669 	 */
2670 	atomic_set(&rbio->stripes_pending, bios_to_read);
2671 	while ((bio = bio_list_pop(&bio_list))) {
2672 		bio->bi_private = rbio;
2673 		bio->bi_end_io = raid56_parity_scrub_end_io;
2674 		bio->bi_opf = REQ_OP_READ;
2675 
2676 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2677 
2678 		submit_bio(bio);
2679 	}
2680 	/* the actual write will happen once the reads are done */
2681 	return;
2682 
2683 cleanup:
2684 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2685 
2686 	while ((bio = bio_list_pop(&bio_list)))
2687 		bio_put(bio);
2688 
2689 	return;
2690 
2691 finish:
2692 	validate_rbio_for_parity_scrub(rbio);
2693 }
2694 
2695 static void scrub_parity_work(struct btrfs_work *work)
2696 {
2697 	struct btrfs_raid_bio *rbio;
2698 
2699 	rbio = container_of(work, struct btrfs_raid_bio, work);
2700 	raid56_parity_scrub_stripe(rbio);
2701 }
2702 
2703 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2704 {
2705 	if (!lock_stripe_add(rbio))
2706 		start_async_work(rbio, scrub_parity_work);
2707 }
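/*
 * As in the recovery path, a nonzero return from lock_stripe_add()
 * means another rbio already owns this full stripe; our rbio has been
 * queued on the owner's plug_list and scrub_parity_work() will be
 * started once the stripe lock is handed over.
 */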
2708 
2709 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2710 
2711 struct btrfs_raid_bio *
2712 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2713 			  struct btrfs_io_context *bioc, u64 length)
2714 {
2715 	struct btrfs_raid_bio *rbio;
2716 
2717 	rbio = alloc_rbio(fs_info, bioc, length);
2718 	if (IS_ERR(rbio))
2719 		return NULL;
2720 
2721 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2722 	bio_list_add(&rbio->bio_list, bio);
2723 	/*
2724 	 * This is a special bio which is used to hold the completion handler
2725 	 * and make the scrub rbio similar to the other types
2726 	 */
2727 	ASSERT(!bio->bi_iter.bi_size);
2728 
2729 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2730 	if (rbio->faila == -1) {
2731 		btrfs_warn_rl(fs_info,
2732 	"can not determine the failed stripe number for full stripe %llu",
2733 			      bioc->raid_map[0]);
2734 		__free_raid_bio(rbio);
2735 		return NULL;
2736 	}
2737 
2738 	/*
2739 	 * When we get bioc, we have already increased bio_counter, record it
2740 	 * so we can free it at rbio_orig_end_io()
2741 	 */
2742 	rbio->generic_bio_cnt = 1;
2743 
2744 	return rbio;
2745 }
2746 
2747 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2748 {
2749 	if (!lock_stripe_add(rbio))
2750 		start_async_work(rbio, read_rebuild_work);
2751 }
2752