// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS				11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	int real_stripes;

	int stripe_npages;
	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	int scrubp;
	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;
	/*
	 * the following are arrays of pointers and bitmaps.  We size the
	 * rbio allocation big enough to hold them all and set up their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;

	/* allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;

	/* allocated with stripe_npages-many bits for finish_*() calls */
	unsigned long *finish_pbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct btrfs_work *work);

static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
	btrfs_init_work(&rbio->work, work_func, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fall back to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kvzalloc(table_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
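
/*
 * Illustrative sizing only (values assume a 64-bit kernel without lock
 * debugging): with BTRFS_STRIPE_HASH_TABLE_BITS == 11 there are 2048
 * buckets, and sizeof(struct btrfs_stripe_hash) is roughly 24 bytes, so
 *
 *	table_size ~= sizeof(*table) + 2048 * 24 ~= 48KiB
 *
 * which needs an order-4 (64KiB) allocation.  CONFIG_DEBUG_SPINLOCK and
 * similar options grow each spinlock_t, pushing the allocation toward
 * order 7; that is why kvzalloc() is used to fall back to vmalloc.
 */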

/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		copy_page(d, s);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
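
/*
 * Worked example (illustrative, addresses made up): a full stripe starting
 * at logical byte 0x4000000000 has its low 16 bits all zero, like almost
 * every other stripe start.  Hashing the raw value would concentrate
 * stripes in a handful of buckets; after the shift the input becomes
 * 0x400000 and hash_64(0x400000, 11) spreads stripes across the full
 * range of 2048 buckets.
 */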

/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
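
/*
 * Illustrative walk-through (not compiled): xor_blocks() accepts at most
 * MAX_XOR_BLOCKS sources per call.  Assuming MAX_XOR_BLOCKS == 4 and
 * src_cnt == 5, the loop makes two passes:
 *
 *	pass 1: xor_blocks(4, len, dest, pages + 0);
 *	pass 2: xor_blocks(1, len, dest, pages + 4);
 *
 * Because dest keeps its partial result between passes, the net effect
 * is dest ^= pages[0] ^ pages[1] ^ ... ^ pages[4].
 */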

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
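
/*
 * Example (illustrative values): a RAID5 chunk over three devices has
 * nr_data == 2.  With stripe_len == 64KiB the full stripe holds
 * 2 * 64KiB == 128KiB of data, so an rbio whose queued bios add up to
 * exactly 128KiB can skip the read half of read/modify/write and go
 * straight to parity generation.
 */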

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * Parity scrub needs to read the full stripe from the drive, then
	 * check and repair the parity and write out the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}

static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}
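
/*
 * Layout note with example values (illustrative): stripe_pages is indexed
 * stripe-major.  Assuming stripe_len == 64KiB and 4KiB pages,
 * stripe_npages == 16, so page 3 of stripe 2 lives at index
 * 2 * 16 + 3 == 35.  Data stripes come first, then P, then (for raid6) Q.
 */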

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				refcount_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}

			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
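
/*
 * Example (illustrative): a raid6 full stripe over five devices with
 * stripe_len == 64KiB and 4KiB pages needs
 * DIV_ROUND_UP(64KiB, 4KiB) * 5 == 16 * 5 == 80 pages to cover the three
 * data stripes plus P and Q.
 */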

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) +
		       sizeof(*rbio->stripe_pages) * num_pages +
		       sizeof(*rbio->bio_pages) * num_pages +
		       sizeof(*rbio->finish_pointers) * real_stripes +
		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
		       sizeof(*rbio->finish_pbitmap) *
				BITS_TO_LONGS(stripe_npages),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages, bio_pages, etc arrays point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_pages, num_pages);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
#undef  CONSUME_ALLOC

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
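
/*
 * Memory layout sketch (illustrative) of the single allocation made above,
 * in the order of the CONSUME_ALLOC() calls:
 *
 *	[struct btrfs_raid_bio]
 *	[stripe_pages:    num_pages    x struct page *]
 *	[bio_pages:       num_pages    x struct page *]
 *	[finish_pointers: real_stripes x void *]
 *	[dbitmap:         BITS_TO_LONGS(stripe_npages) x unsigned long]
 *	[finish_pbitmap:  BITS_TO_LONGS(stripe_npages) x unsigned long]
 *
 * One kzalloc() serves all six pieces, so the single kfree() in
 * __free_raid_bio() releases everything.
 */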

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO.
 * This will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
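
/*
 * Example (illustrative values): if a stripe begins at physical byte
 * 0x10000000 on its device, page_index 3 with 4KiB pages maps to
 *
 *	disk_start = 0x10000000 + (3 << 12) = 0x10003000
 *
 * and the bio's starting sector is disk_start >> 9 == 0x80018.
 * Consecutive pages on the same device therefore chain onto one bio via
 * bio_add_page() instead of each getting a bio of its own.
 */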

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we set up the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
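
/*
 * Example (illustrative): if the full stripe begins at logical byte
 * raid_map[0] == 1GiB and a queued bio starts at logical 1GiB + 24KiB,
 * then stripe_offset == 24KiB and, with 4KiB pages, the bio's first page
 * lands at bio_pages[6].  Later page_in_rbio() lookups become O(1) array
 * reads instead of bio list walks.
 */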

/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
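
/*
 * Parity math sketch (illustrative, per page): for raid5 the loop above
 * computes
 *
 *	P = D0 ^ D1 ^ ... ^ D(nr_data-1)
 *
 * by first copying D0 into the P page and then xoring in the remaining
 * data pages via run_xor().  For raid6, raid6_call.gen_syndrome() fills
 * both P (the same xor) and Q (the Reed-Solomon syndrome over GF(2^8))
 * in a single pass.
 */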

/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    stripe->dev->bdev &&
		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}
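
/*
 * Example (illustrative): with stripe_len == 64KiB and raid_map[] ==
 * { 1GiB, 1GiB + 64KiB, ... }, a bio starting at logical 1GiB + 80KiB
 * satisfies raid_map[1] <= logical < raid_map[1] + 64KiB, so its failure
 * is charged to data stripe 1.
 */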
1407 
1408 /*
1409  * returns -EIO if we had too many failures
1410  */
fail_rbio_index(struct btrfs_raid_bio * rbio,int failed)1411 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1412 {
1413 	unsigned long flags;
1414 	int ret = 0;
1415 
1416 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
1417 
1418 	/* we already know this stripe is bad, move on */
1419 	if (rbio->faila == failed || rbio->failb == failed)
1420 		goto out;
1421 
1422 	if (rbio->faila == -1) {
1423 		/* first failure on this rbio */
1424 		rbio->faila = failed;
1425 		atomic_inc(&rbio->error);
1426 	} else if (rbio->failb == -1) {
1427 		/* second failure on this rbio */
1428 		rbio->failb = failed;
1429 		atomic_inc(&rbio->error);
1430 	} else {
1431 		ret = -EIO;
1432 	}
1433 out:
1434 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1435 
1436 	return ret;
1437 }
1438 
1439 /*
1440  * helper to fail a stripe based on a physical disk
1441  * bio.
1442  */
fail_bio_stripe(struct btrfs_raid_bio * rbio,struct bio * bio)1443 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1444 			   struct bio *bio)
1445 {
1446 	int failed = find_bio_stripe(rbio, bio);
1447 
1448 	if (failed < 0)
1449 		return -EIO;
1450 
1451 	return fail_rbio_index(rbio, failed);
1452 }
1453 
1454 /*
1455  * this sets each page in the bio uptodate.  It should only be used on private
1456  * rbio pages, nothing that comes in from the higher layers
1457  */
set_bio_pages_uptodate(struct bio * bio)1458 static void set_bio_pages_uptodate(struct bio *bio)
1459 {
1460 	struct bio_vec *bvec;
1461 	struct bvec_iter_all iter_all;
1462 
1463 	ASSERT(!bio_flagged(bio, BIO_CLONED));
1464 
1465 	bio_for_each_segment_all(bvec, bio, iter_all)
1466 		SetPageUptodate(bvec->bv_page);
1467 }
1468 
1469 /*
1470  * end io for the read phase of the rmw cycle.  All the bios here are physical
1471  * stripe bios we've read from the disk so we can recalculate the parity of the
1472  * stripe.
1473  *
1474  * This will usually kick off finish_rmw once all the bios are read in, but it
1475  * may trigger parity reconstruction if we had any errors along the way
1476  */
raid_rmw_end_io(struct bio * bio)1477 static void raid_rmw_end_io(struct bio *bio)
1478 {
1479 	struct btrfs_raid_bio *rbio = bio->bi_private;
1480 
1481 	if (bio->bi_status)
1482 		fail_bio_stripe(rbio, bio);
1483 	else
1484 		set_bio_pages_uptodate(bio);
1485 
1486 	bio_put(bio);
1487 
1488 	if (!atomic_dec_and_test(&rbio->stripes_pending))
1489 		return;
1490 
1491 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1492 		goto cleanup;
1493 
1494 	/*
1495 	 * this will normally call finish_rmw to start our write
1496 	 * but if there are any failed stripes we'll reconstruct
1497 	 * from parity first
1498 	 */
1499 	validate_rbio_for_rmw(rbio);
1500 	return;
1501 
1502 cleanup:
1503 
1504 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1505 }
1506 
1507 /*
1508  * the stripe must be locked by the caller.  It will
1509  * unlock after all the writes are done
1510  */
raid56_rmw_stripe(struct btrfs_raid_bio * rbio)1511 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1512 {
1513 	int bios_to_read = 0;
1514 	struct bio_list bio_list;
1515 	int ret;
1516 	int pagenr;
1517 	int stripe;
1518 	struct bio *bio;
1519 
1520 	bio_list_init(&bio_list);
1521 
1522 	ret = alloc_rbio_pages(rbio);
1523 	if (ret)
1524 		goto cleanup;
1525 
1526 	index_rbio_pages(rbio);
1527 
1528 	atomic_set(&rbio->error, 0);
1529 	/*
1530 	 * build a list of bios to read all the missing parts of this
1531 	 * stripe
1532 	 */
1533 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1534 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1535 			struct page *page;
1536 			/*
1537 			 * we want to find all the pages missing from
1538 			 * the rbio and read them from the disk.  If
1539 			 * page_in_rbio finds a page in the bio list
1540 			 * we don't need to read it off the stripe.
1541 			 */
1542 			page = page_in_rbio(rbio, stripe, pagenr, 1);
1543 			if (page)
1544 				continue;
1545 
1546 			page = rbio_stripe_page(rbio, stripe, pagenr);
1547 			/*
1548 			 * the bio cache may have handed us an uptodate
1549 			 * page.  If so, be happy and use it
1550 			 */
1551 			if (PageUptodate(page))
1552 				continue;
1553 
1554 			ret = rbio_add_io_page(rbio, &bio_list, page,
1555 				       stripe, pagenr, rbio->stripe_len);
1556 			if (ret)
1557 				goto cleanup;
1558 		}
1559 	}
1560 
1561 	bios_to_read = bio_list_size(&bio_list);
1562 	if (!bios_to_read) {
1563 		/*
1564 		 * this can happen if others have merged with
1565 		 * us, it means there is nothing left to read.
1566 		 * But if there are missing devices it may not be
1567 		 * safe to do the full stripe write yet.
1568 		 */
1569 		goto finish;
1570 	}
1571 
1572 	/*
1573 	 * the bbio may be freed once we submit the last bio.  Make sure
1574 	 * not to touch it after that
1575 	 */
1576 	atomic_set(&rbio->stripes_pending, bios_to_read);
1577 	while (1) {
1578 		bio = bio_list_pop(&bio_list);
1579 		if (!bio)
1580 			break;
1581 
1582 		bio->bi_private = rbio;
1583 		bio->bi_end_io = raid_rmw_end_io;
1584 		bio->bi_opf = REQ_OP_READ;
1585 
1586 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1587 
1588 		submit_bio(bio);
1589 	}
1590 	/* the actual write will happen once the reads are done */
1591 	return 0;
1592 
1593 cleanup:
1594 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1595 
1596 	while ((bio = bio_list_pop(&bio_list)))
1597 		bio_put(bio);
1598 
1599 	return -EIO;
1600 
1601 finish:
1602 	validate_rbio_for_rmw(rbio);
1603 	return 0;
1604 }
1605 
1606 /*
1607  * if the upper layers pass in a full stripe, we thank them by only allocating
1608  * enough pages to hold the parity, and sending it all down quickly.
1609  */
full_stripe_write(struct btrfs_raid_bio * rbio)1610 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1611 {
1612 	int ret;
1613 
1614 	ret = alloc_rbio_parity_pages(rbio);
1615 	if (ret) {
1616 		__free_raid_bio(rbio);
1617 		return ret;
1618 	}
1619 
1620 	ret = lock_stripe_add(rbio);
1621 	if (ret == 0)
1622 		finish_rmw(rbio);
1623 	return 0;
1624 }
1625 
1626 /*
1627  * partial stripe writes get handed over to async helpers.
1628  * We're really hoping to merge a few more writes into this
1629  * rbio before calculating new parity
1630  */
partial_stripe_write(struct btrfs_raid_bio * rbio)1631 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1632 {
1633 	int ret;
1634 
1635 	ret = lock_stripe_add(rbio);
1636 	if (ret == 0)
1637 		start_async_work(rbio, rmw_work);
1638 	return 0;
1639 }
1640 
1641 /*
1642  * sometimes while we were reading from the drive to
1643  * recalculate parity, enough new bios come into create
1644  * a full stripe.  So we do a check here to see if we can
1645  * go directly to finish_rmw
1646  */
__raid56_parity_write(struct btrfs_raid_bio * rbio)1647 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1648 {
1649 	/* head off into rmw land if we don't have a full stripe */
1650 	if (!rbio_is_full(rbio))
1651 		return partial_stripe_write(rbio);
1652 	return full_stripe_write(rbio);
1653 }
1654 
1655 /*
1656  * We use plugging call backs to collect full stripes.
1657  * Any time we get a partial stripe write while plugged
1658  * we collect it into a list.  When the unplug comes down,
1659  * we sort the list by logical block number and merge
1660  * everything we can into the same rbios
1661  */
1662 struct btrfs_plug_cb {
1663 	struct blk_plug_cb cb;
1664 	struct btrfs_fs_info *info;
1665 	struct list_head rbio_list;
1666 	struct btrfs_work work;
1667 };
1668 
1669 /*
1670  * rbios on the plug list are sorted for easier merging.
1671  */
plug_cmp(void * priv,struct list_head * a,struct list_head * b)1672 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1673 {
1674 	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1675 						 plug_list);
1676 	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1677 						 plug_list);
1678 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1679 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1680 
1681 	if (a_sector < b_sector)
1682 		return -1;
1683 	if (a_sector > b_sector)
1684 		return 1;
1685 	return 0;
1686 }
1687 
run_plug(struct btrfs_plug_cb * plug)1688 static void run_plug(struct btrfs_plug_cb *plug)
1689 {
1690 	struct btrfs_raid_bio *cur;
1691 	struct btrfs_raid_bio *last = NULL;
1692 
1693 	/*
1694 	 * sort our plug list then try to merge
1695 	 * everything we can in hopes of creating full
1696 	 * stripes.
1697 	 */
1698 	list_sort(NULL, &plug->rbio_list, plug_cmp);
1699 	while (!list_empty(&plug->rbio_list)) {
1700 		cur = list_entry(plug->rbio_list.next,
1701 				 struct btrfs_raid_bio, plug_list);
1702 		list_del_init(&cur->plug_list);
1703 
1704 		if (rbio_is_full(cur)) {
1705 			int ret;
1706 
1707 			/* we have a full stripe, send it down */
1708 			ret = full_stripe_write(cur);
1709 			BUG_ON(ret);
1710 			continue;
1711 		}
1712 		if (last) {
1713 			if (rbio_can_merge(last, cur)) {
1714 				merge_rbio(last, cur);
1715 				__free_raid_bio(cur);
1716 				continue;
1717 
1718 			}
1719 			__raid56_parity_write(last);
1720 		}
1721 		last = cur;
1722 	}
1723 	if (last) {
1724 		__raid56_parity_write(last);
1725 	}
1726 	kfree(plug);
1727 }
1728 
1729 /*
1730  * if the unplug comes from schedule, we have to push the
1731  * work off to a helper thread
1732  */
unplug_work(struct btrfs_work * work)1733 static void unplug_work(struct btrfs_work *work)
1734 {
1735 	struct btrfs_plug_cb *plug;
1736 	plug = container_of(work, struct btrfs_plug_cb, work);
1737 	run_plug(plug);
1738 }
1739 
btrfs_raid_unplug(struct blk_plug_cb * cb,bool from_schedule)1740 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1741 {
1742 	struct btrfs_plug_cb *plug;
1743 	plug = container_of(cb, struct btrfs_plug_cb, cb);
1744 
1745 	if (from_schedule) {
1746 		btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
1747 		btrfs_queue_work(plug->info->rmw_workers,
1748 				 &plug->work);
1749 		return;
1750 	}
1751 	run_plug(plug);
1752 }
1753 
1754 /*
1755  * our main entry point for writes from the rest of the FS.
1756  */
raid56_parity_write(struct btrfs_fs_info * fs_info,struct bio * bio,struct btrfs_bio * bbio,u64 stripe_len)1757 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1758 			struct btrfs_bio *bbio, u64 stripe_len)
1759 {
1760 	struct btrfs_raid_bio *rbio;
1761 	struct btrfs_plug_cb *plug = NULL;
1762 	struct blk_plug_cb *cb;
1763 	int ret;
1764 
1765 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
1766 	if (IS_ERR(rbio)) {
1767 		btrfs_put_bbio(bbio);
1768 		return PTR_ERR(rbio);
1769 	}
1770 	bio_list_add(&rbio->bio_list, bio);
1771 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
1772 	rbio->operation = BTRFS_RBIO_WRITE;
1773 
1774 	btrfs_bio_counter_inc_noblocked(fs_info);
1775 	rbio->generic_bio_cnt = 1;
1776 
1777 	/*
1778 	 * don't plug on full rbios, just get them out the door
1779 	 * as quickly as we can
1780 	 */
1781 	if (rbio_is_full(rbio)) {
1782 		ret = full_stripe_write(rbio);
1783 		if (ret)
1784 			btrfs_bio_counter_dec(fs_info);
1785 		return ret;
1786 	}
1787 
1788 	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1789 	if (cb) {
1790 		plug = container_of(cb, struct btrfs_plug_cb, cb);
1791 		if (!plug->info) {
1792 			plug->info = fs_info;
1793 			INIT_LIST_HEAD(&plug->rbio_list);
1794 		}
1795 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
1796 		ret = 0;
1797 	} else {
1798 		ret = __raid56_parity_write(rbio);
1799 		if (ret)
1800 			btrfs_bio_counter_dec(fs_info);
1801 	}
1802 	return ret;
1803 }
1804 
1805 /*
1806  * all parity reconstruction happens here.  We've read in everything
1807  * we can find from the drives and this does the heavy lifting of
1808  * sorting the good from the bad.
1809  */
__raid_recover_end_io(struct btrfs_raid_bio * rbio)1810 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1811 {
1812 	int pagenr, stripe;
1813 	void **pointers;
1814 	int faila = -1, failb = -1;
1815 	struct page *page;
1816 	blk_status_t err;
1817 	int i;
1818 
1819 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1820 	if (!pointers) {
1821 		err = BLK_STS_RESOURCE;
1822 		goto cleanup_io;
1823 	}
1824 
1825 	faila = rbio->faila;
1826 	failb = rbio->failb;
1827 
1828 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1829 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1830 		spin_lock_irq(&rbio->bio_list_lock);
1831 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1832 		spin_unlock_irq(&rbio->bio_list_lock);
1833 	}
1834 
1835 	index_rbio_pages(rbio);
1836 
1837 	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1838 		/*
1839 		 * Now we just use bitmap to mark the horizontal stripes in
1840 		 * which we have data when doing parity scrub.
1841 		 */
1842 		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1843 		    !test_bit(pagenr, rbio->dbitmap))
1844 			continue;
1845 
1846 		/* Set up our array of pointers with pages
1847 		 * from each stripe
1848 		 */
1849 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1850 			/*
1851 			 * if we're rebuilding a read, we have to use
1852 			 * pages from the bio list
1853 			 */
1854 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1855 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1856 			    (stripe == faila || stripe == failb)) {
1857 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1858 			} else {
1859 				page = rbio_stripe_page(rbio, stripe, pagenr);
1860 			}
1861 			pointers[stripe] = kmap(page);
1862 		}
1863 
1864 		/* all raid6 handling here */
1865 		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1866 			/*
1867 			 * single failure, rebuild from parity raid5
1868 			 * style
1869 			 */
1870 			if (failb < 0) {
1871 				if (faila == rbio->nr_data) {
1872 					/*
1873 					 * Just the P stripe has failed, without
1874 					 * a bad data or Q stripe.
1875 					 * TODO, we should redo the xor here.
1876 					 */
1877 					err = BLK_STS_IOERR;
1878 					goto cleanup;
1879 				}
1880 				/*
1881 				 * a single failure in raid6 is rebuilt
1882 				 * in the pstripe code below
1883 				 */
1884 				goto pstripe;
1885 			}
1886 
1887 			/* make sure our ps and qs are in order */
1888 			if (faila > failb) {
1889 				int tmp = failb;
1890 				failb = faila;
1891 				faila = tmp;
1892 			}
1893 
1894 			/* If the q stripe failed, do a pstripe reconstruction
1895 			 * from the xors.
1896 			 * If both the q stripe and the P stripe failed, we're
1897 			 * here due to a crc mismatch and we can't give them the
1898 			 * data they want
1899 			 */
1900 			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1901 				if (rbio->bbio->raid_map[faila] ==
1902 				    RAID5_P_STRIPE) {
1903 					err = BLK_STS_IOERR;
1904 					goto cleanup;
1905 				}
1906 				/*
1907 				 * otherwise we have one bad data stripe and
1908 				 * a good P stripe.  raid5!
1909 				 */
1910 				goto pstripe;
1911 			}
1912 
1913 			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1914 				raid6_datap_recov(rbio->real_stripes,
1915 						  PAGE_SIZE, faila, pointers);
1916 			} else {
1917 				raid6_2data_recov(rbio->real_stripes,
1918 						  PAGE_SIZE, faila, failb,
1919 						  pointers);
1920 			}
1921 		} else {
1922 			void *p;
1923 
1924 			/* rebuild from P stripe here (raid5 or raid6) */
1925 			BUG_ON(failb != -1);
1926 pstripe:
1927 			/* Copy parity block into failed block to start with */
1928 			copy_page(pointers[faila], pointers[rbio->nr_data]);
1929 
1930 			/* rearrange the pointer array */
1931 			p = pointers[faila];
1932 			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1933 				pointers[stripe] = pointers[stripe + 1];
1934 			pointers[rbio->nr_data - 1] = p;
1935 
1936 			/* xor in the rest */
1937 			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1938 		}
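		/*
		 * Worked example of the rotation above (illustrative): with
		 * nr_data == 3 and faila == 0, pointers starts as
		 * [copy-of-P, D1, D2, P, (Q)].  After the shuffle it is
		 * [D1, D2, copy-of-P, ...] and run_xor() xors D1 and D2 into
		 * that last slot, leaving the reconstructed D0 in the page
		 * backing the failed stripe.
		 */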
1939 		/* if we're doing this rebuild as part of an rmw, go through
1940 		 * and set all of our private rbio pages in the
1941 		 * failed stripes as uptodate.  This way finish_rmw will
1942 		 * know they can be trusted.  If this was a read reconstruction,
1943 		 * other endio functions will fiddle the uptodate bits
1944 		 */
1945 		if (rbio->operation == BTRFS_RBIO_WRITE) {
1946 			for (i = 0;  i < rbio->stripe_npages; i++) {
1947 				if (faila != -1) {
1948 					page = rbio_stripe_page(rbio, faila, i);
1949 					SetPageUptodate(page);
1950 				}
1951 				if (failb != -1) {
1952 					page = rbio_stripe_page(rbio, failb, i);
1953 					SetPageUptodate(page);
1954 				}
1955 			}
1956 		}
1957 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1958 			/*
1959 			 * if we're rebuilding a read, we have to use
1960 			 * pages from the bio list
1961 			 */
1962 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1963 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1964 			    (stripe == faila || stripe == failb)) {
1965 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1966 			} else {
1967 				page = rbio_stripe_page(rbio, stripe, pagenr);
1968 			}
1969 			kunmap(page);
1970 		}
1971 	}
1972 
1973 	err = BLK_STS_OK;
1974 cleanup:
1975 	kfree(pointers);
1976 
1977 cleanup_io:
1978 	/*
1979 	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
1980 	 * valid rbio which is consistent with ondisk content, thus such a
1981 	 * valid rbio can be cached to avoid further disk reads.
1982 	 */
1983 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1984 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1985 		/*
1986 		 * - In case of two failures, where rbio->failb != -1:
1987 		 *
1988 		 *   Do not cache this rbio since the above read reconstruction
1989 		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
1990 		 *   changed some content of stripes which are not identical to
1991 		 *   on-disk content any more, otherwise, a later write/recover
1992 		 *   may steal stripe_pages from this rbio and end up with
1993 		 *   corruptions or rebuild failures.
1994 		 *
1995 		 * - In case of single failure, where rbio->failb == -1:
1996 		 *
1997 		 *   Cache this rbio iff the above read reconstruction is
1998 		 *   executed without problems.
1999 		 */
2000 		if (err == BLK_STS_OK && rbio->failb < 0)
2001 			cache_rbio_pages(rbio);
2002 		else
2003 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2004 
2005 		rbio_orig_end_io(rbio, err);
2006 	} else if (err == BLK_STS_OK) {
2007 		rbio->faila = -1;
2008 		rbio->failb = -1;
2009 
2010 		if (rbio->operation == BTRFS_RBIO_WRITE)
2011 			finish_rmw(rbio);
2012 		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2013 			finish_parity_scrub(rbio, 0);
2014 		else
2015 			BUG();
2016 	} else {
2017 		rbio_orig_end_io(rbio, err);
2018 	}
2019 }
2020 
2021 /*
2022  * This is called only for stripes we've read from disk to
2023  * reconstruct the parity.
2024  */
2025 static void raid_recover_end_io(struct bio *bio)
2026 {
2027 	struct btrfs_raid_bio *rbio = bio->bi_private;
2028 
2029 	/*
2030 	 * we only read stripe pages off the disk, set them
2031 	 * up to date if there were no errors
2032 	 */
2033 	if (bio->bi_status)
2034 		fail_bio_stripe(rbio, bio);
2035 	else
2036 		set_bio_pages_uptodate(bio);
2037 	bio_put(bio);
2038 
2039 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2040 		return;
2041 
2042 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2043 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2044 	else
2045 		__raid_recover_end_io(rbio);
2046 }
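/*
 * bbio->max_errors is the number of tolerated stripe failures (1 for
 * RAID5, 2 for RAID6); once the last read completes, the rbio is either
 * failed outright or handed to __raid_recover_end_io() for reconstruction.
 */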
2047 
2048 /*
2049  * reads everything we need off the disk to reconstruct
2050  * the parity. endio handlers trigger final reconstruction
2051  * when the IO is done.
2052  *
2053  * This is used both for reads from the higher layers and for
2054  * parity construction required to finish an rmw cycle.
2055  */
2056 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2057 {
2058 	int bios_to_read = 0;
2059 	struct bio_list bio_list;
2060 	int ret;
2061 	int pagenr;
2062 	int stripe;
2063 	struct bio *bio;
2064 
2065 	bio_list_init(&bio_list);
2066 
2067 	ret = alloc_rbio_pages(rbio);
2068 	if (ret)
2069 		goto cleanup;
2070 
2071 	atomic_set(&rbio->error, 0);
2072 
2073 	/*
2074 	 * read everything that hasn't failed.  Thanks to the
2075 	 * stripe cache, it is possible that some or all of these
2076 	 * pages are going to be uptodate.
2077 	 */
2078 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2079 		if (rbio->faila == stripe || rbio->failb == stripe) {
2080 			atomic_inc(&rbio->error);
2081 			continue;
2082 		}
2083 
2084 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2085 			struct page *p;
2086 
2087 			/*
2088 			 * the rmw code may have already read this
2089 			 * page in
2090 			 */
2091 			p = rbio_stripe_page(rbio, stripe, pagenr);
2092 			if (PageUptodate(p))
2093 				continue;
2094 
2095 			ret = rbio_add_io_page(rbio, &bio_list,
2096 				       rbio_stripe_page(rbio, stripe, pagenr),
2097 				       stripe, pagenr, rbio->stripe_len);
2098 			if (ret < 0)
2099 				goto cleanup;
2100 		}
2101 	}
2102 
2103 	bios_to_read = bio_list_size(&bio_list);
2104 	if (!bios_to_read) {
2105 		/*
2106 		 * we might have no bios to read just because the pages
2107 		 * were up to date, or we might have no bios to read because
2108 		 * the devices were gone.
2109 		 */
2110 		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2111 			__raid_recover_end_io(rbio);
2112 			goto out;
2113 		} else {
2114 			goto cleanup;
2115 		}
2116 	}
2117 
2118 	/*
2119 	 * the bbio may be freed once we submit the last bio.  Make sure
2120 	 * not to touch it after that
2121 	 */
2122 	atomic_set(&rbio->stripes_pending, bios_to_read);
2123 	while (1) {
2124 		bio = bio_list_pop(&bio_list);
2125 		if (!bio)
2126 			break;
2127 
2128 		bio->bi_private = rbio;
2129 		bio->bi_end_io = raid_recover_end_io;
2130 		bio->bi_opf = REQ_OP_READ;
2131 
2132 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2133 
2134 		submit_bio(bio);
2135 	}
2136 out:
2137 	return 0;
2138 
2139 cleanup:
2140 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2141 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2142 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2143 
2144 	while ((bio = bio_list_pop(&bio_list)))
2145 		bio_put(bio);
2146 
2147 	return -EIO;
2148 }
2149 
2150 /*
2151  * the main entry point for reads from the higher layers.  This
2152  * is really only called when the normal read path had a failure,
2153  * so we assume the bio they send down corresponds to a failed part
2154  * of the drive.
2155  */
2156 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2157 			  struct btrfs_bio *bbio, u64 stripe_len,
2158 			  int mirror_num, int generic_io)
2159 {
2160 	struct btrfs_raid_bio *rbio;
2161 	int ret;
2162 
2163 	if (generic_io) {
2164 		ASSERT(bbio->mirror_num == mirror_num);
2165 		btrfs_io_bio(bio)->mirror_num = mirror_num;
2166 	}
2167 
2168 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2169 	if (IS_ERR(rbio)) {
2170 		if (generic_io)
2171 			btrfs_put_bbio(bbio);
2172 		return PTR_ERR(rbio);
2173 	}
2174 
2175 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2176 	bio_list_add(&rbio->bio_list, bio);
2177 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
2178 
2179 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2180 	if (rbio->faila == -1) {
2181 		btrfs_warn(fs_info,
2182 	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2183 			   __func__, (u64)bio->bi_iter.bi_sector << 9,
2184 			   (u64)bio->bi_iter.bi_size, bbio->map_type);
2185 		if (generic_io)
2186 			btrfs_put_bbio(bbio);
2187 		kfree(rbio);
2188 		return -EIO;
2189 	}
2190 
2191 	if (generic_io) {
2192 		btrfs_bio_counter_inc_noblocked(fs_info);
2193 		rbio->generic_bio_cnt = 1;
2194 	} else {
2195 		btrfs_get_bbio(bbio);
2196 	}
2197 
2198 	/*
2199 	 * Loop retry:
2200 	 * for 'mirror_num == 2', reconstruct from all other stripes.
2201 	 * for 'mirror_num > 2', select a stripe to fail on every retry.
2202 	 */
2203 	if (mirror_num > 2) {
2204 		/*
2205 		 * 'mirror_num == 3' is to fail the P stripe and
2206 		 * reconstruct from the Q stripe.  'mirror_num > 3' is to
2207 		 * fail a data stripe and reconstruct from the P and Q stripes.
2208 		 */
2209 		rbio->failb = rbio->real_stripes - (mirror_num - 1);
2210 		ASSERT(rbio->failb > 0);
2211 		if (rbio->failb <= rbio->faila)
2212 			rbio->failb--;
2213 	}
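	/*
	 * For illustration: with 4 data stripes plus P and Q (real_stripes
	 * == 6, P at index 4, Q at index 5), mirror_num == 3 yields
	 * failb == 4 (fail the P stripe) and mirror_num == 4 yields
	 * failb == 3 (fail the last data stripe); if that collides with
	 * faila, failb is shifted down by one.
	 */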
2214 
2215 	ret = lock_stripe_add(rbio);
2216 
2217 	/*
2218 	 * __raid56_parity_recover will end the bio with
2219 	 * any errors it hits.  We don't want to return
2220 	 * its error value up the stack because our caller
2221 	 * will end up calling bio_endio with any nonzero
2222 	 * return
2223 	 */
2224 	if (ret == 0)
2225 		__raid56_parity_recover(rbio);
2226 	/*
2227 	 * our rbio has been added to the list of
2228 	 * rbios that will be handled after the
2229 	 * current lock owner is done
2230 	 */
2231 	return 0;
2232 
2233 }
2234 
2235 static void rmw_work(struct btrfs_work *work)
2236 {
2237 	struct btrfs_raid_bio *rbio;
2238 
2239 	rbio = container_of(work, struct btrfs_raid_bio, work);
2240 	raid56_rmw_stripe(rbio);
2241 }
2242 
2243 static void read_rebuild_work(struct btrfs_work *work)
2244 {
2245 	struct btrfs_raid_bio *rbio;
2246 
2247 	rbio = container_of(work, struct btrfs_raid_bio, work);
2248 	__raid56_parity_recover(rbio);
2249 }
2250 
2251 /*
2252  * The following code is used to scrub/replace the parity stripe
2253  *
2254  * Caller must have already increased bio_counter for getting @bbio.
2255  *
2256  * Note: We need to make sure all the pages added into the scrub/replace
2257  * raid bio are correct and will not change during the scrub/replace; that
2258  * is, those pages only hold metadata or file data with checksums.
2259  */
2260 
2261 struct btrfs_raid_bio *
2262 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2263 			       struct btrfs_bio *bbio, u64 stripe_len,
2264 			       struct btrfs_device *scrub_dev,
2265 			       unsigned long *dbitmap, int stripe_nsectors)
2266 {
2267 	struct btrfs_raid_bio *rbio;
2268 	int i;
2269 
2270 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2271 	if (IS_ERR(rbio))
2272 		return NULL;
2273 	bio_list_add(&rbio->bio_list, bio);
2274 	/*
2275 	 * This is a special bio which is used to hold the completion handler
2276 	 * and make the scrub rbio similar to the other types
2277 	 */
2278 	ASSERT(!bio->bi_iter.bi_size);
2279 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2280 
2281 	/*
2282 	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2283 	 * to the end position, so this search can start from the first parity
2284 	 * stripe.
2285 	 */
2286 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2287 		if (bbio->stripes[i].dev == scrub_dev) {
2288 			rbio->scrubp = i;
2289 			break;
2290 		}
2291 	}
2292 	ASSERT(i < rbio->real_stripes);
2293 
2294 	/* For now we only support the case where sectorsize equals page size */
2295 	ASSERT(fs_info->sectorsize == PAGE_SIZE);
2296 	ASSERT(rbio->stripe_npages == stripe_nsectors);
2297 	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2298 
2299 	/*
2300 	 * We have already increased bio_counter when getting bbio, record it
2301 	 * so we can free it at rbio_orig_end_io().
2302 	 */
2303 	rbio->generic_bio_cnt = 1;
2304 
2305 	return rbio;
2306 }
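/*
 * Rough calling sequence, as a sketch only (the authoritative user is the
 * scrub code): the caller fills in the pages to verify with
 * raid56_add_scrub_pages() below and then starts the work with
 * raid56_parity_submit_scrub_rbio().
 */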
2307 
2308 /* Used for both parity scrub and missing. */
2309 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2310 			    u64 logical)
2311 {
2312 	int stripe_offset;
2313 	int index;
2314 
2315 	ASSERT(logical >= rbio->bbio->raid_map[0]);
2316 	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2317 				rbio->stripe_len * rbio->nr_data);
2318 	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2319 	index = stripe_offset >> PAGE_SHIFT;
2320 	rbio->bio_pages[index] = page;
2321 }
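/*
 * Example of the index math above (illustrative): bio_pages is indexed by
 * page across the data portion of the full stripe, so with 4K pages a
 * @logical sitting 128K past raid_map[0] lands at index 32.
 */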
2322 
2323 /*
2324  * We only scrub the parity for which we have correct data on the same
2325  * horizontal stripe, so we don't need to allocate all pages for all the stripes.
2326  */
2327 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2328 {
2329 	int i;
2330 	int bit;
2331 	int index;
2332 	struct page *page;
2333 
2334 	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2335 		for (i = 0; i < rbio->real_stripes; i++) {
2336 			index = i * rbio->stripe_npages + bit;
2337 			if (rbio->stripe_pages[index])
2338 				continue;
2339 
2340 			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2341 			if (!page)
2342 				return -ENOMEM;
2343 			rbio->stripe_pages[index] = page;
2344 		}
2345 	}
2346 	return 0;
2347 }
2348 
2349 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2350 					 int need_check)
2351 {
2352 	struct btrfs_bio *bbio = rbio->bbio;
2353 	void **pointers = rbio->finish_pointers;
2354 	unsigned long *pbitmap = rbio->finish_pbitmap;
2355 	int nr_data = rbio->nr_data;
2356 	int stripe;
2357 	int pagenr;
2358 	int p_stripe = -1;
2359 	int q_stripe = -1;
2360 	struct page *p_page = NULL;
2361 	struct page *q_page = NULL;
2362 	struct bio_list bio_list;
2363 	struct bio *bio;
2364 	int is_replace = 0;
2365 	int ret;
2366 
2367 	bio_list_init(&bio_list);
2368 
2369 	if (rbio->real_stripes - rbio->nr_data == 1) {
2370 		p_stripe = rbio->real_stripes - 1;
2371 	} else if (rbio->real_stripes - rbio->nr_data == 2) {
2372 		p_stripe = rbio->real_stripes - 2;
2373 		q_stripe = rbio->real_stripes - 1;
2374 	} else {
2375 		BUG();
2376 	}
2377 
2378 	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2379 		is_replace = 1;
2380 		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2381 	}
2382 
2383 	/*
2384 	 * The higher layers (the scrubber) are unlikely to use
2385 	 * this area of the disk again soon, so don't cache
2386 	 * it.
2387 	 */
2388 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2389 
2390 	if (!need_check)
2391 		goto writeback;
2392 
2393 	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2394 	if (!p_page)
2395 		goto cleanup;
2396 	SetPageUptodate(p_page);
2397 
2398 	if (q_stripe != -1) {
2399 		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2400 		if (!q_page) {
2401 			__free_page(p_page);
2402 			goto cleanup;
2403 		}
2404 		SetPageUptodate(q_page);
2405 	}
2406 
2407 	atomic_set(&rbio->error, 0);
2408 
2409 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2410 		struct page *p;
2411 		void *parity;
2412 		/* first collect one page from each data stripe */
2413 		for (stripe = 0; stripe < nr_data; stripe++) {
2414 			p = page_in_rbio(rbio, stripe, pagenr, 0);
2415 			pointers[stripe] = kmap(p);
2416 		}
2417 
2418 		/* then add the parity stripe */
2419 		pointers[stripe++] = kmap(p_page);
2420 
2421 		if (q_stripe != -1) {
2422 
2423 			/*
2424 			 * raid6, add the qstripe and call the
2425 			 * library function to fill in our p/q
2426 			 */
2427 			pointers[stripe++] = kmap(q_page);
2428 
2429 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2430 						pointers);
2431 		} else {
2432 			/* raid5 */
2433 			copy_page(pointers[nr_data], pointers[0]);
2434 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2435 		}
2436 
2437 		/* Check scrubbing parity and repair it */
2438 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2439 		parity = kmap(p);
2440 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2441 			copy_page(parity, pointers[rbio->scrubp]);
2442 		else
2443 			/* Parity is right, needn't writeback */
2444 			bitmap_clear(rbio->dbitmap, pagenr, 1);
2445 		kunmap(p);
2446 
2447 		for (stripe = 0; stripe < nr_data; stripe++)
2448 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2449 		kunmap(p_page);
2450 	}
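	/*
	 * At this point dbitmap only has bits left for pages whose on-disk
	 * parity did not match the parity we just recomputed; only those
	 * pages are written back below.
	 */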
2451 
2452 	__free_page(p_page);
2453 	if (q_page)
2454 		__free_page(q_page);
2455 
2456 writeback:
2457 	/*
2458 	 * time to start writing.  Make bios for everything from the
2459 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
2460 	 * everything else.
2461 	 */
2462 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2463 		struct page *page;
2464 
2465 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2466 		ret = rbio_add_io_page(rbio, &bio_list,
2467 			       page, rbio->scrubp, pagenr, rbio->stripe_len);
2468 		if (ret)
2469 			goto cleanup;
2470 	}
2471 
2472 	if (!is_replace)
2473 		goto submit_write;
2474 
2475 	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2476 		struct page *page;
2477 
2478 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2479 		ret = rbio_add_io_page(rbio, &bio_list, page,
2480 				       bbio->tgtdev_map[rbio->scrubp],
2481 				       pagenr, rbio->stripe_len);
2482 		if (ret)
2483 			goto cleanup;
2484 	}
2485 
2486 submit_write:
2487 	nr_data = bio_list_size(&bio_list);
2488 	if (!nr_data) {
2489 		/* Every parity is right */
2490 		rbio_orig_end_io(rbio, BLK_STS_OK);
2491 		return;
2492 	}
2493 
2494 	atomic_set(&rbio->stripes_pending, nr_data);
2495 
2496 	while (1) {
2497 		bio = bio_list_pop(&bio_list);
2498 		if (!bio)
2499 			break;
2500 
2501 		bio->bi_private = rbio;
2502 		bio->bi_end_io = raid_write_end_io;
2503 		bio->bi_opf = REQ_OP_WRITE;
2504 
2505 		submit_bio(bio);
2506 	}
2507 	return;
2508 
2509 cleanup:
2510 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2511 
2512 	while ((bio = bio_list_pop(&bio_list)))
2513 		bio_put(bio);
2514 }
2515 
2516 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2517 {
2518 	if (stripe >= 0 && stripe < rbio->nr_data)
2519 		return 1;
2520 	return 0;
2521 }
2522 
2523 /*
2524  * While we're doing the parity check and repair, we could have errors
2525  * in reading pages off the disk.  This checks for errors and if we're
2526  * not able to read the page it'll trigger parity reconstruction.  The
2527  * parity scrub will be finished after we've reconstructed the failed
2528  * stripes
2529  */
2530 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2531 {
2532 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2533 		goto cleanup;
2534 
2535 	if (rbio->faila >= 0 || rbio->failb >= 0) {
2536 		int dfail = 0, failp = -1;
2537 
2538 		if (is_data_stripe(rbio, rbio->faila))
2539 			dfail++;
2540 		else if (is_parity_stripe(rbio->faila))
2541 			failp = rbio->faila;
2542 
2543 		if (is_data_stripe(rbio, rbio->failb))
2544 			dfail++;
2545 		else if (is_parity_stripe(rbio->failb))
2546 			failp = rbio->failb;
2547 
2548 		/*
2549 		 * Because we cannot use the parity stripe being scrubbed to
2550 		 * repair the data, our repair capability is reduced.
2551 		 * (In the case of RAID5, we cannot repair anything.)
2552 		 */
2553 		if (dfail > rbio->bbio->max_errors - 1)
2554 			goto cleanup;
2555 
2556 		/*
2557 		 * If all the data is good and only the parity is bad, just
2558 		 * repair the parity.
2559 		 */
2560 		if (dfail == 0) {
2561 			finish_parity_scrub(rbio, 0);
2562 			return;
2563 		}
2564 
2565 		/*
2566 		 * At this point we have one corrupted data stripe and one
2567 		 * corrupted parity on RAID6.  If the corrupted parity is the
2568 		 * one being scrubbed, we can luckily use the other parity to
2569 		 * repair the data; otherwise we cannot repair the data stripe.
2570 		 */
2571 		if (failp != rbio->scrubp)
2572 			goto cleanup;
2573 
2574 		__raid_recover_end_io(rbio);
2575 	} else {
2576 		finish_parity_scrub(rbio, 1);
2577 	}
2578 	return;
2579 
2580 cleanup:
2581 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2582 }
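/*
 * A concrete reading of the checks above (illustrative): on RAID5
 * (max_errors == 1) any failed data stripe is fatal here, since the only
 * parity is the one being scrubbed.  On RAID6 (max_errors == 2) a single
 * failed data stripe can be repaired, but only when the other recorded
 * failure is the parity stripe under scrub, so the remaining parity can be
 * used for the rebuild.
 */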
2583 
2584 /*
2585  * end io for the read phase of the rmw cycle.  All the bios here are physical
2586  * stripe bios we've read from the disk so we can recalculate the parity of the
2587  * stripe.
2588  *
2589  * This will usually kick off finish_rmw once all the bios are read in, but it
2590  * may trigger parity reconstruction if we had any errors along the way
2591  */
2592 static void raid56_parity_scrub_end_io(struct bio *bio)
2593 {
2594 	struct btrfs_raid_bio *rbio = bio->bi_private;
2595 
2596 	if (bio->bi_status)
2597 		fail_bio_stripe(rbio, bio);
2598 	else
2599 		set_bio_pages_uptodate(bio);
2600 
2601 	bio_put(bio);
2602 
2603 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2604 		return;
2605 
2606 	/*
2607 	 * this will normally call finish_rmw to start our write
2608 	 * but if there are any failed stripes we'll reconstruct
2609 	 * from parity first
2610 	 */
2611 	validate_rbio_for_parity_scrub(rbio);
2612 }
2613 
2614 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2615 {
2616 	int bios_to_read = 0;
2617 	struct bio_list bio_list;
2618 	int ret;
2619 	int pagenr;
2620 	int stripe;
2621 	struct bio *bio;
2622 
2623 	bio_list_init(&bio_list);
2624 
2625 	ret = alloc_rbio_essential_pages(rbio);
2626 	if (ret)
2627 		goto cleanup;
2628 
2629 	atomic_set(&rbio->error, 0);
2630 	/*
2631 	 * build a list of bios to read all the missing parts of this
2632 	 * stripe
2633 	 */
2634 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2635 		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2636 			struct page *page;
2637 			/*
2638 			 * we want to find all the pages missing from
2639 			 * the rbio and read them from the disk.  If
2640 			 * page_in_rbio finds a page in the bio list
2641 			 * we don't need to read it off the stripe.
2642 			 */
2643 			page = page_in_rbio(rbio, stripe, pagenr, 1);
2644 			if (page)
2645 				continue;
2646 
2647 			page = rbio_stripe_page(rbio, stripe, pagenr);
2648 			/*
2649 			 * the bio cache may have handed us an uptodate
2650 			 * page.  If so, be happy and use it
2651 			 */
2652 			if (PageUptodate(page))
2653 				continue;
2654 
2655 			ret = rbio_add_io_page(rbio, &bio_list, page,
2656 				       stripe, pagenr, rbio->stripe_len);
2657 			if (ret)
2658 				goto cleanup;
2659 		}
2660 	}
2661 
2662 	bios_to_read = bio_list_size(&bio_list);
2663 	if (!bios_to_read) {
2664 		/*
2665 		 * this can happen if others have merged with
2666 		 * us, it means there is nothing left to read.
2667 		 * But if there are missing devices it may not be
2668 		 * safe to do the full stripe write yet.
2669 		 */
2670 		goto finish;
2671 	}
2672 
2673 	/*
2674 	 * the bbio may be freed once we submit the last bio.  Make sure
2675 	 * not to touch it after that
2676 	 */
2677 	atomic_set(&rbio->stripes_pending, bios_to_read);
2678 	while (1) {
2679 		bio = bio_list_pop(&bio_list);
2680 		if (!bio)
2681 			break;
2682 
2683 		bio->bi_private = rbio;
2684 		bio->bi_end_io = raid56_parity_scrub_end_io;
2685 		bio->bi_opf = REQ_OP_READ;
2686 
2687 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2688 
2689 		submit_bio(bio);
2690 	}
2691 	/* the actual write will happen once the reads are done */
2692 	return;
2693 
2694 cleanup:
2695 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2696 
2697 	while ((bio = bio_list_pop(&bio_list)))
2698 		bio_put(bio);
2699 
2700 	return;
2701 
2702 finish:
2703 	validate_rbio_for_parity_scrub(rbio);
2704 }
2705 
2706 static void scrub_parity_work(struct btrfs_work *work)
2707 {
2708 	struct btrfs_raid_bio *rbio;
2709 
2710 	rbio = container_of(work, struct btrfs_raid_bio, work);
2711 	raid56_parity_scrub_stripe(rbio);
2712 }
2713 
2714 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2715 {
2716 	if (!lock_stripe_add(rbio))
2717 		start_async_work(rbio, scrub_parity_work);
2718 }
2719 
2720 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2721 
2722 struct btrfs_raid_bio *
2723 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2724 			  struct btrfs_bio *bbio, u64 length)
2725 {
2726 	struct btrfs_raid_bio *rbio;
2727 
2728 	rbio = alloc_rbio(fs_info, bbio, length);
2729 	if (IS_ERR(rbio))
2730 		return NULL;
2731 
2732 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2733 	bio_list_add(&rbio->bio_list, bio);
2734 	/*
2735 	 * This is a special bio which is used to hold the completion handler
2736 	 * and make this rbio similar to the other types
2737 	 */
2738 	ASSERT(!bio->bi_iter.bi_size);
2739 
2740 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2741 	if (rbio->faila == -1) {
2742 		BUG();
2743 		kfree(rbio);
2744 		return NULL;
2745 	}
2746 
2747 	/*
2748 	 * When we get bbio, we have already increased bio_counter, record it
2749 	 * so we can free it at rbio_orig_end_io()
2750 	 */
2751 	rbio->generic_bio_cnt = 1;
2752 
2753 	return rbio;
2754 }
2755 
2756 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2757 {
2758 	if (!lock_stripe_add(rbio))
2759 		start_async_work(rbio, read_rebuild_work);
2760 }
2761