1 /*
2  * Copyright (C) 2012 Fusion-io  All rights reserved.
3  * Copyright (C) 2012 Intel Corp. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public
7  * License v2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public
15  * License along with this program; if not, write to the
16  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17  * Boston, MA 021110-1307, USA.
18  */
19 #include <linux/sched.h>
20 #include <linux/wait.h>
21 #include <linux/bio.h>
22 #include <linux/slab.h>
23 #include <linux/buffer_head.h>
24 #include <linux/blkdev.h>
25 #include <linux/random.h>
26 #include <linux/iocontext.h>
27 #include <linux/capability.h>
28 #include <linux/ratelimit.h>
29 #include <linux/kthread.h>
30 #include <linux/raid/pq.h>
31 #include <linux/hash.h>
32 #include <linux/list_sort.h>
33 #include <linux/raid/xor.h>
34 #include <linux/vmalloc.h>
35 #include <asm/div64.h>
36 #include "ctree.h"
37 #include "extent_map.h"
38 #include "disk-io.h"
39 #include "transaction.h"
40 #include "print-tree.h"
41 #include "volumes.h"
42 #include "raid56.h"
43 #include "async-thread.h"
44 #include "check-integrity.h"
45 #include "rcu-string.h"
46 
47 /* set when additional merges to this rbio are not allowed */
48 #define RBIO_RMW_LOCKED_BIT	1
49 
50 /*
51  * set when this rbio is sitting in the hash, but it is just a cache
52  * of past RMW
53  */
54 #define RBIO_CACHE_BIT		2
55 
56 /*
57  * set when it is safe to trust the stripe_pages for caching
58  */
59 #define RBIO_CACHE_READY_BIT	3
60 
61 #define RBIO_CACHE_SIZE 1024
62 
63 enum btrfs_rbio_ops {
64 	BTRFS_RBIO_WRITE,
65 	BTRFS_RBIO_READ_REBUILD,
66 	BTRFS_RBIO_PARITY_SCRUB,
67 	BTRFS_RBIO_REBUILD_MISSING,
68 };
69 
70 struct btrfs_raid_bio {
71 	struct btrfs_fs_info *fs_info;
72 	struct btrfs_bio *bbio;
73 
74 	/* while we're doing rmw on a stripe
75 	 * we put it into a hash table so we can
76 	 * lock the stripe and merge more rbios
77 	 * into it.
78 	 */
79 	struct list_head hash_list;
80 
81 	/*
82 	 * LRU list for the stripe cache
83 	 */
84 	struct list_head stripe_cache;
85 
86 	/*
87 	 * for scheduling work in the helper threads
88 	 */
89 	struct btrfs_work work;
90 
91 	/*
92 	 * bio list and bio_list_lock are used
93 	 * to add more bios into the stripe
94 	 * in hopes of avoiding the full rmw
95 	 */
96 	struct bio_list bio_list;
97 	spinlock_t bio_list_lock;
98 
99 	/* also protected by the bio_list_lock, the
100 	 * plug list is used by the plugging code
101 	 * to collect partial bios while plugged.  The
102 	 * stripe locking code also uses it to hand off
103 	 * the stripe lock to the next pending IO
104 	 */
105 	struct list_head plug_list;
106 
107 	/*
108 	 * flags that tell us if it is safe to
109 	 * merge with this bio
110 	 */
111 	unsigned long flags;
112 
113 	/* size of each individual stripe on disk */
114 	int stripe_len;
115 
116 	/* number of data stripes (no p/q) */
117 	int nr_data;
118 
119 	int real_stripes;
120 
121 	int stripe_npages;
122 	/*
123 	 * set if we're doing a parity rebuild
124 	 * for a read from higher up, which is handled
125 	 * differently from a parity rebuild as part of
126 	 * rmw
127 	 */
128 	enum btrfs_rbio_ops operation;
129 
130 	/* first bad stripe */
131 	int faila;
132 
133 	/* second bad stripe (for raid6 use) */
134 	int failb;
135 
136 	int scrubp;
137 	/*
138 	 * number of pages needed to represent the full
139 	 * stripe
140 	 */
141 	int nr_pages;
142 
143 	/*
144 	 * size of all the bios in the bio_list.  This
145 	 * helps us decide if the rbio maps to a full
146 	 * stripe or not
147 	 */
148 	int bio_list_bytes;
149 
150 	int generic_bio_cnt;
151 
152 	atomic_t refs;
153 
154 	atomic_t stripes_pending;
155 
156 	atomic_t error;
157 	/*
158 	 * these are two arrays of pointers.  We allocate the
159 	 * rbio big enough to hold them both and set up their
160 	 * locations when the rbio is allocated
161 	 */
162 
163 	/* pointers to pages that we allocated for
164 	 * reading/writing stripes directly from the disk (including P/Q)
165 	 */
166 	struct page **stripe_pages;
167 
168 	/*
169 	 * pointers to the pages in the bio_list.  Stored
170 	 * here for faster lookup
171 	 */
172 	struct page **bio_pages;
173 
174 	/*
175 	 * bitmap to record which horizontal stripe has data
176 	 */
177 	unsigned long *dbitmap;
178 };
179 
180 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
181 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
182 static void rmw_work(struct btrfs_work *work);
183 static void read_rebuild_work(struct btrfs_work *work);
184 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
185 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
186 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
187 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
188 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
189 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
190 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
191 
192 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
193 					 int need_check);
194 static void async_scrub_parity(struct btrfs_raid_bio *rbio);
195 
196 /*
197  * the stripe hash table is used for locking, and to collect
198  * bios in hopes of making a full stripe
199  */
200 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
201 {
202 	struct btrfs_stripe_hash_table *table;
203 	struct btrfs_stripe_hash_table *x;
204 	struct btrfs_stripe_hash *cur;
205 	struct btrfs_stripe_hash *h;
206 	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
207 	int i;
208 	int table_size;
209 
210 	if (info->stripe_hash_table)
211 		return 0;
212 
213 	/*
214 	 * The table is large, starting with order 4 and can go as high as
215 	 * order 7 in case lock debugging is turned on.
216 	 *
217 	 * Try harder to allocate and fallback to vmalloc to lower the chance
218 	 * of a failing mount.
219 	 */
220 	table_size = sizeof(*table) + sizeof(*h) * num_entries;
221 	table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
222 	if (!table) {
223 		table = vzalloc(table_size);
224 		if (!table)
225 			return -ENOMEM;
226 	}
227 
228 	spin_lock_init(&table->cache_lock);
229 	INIT_LIST_HEAD(&table->stripe_cache);
230 
231 	h = table->table;
232 
233 	for (i = 0; i < num_entries; i++) {
234 		cur = h + i;
235 		INIT_LIST_HEAD(&cur->hash_list);
236 		spin_lock_init(&cur->lock);
237 		init_waitqueue_head(&cur->wait);
238 	}
239 
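	/*
	 * Another caller may have raced us and installed a table already;
	 * cmpxchg() only publishes ours if the pointer is still NULL, and
	 * kvfree() below frees the losing copy whether it came from
	 * kzalloc() or vzalloc().
	 */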
240 	x = cmpxchg(&info->stripe_hash_table, NULL, table);
241 	if (x)
242 		kvfree(x);
243 	return 0;
244 }
245 
246 /*
247  * caching an rbio means to copy anything from the
248  * bio_pages array into the stripe_pages array.  We
249  * use the page uptodate bit in the stripe cache array
250  * to indicate if it has valid data
251  *
252  * once the caching is done, we set the cache ready
253  * bit.
254  */
255 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
256 {
257 	int i;
258 	char *s;
259 	char *d;
260 	int ret;
261 
262 	ret = alloc_rbio_pages(rbio);
263 	if (ret)
264 		return;
265 
266 	for (i = 0; i < rbio->nr_pages; i++) {
267 		if (!rbio->bio_pages[i])
268 			continue;
269 
270 		s = kmap(rbio->bio_pages[i]);
271 		d = kmap(rbio->stripe_pages[i]);
272 
273 		memcpy(d, s, PAGE_CACHE_SIZE);
274 
275 		kunmap(rbio->bio_pages[i]);
276 		kunmap(rbio->stripe_pages[i]);
277 		SetPageUptodate(rbio->stripe_pages[i]);
278 	}
279 	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
280 }
281 
282 /*
283  * we hash on the first logical address of the stripe
284  */
285 static int rbio_bucket(struct btrfs_raid_bio *rbio)
286 {
287 	u64 num = rbio->bbio->raid_map[0];
288 
289 	/*
290 	 * we shift down quite a bit.  We're using byte
291 	 * addressing, and most of the lower bits are zeros.
292 	 * This tends to upset hash_64, and it consistently
293 	 * returns just one or two different values.
294 	 *
295 	 * shifting off the lower bits fixes things.
296 	 */
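	/*
	 * For illustration, assuming the usual 64K stripe size: the low
	 * 16 bits of raid_map[0] are zero for every full stripe start, so
	 * without the shift hash_64() would see almost no entropy.
	 */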
297 	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
298 }
299 
300 /*
301  * stealing an rbio means taking all the uptodate pages from the stripe
302  * array in the source rbio and putting them into the destination rbio
303  */
304 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
305 {
306 	int i;
307 	struct page *s;
308 	struct page *d;
309 
310 	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
311 		return;
312 
313 	for (i = 0; i < dest->nr_pages; i++) {
314 		s = src->stripe_pages[i];
315 		if (!s || !PageUptodate(s)) {
316 			continue;
317 		}
318 
319 		d = dest->stripe_pages[i];
320 		if (d)
321 			__free_page(d);
322 
323 		dest->stripe_pages[i] = s;
324 		src->stripe_pages[i] = NULL;
325 	}
326 }
327 
328 /*
329  * merging means we take the bio_list from the victim and
330  * splice it into the destination.  The victim should
331  * be discarded afterwards.
332  *
333  * must be called with dest->rbio_list_lock held
334  */
335 static void merge_rbio(struct btrfs_raid_bio *dest,
336 		       struct btrfs_raid_bio *victim)
337 {
338 	bio_list_merge(&dest->bio_list, &victim->bio_list);
339 	dest->bio_list_bytes += victim->bio_list_bytes;
340 	dest->generic_bio_cnt += victim->generic_bio_cnt;
341 	bio_list_init(&victim->bio_list);
342 }
343 
344 /*
345  * used to prune items that are in the cache.  The caller
346  * must hold the hash table lock.
347  */
348 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
349 {
350 	int bucket = rbio_bucket(rbio);
351 	struct btrfs_stripe_hash_table *table;
352 	struct btrfs_stripe_hash *h;
353 	int freeit = 0;
354 
355 	/*
356 	 * check the bit again under the hash table lock.
357 	 */
358 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
359 		return;
360 
361 	table = rbio->fs_info->stripe_hash_table;
362 	h = table->table + bucket;
363 
364 	/* hold the lock for the bucket because we may be
365 	 * removing it from the hash table
366 	 */
367 	spin_lock(&h->lock);
368 
369 	/*
370 	 * hold the lock for the bio list because we need
371 	 * to make sure the bio list is empty
372 	 */
373 	spin_lock(&rbio->bio_list_lock);
374 
375 	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
376 		list_del_init(&rbio->stripe_cache);
377 		table->cache_size -= 1;
378 		freeit = 1;
379 
380 		/* if the bio list isn't empty, this rbio is
381 		 * still involved in an IO.  We take it out
382 		 * of the cache list, and drop the ref that
383 		 * was held for the list.
384 		 *
385 		 * If the bio_list was empty, we also remove
386 		 * the rbio from the hash_table, and drop
387 		 * the corresponding ref
388 		 */
389 		if (bio_list_empty(&rbio->bio_list)) {
390 			if (!list_empty(&rbio->hash_list)) {
391 				list_del_init(&rbio->hash_list);
392 				atomic_dec(&rbio->refs);
393 				BUG_ON(!list_empty(&rbio->plug_list));
394 			}
395 		}
396 	}
397 
398 	spin_unlock(&rbio->bio_list_lock);
399 	spin_unlock(&h->lock);
400 
401 	if (freeit)
402 		__free_raid_bio(rbio);
403 }
404 
405 /*
406  * prune a given rbio from the cache
407  */
408 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
409 {
410 	struct btrfs_stripe_hash_table *table;
411 	unsigned long flags;
412 
413 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
414 		return;
415 
416 	table = rbio->fs_info->stripe_hash_table;
417 
418 	spin_lock_irqsave(&table->cache_lock, flags);
419 	__remove_rbio_from_cache(rbio);
420 	spin_unlock_irqrestore(&table->cache_lock, flags);
421 }
422 
423 /*
424  * remove everything in the cache
425  */
426 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
427 {
428 	struct btrfs_stripe_hash_table *table;
429 	unsigned long flags;
430 	struct btrfs_raid_bio *rbio;
431 
432 	table = info->stripe_hash_table;
433 
434 	spin_lock_irqsave(&table->cache_lock, flags);
435 	while (!list_empty(&table->stripe_cache)) {
436 		rbio = list_entry(table->stripe_cache.next,
437 				  struct btrfs_raid_bio,
438 				  stripe_cache);
439 		__remove_rbio_from_cache(rbio);
440 	}
441 	spin_unlock_irqrestore(&table->cache_lock, flags);
442 }
443 
444 /*
445  * remove all cached entries and free the hash table
446  * used by unmount
447  */
448 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
449 {
450 	if (!info->stripe_hash_table)
451 		return;
452 	btrfs_clear_rbio_cache(info);
453 	kvfree(info->stripe_hash_table);
454 	info->stripe_hash_table = NULL;
455 }
456 
457 /*
458  * insert an rbio into the stripe cache.  It
459  * must have already been prepared by calling
460  * cache_rbio_pages
461  *
462  * If this rbio was already cached, it gets
463  * moved to the front of the lru.
464  *
465  * If the size of the rbio cache is too big, we
466  * prune an item.
467  */
468 static void cache_rbio(struct btrfs_raid_bio *rbio)
469 {
470 	struct btrfs_stripe_hash_table *table;
471 	unsigned long flags;
472 
473 	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
474 		return;
475 
476 	table = rbio->fs_info->stripe_hash_table;
477 
478 	spin_lock_irqsave(&table->cache_lock, flags);
479 	spin_lock(&rbio->bio_list_lock);
480 
481 	/* bump our ref if we were not in the list before */
482 	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
483 		atomic_inc(&rbio->refs);
484 
485 	if (!list_empty(&rbio->stripe_cache)){
486 		list_move(&rbio->stripe_cache, &table->stripe_cache);
487 	} else {
488 		list_add(&rbio->stripe_cache, &table->stripe_cache);
489 		table->cache_size += 1;
490 	}
491 
492 	spin_unlock(&rbio->bio_list_lock);
493 
494 	if (table->cache_size > RBIO_CACHE_SIZE) {
495 		struct btrfs_raid_bio *found;
496 
497 		found = list_entry(table->stripe_cache.prev,
498 				  struct btrfs_raid_bio,
499 				  stripe_cache);
500 
501 		if (found != rbio)
502 			__remove_rbio_from_cache(found);
503 	}
504 
505 	spin_unlock_irqrestore(&table->cache_lock, flags);
506 	return;
507 }
508 
509 /*
510  * helper function to run the xor_blocks api.  It is only
511  * able to do MAX_XOR_BLOCKS at a time, so we need to
512  * loop through.
513  */
514 static void run_xor(void **pages, int src_cnt, ssize_t len)
515 {
516 	int src_off = 0;
517 	int xor_src_cnt = 0;
518 	void *dest = pages[src_cnt];
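	/*
	 * The destination sits just past the sources in pages[]; callers
	 * seed it with one data block before calling (see the memcpy()
	 * ahead of run_xor() in finish_rmw() and in the recovery path),
	 * so xor_blocks() accumulates into it.
	 */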
519 
520 	while(src_cnt > 0) {
521 		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
522 		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
523 
524 		src_cnt -= xor_src_cnt;
525 		src_off += xor_src_cnt;
526 	}
527 }
528 
529 /*
530  * returns true if the bio list inside this rbio
531  * covers an entire stripe (no rmw required).
532  * Must be called with the bio list lock held, or
533  * at a time when you know it is impossible to add
534  * new bios into the list
535  */
536 static int __rbio_is_full(struct btrfs_raid_bio *rbio)
537 {
538 	unsigned long size = rbio->bio_list_bytes;
539 	int ret = 1;
540 
541 	if (size != rbio->nr_data * rbio->stripe_len)
542 		ret = 0;
543 
544 	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
545 	return ret;
546 }
547 
548 static int rbio_is_full(struct btrfs_raid_bio *rbio)
549 {
550 	unsigned long flags;
551 	int ret;
552 
553 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
554 	ret = __rbio_is_full(rbio);
555 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
556 	return ret;
557 }
558 
559 /*
560  * returns 1 if it is safe to merge two rbios together.
561  * The merging is safe if the two rbios correspond to
562  * the same stripe and if they are both going in the same
563  * direction (read vs write), and if neither one is
564  * locked for final IO
565  *
566  * The caller is responsible for locking such that
567  * rmw_locked is safe to test
568  */
569 static int rbio_can_merge(struct btrfs_raid_bio *last,
570 			  struct btrfs_raid_bio *cur)
571 {
572 	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
573 	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
574 		return 0;
575 
576 	/*
577 	 * we can't merge with cached rbios, since the
578 	 * idea is that when we merge the destination
579 	 * rbio is going to run our IO for us.  We can
580 	 * steal from cached rbio's though, other functions
581 	 * handle that.
582 	 */
583 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
584 	    test_bit(RBIO_CACHE_BIT, &cur->flags))
585 		return 0;
586 
587 	if (last->bbio->raid_map[0] !=
588 	    cur->bbio->raid_map[0])
589 		return 0;
590 
591 	/* we can't merge with different operations */
592 	if (last->operation != cur->operation)
593 		return 0;
594 	/*
595 	 * Parity scrub rbios read the full stripe from the drive to
596 	 * check and repair the parity, then write the new results back.
597 	 *
598 	 * We're not allowed to add any new bios to the
599 	 * bio list here, anyone else that wants to
600 	 * change this stripe needs to do their own rmw.
601 	 */
602 	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
603 	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
604 		return 0;
605 
606 	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
607 	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
608 		return 0;
609 
610 	return 1;
611 }
612 
613 /*
614  * helper to index into the pstripe
615  */
616 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
617 {
618 	index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
619 	return rbio->stripe_pages[index];
620 }
621 
622 /*
623  * helper to index into the qstripe, returns null
624  * if there is no qstripe
625  */
626 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
627 {
628 	if (rbio->nr_data + 1 == rbio->real_stripes)
629 		return NULL;
630 
631 	index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
632 		PAGE_CACHE_SHIFT;
633 	return rbio->stripe_pages[index];
634 }
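
/*
 * stripe_pages[] is laid out stripe by stripe: the nr_data data stripes
 * first, then P, then Q (raid6 only), each occupying
 * stripe_len >> PAGE_CACHE_SHIFT page slots.  The two helpers above turn
 * a page offset within the P or Q stripe into that flat index.
 */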
635 
636 /*
637  * The first stripe in the table for a logical address
638  * has the lock.  rbios are added in one of three ways:
639  *
640  * 1) Nobody has the stripe locked yet.  The rbio is given
641  * the lock and 0 is returned.  The caller must start the IO
642  * themselves.
643  *
644  * 2) Someone has the stripe locked, but we're able to merge
645  * with the lock owner.  The rbio is freed and the IO will
646  * start automatically along with the existing rbio.  1 is returned.
647  *
648  * 3) Someone has the stripe locked, but we're not able to merge.
649  * The rbio is added to the lock owner's plug list, or merged into
650  * an rbio already on the plug list.  When the lock owner unlocks,
651  * the next rbio on the list is run and the IO is started automatically.
652  * 1 is returned
653  *
654  * If we return 0, the caller still owns the rbio and must continue with
655  * IO submission.  If we return 1, the caller must assume the rbio has
656  * already been freed.
657  */
658 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
659 {
660 	int bucket = rbio_bucket(rbio);
661 	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
662 	struct btrfs_raid_bio *cur;
663 	struct btrfs_raid_bio *pending;
664 	unsigned long flags;
665 	DEFINE_WAIT(wait);
666 	struct btrfs_raid_bio *freeit = NULL;
667 	struct btrfs_raid_bio *cache_drop = NULL;
668 	int ret = 0;
669 	int walk = 0;
670 
671 	spin_lock_irqsave(&h->lock, flags);
672 	list_for_each_entry(cur, &h->hash_list, hash_list) {
673 		walk++;
674 		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
675 			spin_lock(&cur->bio_list_lock);
676 
677 			/* can we steal this cached rbio's pages? */
678 			if (bio_list_empty(&cur->bio_list) &&
679 			    list_empty(&cur->plug_list) &&
680 			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
681 			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
682 				list_del_init(&cur->hash_list);
683 				atomic_dec(&cur->refs);
684 
685 				steal_rbio(cur, rbio);
686 				cache_drop = cur;
687 				spin_unlock(&cur->bio_list_lock);
688 
689 				goto lockit;
690 			}
691 
692 			/* can we merge into the lock owner? */
693 			if (rbio_can_merge(cur, rbio)) {
694 				merge_rbio(cur, rbio);
695 				spin_unlock(&cur->bio_list_lock);
696 				freeit = rbio;
697 				ret = 1;
698 				goto out;
699 			}
700 
701 
702 			/*
703 			 * we couldn't merge with the running
704 			 * rbio, see if we can merge with the
705 			 * pending ones.  We don't have to
706 			 * check for rmw_locked because there
707 			 * is no way they are inside finish_rmw
708 			 * right now
709 			 */
710 			list_for_each_entry(pending, &cur->plug_list,
711 					    plug_list) {
712 				if (rbio_can_merge(pending, rbio)) {
713 					merge_rbio(pending, rbio);
714 					spin_unlock(&cur->bio_list_lock);
715 					freeit = rbio;
716 					ret = 1;
717 					goto out;
718 				}
719 			}
720 
721 			/* no merging, put us on the tail of the plug list,
722 			 * our rbio will be started when the currently
723 			 * running rbio unlocks
724 			 */
725 			list_add_tail(&rbio->plug_list, &cur->plug_list);
726 			spin_unlock(&cur->bio_list_lock);
727 			ret = 1;
728 			goto out;
729 		}
730 	}
731 lockit:
732 	atomic_inc(&rbio->refs);
733 	list_add(&rbio->hash_list, &h->hash_list);
734 out:
735 	spin_unlock_irqrestore(&h->lock, flags);
736 	if (cache_drop)
737 		remove_rbio_from_cache(cache_drop);
738 	if (freeit)
739 		__free_raid_bio(freeit);
740 	return ret;
741 }
742 
743 /*
744  * called as rmw or parity rebuild is completed.  If the plug list has more
745  * rbios waiting for this stripe, the next one on the list will be started
746  */
747 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
748 {
749 	int bucket;
750 	struct btrfs_stripe_hash *h;
751 	unsigned long flags;
752 	int keep_cache = 0;
753 
754 	bucket = rbio_bucket(rbio);
755 	h = rbio->fs_info->stripe_hash_table->table + bucket;
756 
757 	if (list_empty(&rbio->plug_list))
758 		cache_rbio(rbio);
759 
760 	spin_lock_irqsave(&h->lock, flags);
761 	spin_lock(&rbio->bio_list_lock);
762 
763 	if (!list_empty(&rbio->hash_list)) {
764 		/*
765 		 * if we're still cached and there is no other IO
766 		 * to perform, just leave this rbio here for others
767 		 * to steal from later
768 		 */
769 		if (list_empty(&rbio->plug_list) &&
770 		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
771 			keep_cache = 1;
772 			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
773 			BUG_ON(!bio_list_empty(&rbio->bio_list));
774 			goto done;
775 		}
776 
777 		list_del_init(&rbio->hash_list);
778 		atomic_dec(&rbio->refs);
779 
780 		/*
781 		 * we use the plug list to hold all the rbios
782 		 * waiting for the chance to lock this stripe.
783 		 * hand the lock over to one of them.
784 		 */
785 		if (!list_empty(&rbio->plug_list)) {
786 			struct btrfs_raid_bio *next;
787 			struct list_head *head = rbio->plug_list.next;
788 
789 			next = list_entry(head, struct btrfs_raid_bio,
790 					  plug_list);
791 
792 			list_del_init(&rbio->plug_list);
793 
794 			list_add(&next->hash_list, &h->hash_list);
795 			atomic_inc(&next->refs);
796 			spin_unlock(&rbio->bio_list_lock);
797 			spin_unlock_irqrestore(&h->lock, flags);
798 
799 			if (next->operation == BTRFS_RBIO_READ_REBUILD)
800 				async_read_rebuild(next);
801 			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
802 				steal_rbio(rbio, next);
803 				async_read_rebuild(next);
804 			} else if (next->operation == BTRFS_RBIO_WRITE) {
805 				steal_rbio(rbio, next);
806 				async_rmw_stripe(next);
807 			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
808 				steal_rbio(rbio, next);
809 				async_scrub_parity(next);
810 			}
811 
812 			goto done_nolock;
813 			/*
814 			 * The barrier for this waitqueue_active is not needed,
815 			 * we're protected by h->lock and can't miss a wakeup.
816 			 */
817 		} else if (waitqueue_active(&h->wait)) {
818 			spin_unlock(&rbio->bio_list_lock);
819 			spin_unlock_irqrestore(&h->lock, flags);
820 			wake_up(&h->wait);
821 			goto done_nolock;
822 		}
823 	}
824 done:
825 	spin_unlock(&rbio->bio_list_lock);
826 	spin_unlock_irqrestore(&h->lock, flags);
827 
828 done_nolock:
829 	if (!keep_cache)
830 		remove_rbio_from_cache(rbio);
831 }
832 
833 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
834 {
835 	int i;
836 
837 	WARN_ON(atomic_read(&rbio->refs) < 0);
838 	if (!atomic_dec_and_test(&rbio->refs))
839 		return;
840 
841 	WARN_ON(!list_empty(&rbio->stripe_cache));
842 	WARN_ON(!list_empty(&rbio->hash_list));
843 	WARN_ON(!bio_list_empty(&rbio->bio_list));
844 
845 	for (i = 0; i < rbio->nr_pages; i++) {
846 		if (rbio->stripe_pages[i]) {
847 			__free_page(rbio->stripe_pages[i]);
848 			rbio->stripe_pages[i] = NULL;
849 		}
850 	}
851 
852 	btrfs_put_bbio(rbio->bbio);
853 	kfree(rbio);
854 }
855 
856 static void free_raid_bio(struct btrfs_raid_bio *rbio)
857 {
858 	unlock_stripe(rbio);
859 	__free_raid_bio(rbio);
860 }
861 
862 /*
863  * this frees the rbio and runs through all the bios in the
864  * bio_list and calls end_io on them
865  */
866 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
867 {
868 	struct bio *cur = bio_list_get(&rbio->bio_list);
869 	struct bio *next;
870 
871 	if (rbio->generic_bio_cnt)
872 		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
873 
874 	free_raid_bio(rbio);
875 
876 	while (cur) {
877 		next = cur->bi_next;
878 		cur->bi_next = NULL;
879 		cur->bi_error = err;
880 		bio_endio(cur);
881 		cur = next;
882 	}
883 }
884 
885 /*
886  * end io function used by finish_rmw.  When we finally
887  * get here, we've written a full stripe
888  */
889 static void raid_write_end_io(struct bio *bio)
890 {
891 	struct btrfs_raid_bio *rbio = bio->bi_private;
892 	int err = bio->bi_error;
893 
894 	if (err)
895 		fail_bio_stripe(rbio, bio);
896 
897 	bio_put(bio);
898 
899 	if (!atomic_dec_and_test(&rbio->stripes_pending))
900 		return;
901 
902 	err = 0;
903 
904 	/* OK, we have written all the stripes we need to. */
905 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
906 		err = -EIO;
907 
908 	rbio_orig_end_io(rbio, err);
909 	return;
910 }
911 
912 /*
913  * the read/modify/write code wants to use the original bio for
914  * any pages it included, and then use the rbio for everything
915  * else.  This function decides if a given index (stripe number)
916  * and page number in that stripe fall inside the original bio
917  * or the rbio.
918  *
919  * if you set bio_list_only, you'll get a NULL back for any ranges
920  * that are outside the bio_list
921  *
922  * This doesn't take any refs on anything, you get a bare page pointer
923  * and the caller must bump refs as required.
924  *
925  * You must call index_rbio_pages once before you can trust
926  * the answers from this function.
927  */
928 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
929 				 int index, int pagenr, int bio_list_only)
930 {
931 	int chunk_page;
932 	struct page *p = NULL;
933 
934 	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
935 
936 	spin_lock_irq(&rbio->bio_list_lock);
937 	p = rbio->bio_pages[chunk_page];
938 	spin_unlock_irq(&rbio->bio_list_lock);
939 
940 	if (p || bio_list_only)
941 		return p;
942 
943 	return rbio->stripe_pages[chunk_page];
944 }
945 
946 /*
947  * number of pages we need for the entire stripe across all the
948  * drives
949  */
950 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
951 {
952 	unsigned long nr = stripe_len * nr_stripes;
953 	return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
954 }
955 
956 /*
957  * allocation and initial setup for the btrfs_raid_bio.  Note that
958  * this does not allocate any pages for rbio->pages.
959  */
960 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
961 			  struct btrfs_bio *bbio, u64 stripe_len)
962 {
963 	struct btrfs_raid_bio *rbio;
964 	int nr_data = 0;
965 	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
966 	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
967 	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
968 	void *p;
969 
970 	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
971 		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8),
972 			GFP_NOFS);
973 	if (!rbio)
974 		return ERR_PTR(-ENOMEM);
975 
976 	bio_list_init(&rbio->bio_list);
977 	INIT_LIST_HEAD(&rbio->plug_list);
978 	spin_lock_init(&rbio->bio_list_lock);
979 	INIT_LIST_HEAD(&rbio->stripe_cache);
980 	INIT_LIST_HEAD(&rbio->hash_list);
981 	rbio->bbio = bbio;
982 	rbio->fs_info = root->fs_info;
983 	rbio->stripe_len = stripe_len;
984 	rbio->nr_pages = num_pages;
985 	rbio->real_stripes = real_stripes;
986 	rbio->stripe_npages = stripe_npages;
987 	rbio->faila = -1;
988 	rbio->failb = -1;
989 	atomic_set(&rbio->refs, 1);
990 	atomic_set(&rbio->error, 0);
991 	atomic_set(&rbio->stripes_pending, 0);
992 
993 	/*
994 	 * the stripe_pages and bio_pages arrays point to the extra
995 	 * memory we allocated past the end of the rbio
996 	 */
997 	p = rbio + 1;
998 	rbio->stripe_pages = p;
999 	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
1000 	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
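
	/*
	 * The single kzalloc() above therefore lays out:
	 *   [struct btrfs_raid_bio][num_pages stripe_pages pointers]
	 *   [num_pages bio_pages pointers][dbitmap, one bit per stripe page]
	 */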
1001 
1002 	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1003 		nr_data = real_stripes - 1;
1004 	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1005 		nr_data = real_stripes - 2;
1006 	else
1007 		BUG();
1008 
1009 	rbio->nr_data = nr_data;
1010 	return rbio;
1011 }
1012 
1013 /* allocate pages for all the stripes in the bio, including parity */
1014 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1015 {
1016 	int i;
1017 	struct page *page;
1018 
1019 	for (i = 0; i < rbio->nr_pages; i++) {
1020 		if (rbio->stripe_pages[i])
1021 			continue;
1022 		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1023 		if (!page)
1024 			return -ENOMEM;
1025 		rbio->stripe_pages[i] = page;
1026 		ClearPageUptodate(page);
1027 	}
1028 	return 0;
1029 }
1030 
1031 /* allocate pages for just the p/q stripes */
1032 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1033 {
1034 	int i;
1035 	struct page *page;
1036 
1037 	i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
1038 
1039 	for (; i < rbio->nr_pages; i++) {
1040 		if (rbio->stripe_pages[i])
1041 			continue;
1042 		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1043 		if (!page)
1044 			return -ENOMEM;
1045 		rbio->stripe_pages[i] = page;
1046 	}
1047 	return 0;
1048 }
1049 
1050 /*
1051  * add a single page from a specific stripe into our list of bios for IO
1052  * this will try to merge into existing bios if possible, and returns
1053  * zero if all went well.
1054  */
1055 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1056 			    struct bio_list *bio_list,
1057 			    struct page *page,
1058 			    int stripe_nr,
1059 			    unsigned long page_index,
1060 			    unsigned long bio_max_len)
1061 {
1062 	struct bio *last = bio_list->tail;
1063 	u64 last_end = 0;
1064 	int ret;
1065 	struct bio *bio;
1066 	struct btrfs_bio_stripe *stripe;
1067 	u64 disk_start;
1068 
1069 	stripe = &rbio->bbio->stripes[stripe_nr];
1070 	disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
1071 
1072 	/* if the device is missing, just fail this stripe */
1073 	if (!stripe->dev->bdev)
1074 		return fail_rbio_index(rbio, stripe_nr);
1075 
1076 	/* see if we can add this page onto our existing bio */
1077 	if (last) {
1078 		last_end = (u64)last->bi_iter.bi_sector << 9;
1079 		last_end += last->bi_iter.bi_size;
1080 
1081 		/*
1082 		 * we can't merge these if they are from different
1083 		 * devices or if they are not contiguous
1084 		 */
1085 		if (last_end == disk_start && stripe->dev->bdev &&
1086 		    !last->bi_error &&
1087 		    last->bi_bdev == stripe->dev->bdev) {
1088 			ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
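			/*
			 * bio_add_page() returns the number of bytes it
			 * could add; a full PAGE_CACHE_SIZE means the page
			 * was merged into the existing bio.
			 */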
1089 			if (ret == PAGE_CACHE_SIZE)
1090 				return 0;
1091 		}
1092 	}
1093 
1094 	/* put a new bio on the list */
1095 	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
1096 	if (!bio)
1097 		return -ENOMEM;
1098 
1099 	bio->bi_iter.bi_size = 0;
1100 	bio->bi_bdev = stripe->dev->bdev;
1101 	bio->bi_iter.bi_sector = disk_start >> 9;
1102 
1103 	bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
1104 	bio_list_add(bio_list, bio);
1105 	return 0;
1106 }
1107 
1108 /*
1109  * while we're doing the read/modify/write cycle, we could
1110  * have errors in reading pages off the disk.  This checks
1111  * for errors and if we're not able to read the page it'll
1112  * trigger parity reconstruction.  The rmw will be finished
1113  * after we've reconstructed the failed stripes
1114  */
1115 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1116 {
1117 	if (rbio->faila >= 0 || rbio->failb >= 0) {
1118 		BUG_ON(rbio->faila == rbio->real_stripes - 1);
1119 		__raid56_parity_recover(rbio);
1120 	} else {
1121 		finish_rmw(rbio);
1122 	}
1123 }
1124 
1125 /*
1126  * these are just the pages from the rbio array, not from anything
1127  * the FS sent down to us
1128  */
1129 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
1130 {
1131 	int index;
1132 	index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
1133 	index += page;
1134 	return rbio->stripe_pages[index];
1135 }
1136 
1137 /*
1138  * helper function to walk our bio list and populate the bio_pages array with
1139  * the result.  This seems expensive, but it is faster than constantly
1140  * searching through the bio list as we setup the IO in finish_rmw or stripe
1141  * reconstruction.
1142  *
1143  * This must be called before you trust the answers from page_in_rbio
1144  */
1145 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1146 {
1147 	struct bio *bio;
1148 	u64 start;
1149 	unsigned long stripe_offset;
1150 	unsigned long page_index;
1151 	struct page *p;
1152 	int i;
1153 
1154 	spin_lock_irq(&rbio->bio_list_lock);
1155 	bio_list_for_each(bio, &rbio->bio_list) {
1156 		start = (u64)bio->bi_iter.bi_sector << 9;
1157 		stripe_offset = start - rbio->bbio->raid_map[0];
1158 		page_index = stripe_offset >> PAGE_CACHE_SHIFT;
1159 
1160 		for (i = 0; i < bio->bi_vcnt; i++) {
1161 			p = bio->bi_io_vec[i].bv_page;
1162 			rbio->bio_pages[page_index + i] = p;
1163 		}
1164 	}
1165 	spin_unlock_irq(&rbio->bio_list_lock);
1166 }
1167 
1168 /*
1169  * this is called from one of two situations.  We either
1170  * have a full stripe from the higher layers, or we've read all
1171  * the missing bits off disk.
1172  *
1173  * This will calculate the parity and then send down any
1174  * changed blocks.
1175  */
1176 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1177 {
1178 	struct btrfs_bio *bbio = rbio->bbio;
1179 	void *pointers[rbio->real_stripes];
1180 	int stripe_len = rbio->stripe_len;
1181 	int nr_data = rbio->nr_data;
1182 	int stripe;
1183 	int pagenr;
1184 	int p_stripe = -1;
1185 	int q_stripe = -1;
1186 	struct bio_list bio_list;
1187 	struct bio *bio;
1188 	int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
1189 	int ret;
1190 
1191 	bio_list_init(&bio_list);
1192 
1193 	if (rbio->real_stripes - rbio->nr_data == 1) {
1194 		p_stripe = rbio->real_stripes - 1;
1195 	} else if (rbio->real_stripes - rbio->nr_data == 2) {
1196 		p_stripe = rbio->real_stripes - 2;
1197 		q_stripe = rbio->real_stripes - 1;
1198 	} else {
1199 		BUG();
1200 	}
1201 
1202 	/* at this point we either have a full stripe,
1203 	 * or we've read the full stripe from the drive.
1204 	 * recalculate the parity and write the new results.
1205 	 *
1206 	 * We're not allowed to add any new bios to the
1207 	 * bio list here, anyone else that wants to
1208 	 * change this stripe needs to do their own rmw.
1209 	 */
1210 	spin_lock_irq(&rbio->bio_list_lock);
1211 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1212 	spin_unlock_irq(&rbio->bio_list_lock);
1213 
1214 	atomic_set(&rbio->error, 0);
1215 
1216 	/*
1217 	 * now that we've set rmw_locked, run through the
1218 	 * bio list one last time and map the page pointers
1219 	 *
1220 	 * We don't cache full rbios because we're assuming
1221 	 * the higher layers are unlikely to use this area of
1222 	 * the disk again soon.  If they do use it again,
1223 	 * hopefully they will send another full bio.
1224 	 */
1225 	index_rbio_pages(rbio);
1226 	if (!rbio_is_full(rbio))
1227 		cache_rbio_pages(rbio);
1228 	else
1229 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1230 
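	/*
	 * For each page in the stripe, pointers[] below is filled as
	 * [data 0 .. nr_data-1][P][Q (raid6 only)]: gen_syndrome() computes
	 * P and Q in place, while the raid5 path seeds P with the first
	 * data block and xors in the rest of the data.
	 */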
1231 	for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1232 		struct page *p;
1233 		/* first collect one page from each data stripe */
1234 		for (stripe = 0; stripe < nr_data; stripe++) {
1235 			p = page_in_rbio(rbio, stripe, pagenr, 0);
1236 			pointers[stripe] = kmap(p);
1237 		}
1238 
1239 		/* then add the parity stripe */
1240 		p = rbio_pstripe_page(rbio, pagenr);
1241 		SetPageUptodate(p);
1242 		pointers[stripe++] = kmap(p);
1243 
1244 		if (q_stripe != -1) {
1245 
1246 			/*
1247 			 * raid6, add the qstripe and call the
1248 			 * library function to fill in our p/q
1249 			 */
1250 			p = rbio_qstripe_page(rbio, pagenr);
1251 			SetPageUptodate(p);
1252 			pointers[stripe++] = kmap(p);
1253 
1254 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1255 						pointers);
1256 		} else {
1257 			/* raid5 */
1258 			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1259 			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
1260 		}
1261 
1262 
1263 		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1264 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1265 	}
1266 
1267 	/*
1268 	 * time to start writing.  Make bios for everything from the
1269 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
1270 	 * everything else.
1271 	 */
1272 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1273 		for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1274 			struct page *page;
1275 			if (stripe < rbio->nr_data) {
1276 				page = page_in_rbio(rbio, stripe, pagenr, 1);
1277 				if (!page)
1278 					continue;
1279 			} else {
1280 			       page = rbio_stripe_page(rbio, stripe, pagenr);
1281 			}
1282 
1283 			ret = rbio_add_io_page(rbio, &bio_list,
1284 				       page, stripe, pagenr, rbio->stripe_len);
1285 			if (ret)
1286 				goto cleanup;
1287 		}
1288 	}
1289 
1290 	if (likely(!bbio->num_tgtdevs))
1291 		goto write_data;
1292 
1293 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1294 		if (!bbio->tgtdev_map[stripe])
1295 			continue;
1296 
1297 		for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1298 			struct page *page;
1299 			if (stripe < rbio->nr_data) {
1300 				page = page_in_rbio(rbio, stripe, pagenr, 1);
1301 				if (!page)
1302 					continue;
1303 			} else {
1304 			       page = rbio_stripe_page(rbio, stripe, pagenr);
1305 			}
1306 
1307 			ret = rbio_add_io_page(rbio, &bio_list, page,
1308 					       rbio->bbio->tgtdev_map[stripe],
1309 					       pagenr, rbio->stripe_len);
1310 			if (ret)
1311 				goto cleanup;
1312 		}
1313 	}
1314 
1315 write_data:
1316 	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1317 	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1318 
1319 	while (1) {
1320 		bio = bio_list_pop(&bio_list);
1321 		if (!bio)
1322 			break;
1323 
1324 		bio->bi_private = rbio;
1325 		bio->bi_end_io = raid_write_end_io;
1326 		submit_bio(WRITE, bio);
1327 	}
1328 	return;
1329 
1330 cleanup:
1331 	rbio_orig_end_io(rbio, -EIO);
1332 }
1333 
1334 /*
1335  * helper to find the stripe number for a given bio.  Used to figure out which
1336  * stripe has failed.  This expects the bio to correspond to a physical disk,
1337  * so it looks up based on physical sector numbers.
1338  */
1339 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1340 			   struct bio *bio)
1341 {
1342 	u64 physical = bio->bi_iter.bi_sector;
1343 	u64 stripe_start;
1344 	int i;
1345 	struct btrfs_bio_stripe *stripe;
1346 
1347 	physical <<= 9;
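	/* bi_sector is in 512-byte sectors; physical is now a byte address */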
1348 
1349 	for (i = 0; i < rbio->bbio->num_stripes; i++) {
1350 		stripe = &rbio->bbio->stripes[i];
1351 		stripe_start = stripe->physical;
1352 		if (physical >= stripe_start &&
1353 		    physical < stripe_start + rbio->stripe_len &&
1354 		    bio->bi_bdev == stripe->dev->bdev) {
1355 			return i;
1356 		}
1357 	}
1358 	return -1;
1359 }
1360 
1361 /*
1362  * helper to find the stripe number for a given
1363  * bio (before mapping).  Used to figure out which stripe has
1364  * failed.  This looks up based on logical block numbers.
1365  */
1366 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1367 				   struct bio *bio)
1368 {
1369 	u64 logical = bio->bi_iter.bi_sector;
1370 	u64 stripe_start;
1371 	int i;
1372 
1373 	logical <<= 9;
1374 
1375 	for (i = 0; i < rbio->nr_data; i++) {
1376 		stripe_start = rbio->bbio->raid_map[i];
1377 		if (logical >= stripe_start &&
1378 		    logical < stripe_start + rbio->stripe_len) {
1379 			return i;
1380 		}
1381 	}
1382 	return -1;
1383 }
1384 
1385 /*
1386  * returns -EIO if we had too many failures
1387  */
1388 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1389 {
1390 	unsigned long flags;
1391 	int ret = 0;
1392 
1393 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
1394 
1395 	/* we already know this stripe is bad, move on */
1396 	if (rbio->faila == failed || rbio->failb == failed)
1397 		goto out;
1398 
1399 	if (rbio->faila == -1) {
1400 		/* first failure on this rbio */
1401 		rbio->faila = failed;
1402 		atomic_inc(&rbio->error);
1403 	} else if (rbio->failb == -1) {
1404 		/* second failure on this rbio */
1405 		rbio->failb = failed;
1406 		atomic_inc(&rbio->error);
1407 	} else {
1408 		ret = -EIO;
1409 	}
1410 out:
1411 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1412 
1413 	return ret;
1414 }
1415 
1416 /*
1417  * helper to fail a stripe based on a physical disk
1418  * bio.
1419  */
1420 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1421 			   struct bio *bio)
1422 {
1423 	int failed = find_bio_stripe(rbio, bio);
1424 
1425 	if (failed < 0)
1426 		return -EIO;
1427 
1428 	return fail_rbio_index(rbio, failed);
1429 }
1430 
1431 /*
1432  * this sets each page in the bio uptodate.  It should only be used on private
1433  * rbio pages, nothing that comes in from the higher layers
1434  */
1435 static void set_bio_pages_uptodate(struct bio *bio)
1436 {
1437 	int i;
1438 	struct page *p;
1439 
1440 	for (i = 0; i < bio->bi_vcnt; i++) {
1441 		p = bio->bi_io_vec[i].bv_page;
1442 		SetPageUptodate(p);
1443 	}
1444 }
1445 
1446 /*
1447  * end io for the read phase of the rmw cycle.  All the bios here are physical
1448  * stripe bios we've read from the disk so we can recalculate the parity of the
1449  * stripe.
1450  *
1451  * This will usually kick off finish_rmw once all the bios are read in, but it
1452  * may trigger parity reconstruction if we had any errors along the way
1453  */
1454 static void raid_rmw_end_io(struct bio *bio)
1455 {
1456 	struct btrfs_raid_bio *rbio = bio->bi_private;
1457 
1458 	if (bio->bi_error)
1459 		fail_bio_stripe(rbio, bio);
1460 	else
1461 		set_bio_pages_uptodate(bio);
1462 
1463 	bio_put(bio);
1464 
1465 	if (!atomic_dec_and_test(&rbio->stripes_pending))
1466 		return;
1467 
1468 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1469 		goto cleanup;
1470 
1471 	/*
1472 	 * this will normally call finish_rmw to start our write
1473 	 * but if there are any failed stripes we'll reconstruct
1474 	 * from parity first
1475 	 */
1476 	validate_rbio_for_rmw(rbio);
1477 	return;
1478 
1479 cleanup:
1480 
1481 	rbio_orig_end_io(rbio, -EIO);
1482 }
1483 
1484 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1485 {
1486 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1487 			rmw_work, NULL, NULL);
1488 
1489 	btrfs_queue_work(rbio->fs_info->rmw_workers,
1490 			 &rbio->work);
1491 }
1492 
1493 static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1494 {
1495 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1496 			read_rebuild_work, NULL, NULL);
1497 
1498 	btrfs_queue_work(rbio->fs_info->rmw_workers,
1499 			 &rbio->work);
1500 }
1501 
1502 /*
1503  * the stripe must be locked by the caller.  It will
1504  * unlock after all the writes are done
1505  */
1506 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1507 {
1508 	int bios_to_read = 0;
1509 	struct bio_list bio_list;
1510 	int ret;
1511 	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
1512 	int pagenr;
1513 	int stripe;
1514 	struct bio *bio;
1515 
1516 	bio_list_init(&bio_list);
1517 
1518 	ret = alloc_rbio_pages(rbio);
1519 	if (ret)
1520 		goto cleanup;
1521 
1522 	index_rbio_pages(rbio);
1523 
1524 	atomic_set(&rbio->error, 0);
1525 	/*
1526 	 * build a list of bios to read all the missing parts of this
1527 	 * stripe
1528 	 */
1529 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1530 		for (pagenr = 0; pagenr < nr_pages; pagenr++) {
1531 			struct page *page;
1532 			/*
1533 			 * we want to find all the pages missing from
1534 			 * the rbio and read them from the disk.  If
1535 			 * page_in_rbio finds a page in the bio list
1536 			 * we don't need to read it off the stripe.
1537 			 */
1538 			page = page_in_rbio(rbio, stripe, pagenr, 1);
1539 			if (page)
1540 				continue;
1541 
1542 			page = rbio_stripe_page(rbio, stripe, pagenr);
1543 			/*
1544 			 * the bio cache may have handed us an uptodate
1545 			 * page.  If so, be happy and use it
1546 			 */
1547 			if (PageUptodate(page))
1548 				continue;
1549 
1550 			ret = rbio_add_io_page(rbio, &bio_list, page,
1551 				       stripe, pagenr, rbio->stripe_len);
1552 			if (ret)
1553 				goto cleanup;
1554 		}
1555 	}
1556 
1557 	bios_to_read = bio_list_size(&bio_list);
1558 	if (!bios_to_read) {
1559 		/*
1560 		 * this can happen if others have merged with
1561 		 * us, it means there is nothing left to read.
1562 		 * But if there are missing devices it may not be
1563 		 * safe to do the full stripe write yet.
1564 		 */
1565 		goto finish;
1566 	}
1567 
1568 	/*
1569 	 * the bbio may be freed once we submit the last bio.  Make sure
1570 	 * not to touch it after that
1571 	 */
1572 	atomic_set(&rbio->stripes_pending, bios_to_read);
1573 	while (1) {
1574 		bio = bio_list_pop(&bio_list);
1575 		if (!bio)
1576 			break;
1577 
1578 		bio->bi_private = rbio;
1579 		bio->bi_end_io = raid_rmw_end_io;
1580 
1581 		btrfs_bio_wq_end_io(rbio->fs_info, bio,
1582 				    BTRFS_WQ_ENDIO_RAID56);
1583 
1584 		submit_bio(READ, bio);
1585 	}
1586 	/* the actual write will happen once the reads are done */
1587 	return 0;
1588 
1589 cleanup:
1590 	rbio_orig_end_io(rbio, -EIO);
1591 	return -EIO;
1592 
1593 finish:
1594 	validate_rbio_for_rmw(rbio);
1595 	return 0;
1596 }
1597 
1598 /*
1599  * if the upper layers pass in a full stripe, we thank them by only allocating
1600  * enough pages to hold the parity, and sending it all down quickly.
1601  */
1602 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1603 {
1604 	int ret;
1605 
1606 	ret = alloc_rbio_parity_pages(rbio);
1607 	if (ret) {
1608 		__free_raid_bio(rbio);
1609 		return ret;
1610 	}
1611 
1612 	ret = lock_stripe_add(rbio);
1613 	if (ret == 0)
1614 		finish_rmw(rbio);
1615 	return 0;
1616 }
1617 
1618 /*
1619  * partial stripe writes get handed over to async helpers.
1620  * We're really hoping to merge a few more writes into this
1621  * rbio before calculating new parity
1622  */
1623 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1624 {
1625 	int ret;
1626 
1627 	ret = lock_stripe_add(rbio);
1628 	if (ret == 0)
1629 		async_rmw_stripe(rbio);
1630 	return 0;
1631 }
1632 
1633 /*
1634  * sometimes while we were reading from the drive to
1635  * recalculate parity, enough new bios come in to create
1636  * a full stripe.  So we do a check here to see if we can
1637  * go directly to finish_rmw
1638  */
1639 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1640 {
1641 	/* head off into rmw land if we don't have a full stripe */
1642 	if (!rbio_is_full(rbio))
1643 		return partial_stripe_write(rbio);
1644 	return full_stripe_write(rbio);
1645 }
1646 
1647 /*
1648  * We use plugging callbacks to collect full stripes.
1649  * Any time we get a partial stripe write while plugged
1650  * we collect it into a list.  When the unplug comes down,
1651  * we sort the list by logical block number and merge
1652  * everything we can into the same rbios
1653  */
1654 struct btrfs_plug_cb {
1655 	struct blk_plug_cb cb;
1656 	struct btrfs_fs_info *info;
1657 	struct list_head rbio_list;
1658 	struct btrfs_work work;
1659 };
1660 
1661 /*
1662  * rbios on the plug list are sorted for easier merging.
1663  */
1664 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1665 {
1666 	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1667 						 plug_list);
1668 	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1669 						 plug_list);
1670 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1671 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1672 
1673 	if (a_sector < b_sector)
1674 		return -1;
1675 	if (a_sector > b_sector)
1676 		return 1;
1677 	return 0;
1678 }
1679 
1680 static void run_plug(struct btrfs_plug_cb *plug)
1681 {
1682 	struct btrfs_raid_bio *cur;
1683 	struct btrfs_raid_bio *last = NULL;
1684 
1685 	/*
1686 	 * sort our plug list then try to merge
1687 	 * everything we can in hopes of creating full
1688 	 * stripes.
1689 	 */
1690 	list_sort(NULL, &plug->rbio_list, plug_cmp);
1691 	while (!list_empty(&plug->rbio_list)) {
1692 		cur = list_entry(plug->rbio_list.next,
1693 				 struct btrfs_raid_bio, plug_list);
1694 		list_del_init(&cur->plug_list);
1695 
1696 		if (rbio_is_full(cur)) {
1697 			/* we have a full stripe, send it down */
1698 			full_stripe_write(cur);
1699 			continue;
1700 		}
1701 		if (last) {
1702 			if (rbio_can_merge(last, cur)) {
1703 				merge_rbio(last, cur);
1704 				__free_raid_bio(cur);
1705 				continue;
1706 
1707 			}
1708 			__raid56_parity_write(last);
1709 		}
1710 		last = cur;
1711 	}
1712 	if (last) {
1713 		__raid56_parity_write(last);
1714 	}
1715 	kfree(plug);
1716 }
1717 
1718 /*
1719  * if the unplug comes from schedule, we have to push the
1720  * work off to a helper thread
1721  */
1722 static void unplug_work(struct btrfs_work *work)
1723 {
1724 	struct btrfs_plug_cb *plug;
1725 	plug = container_of(work, struct btrfs_plug_cb, work);
1726 	run_plug(plug);
1727 }
1728 
1729 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1730 {
1731 	struct btrfs_plug_cb *plug;
1732 	plug = container_of(cb, struct btrfs_plug_cb, cb);
1733 
1734 	if (from_schedule) {
1735 		btrfs_init_work(&plug->work, btrfs_rmw_helper,
1736 				unplug_work, NULL, NULL);
1737 		btrfs_queue_work(plug->info->rmw_workers,
1738 				 &plug->work);
1739 		return;
1740 	}
1741 	run_plug(plug);
1742 }
1743 
1744 /*
1745  * our main entry point for writes from the rest of the FS.
1746  */
1747 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
1748 			struct btrfs_bio *bbio, u64 stripe_len)
1749 {
1750 	struct btrfs_raid_bio *rbio;
1751 	struct btrfs_plug_cb *plug = NULL;
1752 	struct blk_plug_cb *cb;
1753 	int ret;
1754 
1755 	rbio = alloc_rbio(root, bbio, stripe_len);
1756 	if (IS_ERR(rbio)) {
1757 		btrfs_put_bbio(bbio);
1758 		return PTR_ERR(rbio);
1759 	}
1760 	bio_list_add(&rbio->bio_list, bio);
1761 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
1762 	rbio->operation = BTRFS_RBIO_WRITE;
1763 
1764 	btrfs_bio_counter_inc_noblocked(root->fs_info);
1765 	rbio->generic_bio_cnt = 1;
1766 
1767 	/*
1768 	 * don't plug on full rbios, just get them out the door
1769 	 * as quickly as we can
1770 	 */
1771 	if (rbio_is_full(rbio)) {
1772 		ret = full_stripe_write(rbio);
1773 		if (ret)
1774 			btrfs_bio_counter_dec(root->fs_info);
1775 		return ret;
1776 	}
1777 
1778 	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
1779 			       sizeof(*plug));
1780 	if (cb) {
1781 		plug = container_of(cb, struct btrfs_plug_cb, cb);
1782 		if (!plug->info) {
1783 			plug->info = root->fs_info;
1784 			INIT_LIST_HEAD(&plug->rbio_list);
1785 		}
1786 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
1787 		ret = 0;
1788 	} else {
1789 		ret = __raid56_parity_write(rbio);
1790 		if (ret)
1791 			btrfs_bio_counter_dec(root->fs_info);
1792 	}
1793 	return ret;
1794 }
1795 
1796 /*
1797  * all parity reconstruction happens here.  We've read in everything
1798  * we can find from the drives and this does the heavy lifting of
1799  * sorting the good from the bad.
1800  */
1801 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1802 {
1803 	int pagenr, stripe;
1804 	void **pointers;
1805 	int faila = -1, failb = -1;
1806 	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
1807 	struct page *page;
1808 	int err;
1809 	int i;
1810 
1811 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1812 	if (!pointers) {
1813 		err = -ENOMEM;
1814 		goto cleanup_io;
1815 	}
1816 
1817 	faila = rbio->faila;
1818 	failb = rbio->failb;
1819 
1820 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1821 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1822 		spin_lock_irq(&rbio->bio_list_lock);
1823 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1824 		spin_unlock_irq(&rbio->bio_list_lock);
1825 	}
1826 
1827 	index_rbio_pages(rbio);
1828 
1829 	for (pagenr = 0; pagenr < nr_pages; pagenr++) {
1830 		/*
1831 		 * Now we just use the bitmap to mark the horizontal stripes in
1832 		 * which we have data when doing parity scrub.
1833 		 */
1834 		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1835 		    !test_bit(pagenr, rbio->dbitmap))
1836 			continue;
1837 
1838 		/* setup our array of pointers with pages
1839 		 * from each stripe
1840 		 */
1841 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1842 			/*
1843 			 * if we're rebuilding a read, we have to use
1844 			 * pages from the bio list
1845 			 */
1846 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1847 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1848 			    (stripe == faila || stripe == failb)) {
1849 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1850 			} else {
1851 				page = rbio_stripe_page(rbio, stripe, pagenr);
1852 			}
1853 			pointers[stripe] = kmap(page);
1854 		}
1855 
1856 		/* all raid6 handling here */
1857 		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1858 			/*
1859 			 * single failure, rebuild from parity raid5
1860 			 * style
1861 			 */
1862 			if (failb < 0) {
1863 				if (faila == rbio->nr_data) {
1864 					/*
1865 					 * Just the P stripe has failed, without
1866 					 * a bad data or Q stripe.
1867 					 * TODO, we should redo the xor here.
1868 					 */
1869 					err = -EIO;
1870 					goto cleanup;
1871 				}
1872 				/*
1873 				 * a single failure in raid6 is rebuilt
1874 				 * in the pstripe code below
1875 				 */
1876 				goto pstripe;
1877 			}
1878 
1879 			/* make sure our ps and qs are in order */
1880 			if (faila > failb) {
1881 				int tmp = failb;
1882 				failb = faila;
1883 				faila = tmp;
1884 			}
1885 
1886 			/* if the q stripe has failed, do a pstripe reconstruction
1887 			 * from the xors.
1888 			 * If both the q stripe and the P stripe have failed, we're
1889 			 * here due to a crc mismatch and we can't give them the
1890 			 * data they want
1891 			 */
1892 			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1893 				if (rbio->bbio->raid_map[faila] ==
1894 				    RAID5_P_STRIPE) {
1895 					err = -EIO;
1896 					goto cleanup;
1897 				}
1898 				/*
1899 				 * otherwise we have one bad data stripe and
1900 				 * a good P stripe.  raid5!
1901 				 */
1902 				goto pstripe;
1903 			}
1904 
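			/*
			 * Two real failures remain.  The raid6 library
			 * helpers do the math: raid6_datap_recov() rebuilds
			 * one data block (and P) from Q when the second
			 * failure is the P stripe, while raid6_2data_recov()
			 * rebuilds two data blocks from P and Q.
			 */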
1905 			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1906 				raid6_datap_recov(rbio->real_stripes,
1907 						  PAGE_SIZE, faila, pointers);
1908 			} else {
1909 				raid6_2data_recov(rbio->real_stripes,
1910 						  PAGE_SIZE, faila, failb,
1911 						  pointers);
1912 			}
1913 		} else {
1914 			void *p;
1915 
1916 			/* rebuild from P stripe here (raid5 or raid6) */
1917 			BUG_ON(failb != -1);
1918 pstripe:
1919 			/* Copy parity block into failed block to start with */
1920 			memcpy(pointers[faila],
1921 			       pointers[rbio->nr_data],
1922 			       PAGE_CACHE_SIZE);
1923 
1924 			/* rearrange the pointer array */
1925 			p = pointers[faila];
1926 			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1927 				pointers[stripe] = pointers[stripe + 1];
1928 			pointers[rbio->nr_data - 1] = p;
1929 
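			/*
			 * Worked example, assuming run_xor() (defined earlier
			 * in this file) xors the first 'src_cnt' buffers into
			 * the buffer that follows them: with nr_data == 3 and
			 * faila == 1, pointers[] is now { d0, d2, P-copy },
			 * so the xor below leaves P ^ d0 ^ d2 == d1 in the
			 * failed stripe's page.
			 */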
1930 			/* xor in the rest */
1931 			run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
1932 		}
1933 		/* if we're doing this rebuild as part of an rmw, go through
1934 		 * and set all of our private rbio pages in the
1935 		 * failed stripes as uptodate.  This way finish_rmw will
1936 		 * know they can be trusted.  If this was a read reconstruction,
1937 		 * other endio functions will fiddle the uptodate bits
1938 		 */
1939 		if (rbio->operation == BTRFS_RBIO_WRITE) {
1940 			for (i = 0;  i < nr_pages; i++) {
1941 				if (faila != -1) {
1942 					page = rbio_stripe_page(rbio, faila, i);
1943 					SetPageUptodate(page);
1944 				}
1945 				if (failb != -1) {
1946 					page = rbio_stripe_page(rbio, failb, i);
1947 					SetPageUptodate(page);
1948 				}
1949 			}
1950 		}
1951 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1952 			/*
1953 			 * if we're rebuilding a read, we have to use
1954 			 * pages from the bio list
1955 			 */
1956 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1957 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1958 			    (stripe == faila || stripe == failb)) {
1959 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1960 			} else {
1961 				page = rbio_stripe_page(rbio, stripe, pagenr);
1962 			}
1963 			kunmap(page);
1964 		}
1965 	}
1966 
1967 	err = 0;
1968 cleanup:
1969 	kfree(pointers);
1970 
1971 cleanup_io:
1972 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1973 		if (err == 0)
1974 			cache_rbio_pages(rbio);
1975 		else
1976 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1977 
1978 		rbio_orig_end_io(rbio, err);
1979 	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1980 		rbio_orig_end_io(rbio, err);
1981 	} else if (err == 0) {
1982 		rbio->faila = -1;
1983 		rbio->failb = -1;
1984 
1985 		if (rbio->operation == BTRFS_RBIO_WRITE)
1986 			finish_rmw(rbio);
1987 		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
1988 			finish_parity_scrub(rbio, 0);
1989 		else
1990 			BUG();
1991 	} else {
1992 		rbio_orig_end_io(rbio, err);
1993 	}
1994 }
1995 
1996 /*
1997  * This is called only for stripes we've read from disk to
1998  * reconstruct the parity.
1999  */
2000 static void raid_recover_end_io(struct bio *bio)
2001 {
2002 	struct btrfs_raid_bio *rbio = bio->bi_private;
2003 
2004 	/*
2005 	 * we only read stripe pages off the disk, set them
2006 	 * up to date if there were no errors
2007 	 */
2008 	if (bio->bi_error)
2009 		fail_bio_stripe(rbio, bio);
2010 	else
2011 		set_bio_pages_uptodate(bio);
2012 	bio_put(bio);
2013 
2014 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2015 		return;
2016 
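	/*
	 * bbio->max_errors reflects the redundancy of the profile (1 for
	 * RAID5, 2 for RAID6); more failed stripes than that cannot be
	 * reconstructed.
	 */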
2017 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2018 		rbio_orig_end_io(rbio, -EIO);
2019 	else
2020 		__raid_recover_end_io(rbio);
2021 }
2022 
2023 /*
2024  * reads everything we need off the disk to reconstruct
2025  * the parity. endio handlers trigger final reconstruction
2026  * when the IO is done.
2027  *
2028  * This is used both for reads from the higher layers and for
2029  * parity construction required to finish an rmw cycle.
2030  */
2031 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2032 {
2033 	int bios_to_read = 0;
2034 	struct bio_list bio_list;
2035 	int ret;
2036 	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
2037 	int pagenr;
2038 	int stripe;
2039 	struct bio *bio;
2040 
2041 	bio_list_init(&bio_list);
2042 
2043 	ret = alloc_rbio_pages(rbio);
2044 	if (ret)
2045 		goto cleanup;
2046 
2047 	atomic_set(&rbio->error, 0);
2048 
2049 	/*
2050 	 * read everything that hasn't failed.  Thanks to the
2051 	 * stripe cache, it is possible that some or all of these
2052 	 * pages are going to be uptodate.
2053 	 */
2054 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2055 		if (rbio->faila == stripe || rbio->failb == stripe) {
2056 			atomic_inc(&rbio->error);
2057 			continue;
2058 		}
2059 
2060 		for (pagenr = 0; pagenr < nr_pages; pagenr++) {
2061 			struct page *p;
2062 
2063 			/*
2064 			 * the rmw code may have already read this
2065 			 * page in
2066 			 */
2067 			p = rbio_stripe_page(rbio, stripe, pagenr);
2068 			if (PageUptodate(p))
2069 				continue;
2070 
2071 			ret = rbio_add_io_page(rbio, &bio_list,
2072 				       rbio_stripe_page(rbio, stripe, pagenr),
2073 				       stripe, pagenr, rbio->stripe_len);
2074 			if (ret < 0)
2075 				goto cleanup;
2076 		}
2077 	}
2078 
2079 	bios_to_read = bio_list_size(&bio_list);
2080 	if (!bios_to_read) {
2081 		/*
2082 		 * we might have no bios to read just because the pages
2083 		 * were up to date, or we might have no bios to read because
2084 		 * the devices were gone.
2085 		 */
2086 		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2087 			__raid_recover_end_io(rbio);
2088 			goto out;
2089 		} else {
2090 			goto cleanup;
2091 		}
2092 	}
2093 
2094 	/*
2095 	 * the bbio may be freed once we submit the last bio.  Make sure
2096 	 * not to touch it after that
2097 	 */
2098 	atomic_set(&rbio->stripes_pending, bios_to_read);
2099 	while (1) {
2100 		bio = bio_list_pop(&bio_list);
2101 		if (!bio)
2102 			break;
2103 
2104 		bio->bi_private = rbio;
2105 		bio->bi_end_io = raid_recover_end_io;
2106 
2107 		btrfs_bio_wq_end_io(rbio->fs_info, bio,
2108 				    BTRFS_WQ_ENDIO_RAID56);
2109 
2110 		submit_bio(READ, bio);
2111 	}
2112 out:
2113 	return 0;
2114 
2115 cleanup:
2116 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2117 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2118 		rbio_orig_end_io(rbio, -EIO);
2119 	return -EIO;
2120 }
2121 
2122 /*
2123  * the main entry point for reads from the higher layers.  This
2124  * is really only called when the normal read path had a failure,
2125  * so we assume the bio they send down corresponds to a failed part
2126  * of the drive.
2127  */
2128 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2129 			  struct btrfs_bio *bbio, u64 stripe_len,
2130 			  int mirror_num, int generic_io)
2131 {
2132 	struct btrfs_raid_bio *rbio;
2133 	int ret;
2134 
2135 	rbio = alloc_rbio(root, bbio, stripe_len);
2136 	if (IS_ERR(rbio)) {
2137 		if (generic_io)
2138 			btrfs_put_bbio(bbio);
2139 		return PTR_ERR(rbio);
2140 	}
2141 
2142 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2143 	bio_list_add(&rbio->bio_list, bio);
2144 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
2145 
2146 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2147 	if (rbio->faila == -1) {
2148 		BUG();
2149 		if (generic_io)
2150 			btrfs_put_bbio(bbio);
2151 		kfree(rbio);
2152 		return -EIO;
2153 	}
2154 
2155 	if (generic_io) {
2156 		btrfs_bio_counter_inc_noblocked(root->fs_info);
2157 		rbio->generic_bio_cnt = 1;
2158 	} else {
2159 		btrfs_get_bbio(bbio);
2160 	}
2161 
2162 	/*
2163 	 * Loop retry:
2164 	 * for 'mirror_num == 2', reconstruct from all other stripes.
2165 	 * for 'mirror_num > 2', select a stripe to fail on every retry.
2166 	 */
2167 	if (mirror_num > 2) {
2168 		/*
2169 		 * 'mirror_num == 3' is to fail the p stripe and
2170 		 * reconstruct from the q stripe.  'mirror_num > 3' is to
2171 		 * fail a data stripe and reconstruct from the p+q stripes.
2172 		 */
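		/*
		 * Example: with real_stripes == 4 (2 data + P + Q),
		 * mirror_num == 3 gives failb == 2 (the P stripe) so we
		 * rebuild from Q, and mirror_num == 4 gives failb == 1,
		 * failing an extra data stripe so both P and Q are used.
		 */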
2173 		rbio->failb = rbio->real_stripes - (mirror_num - 1);
2174 		ASSERT(rbio->failb > 0);
2175 		if (rbio->failb <= rbio->faila)
2176 			rbio->failb--;
2177 	}
2178 
2179 	ret = lock_stripe_add(rbio);
2180 
2181 	/*
2182 	 * __raid56_parity_recover will end the bio with
2183 	 * any errors it hits.  We don't want to return
2184 	 * its error value up the stack because our caller
2185 	 * will end up calling bio_endio with any nonzero
2186 	 * return
2187 	 */
2188 	if (ret == 0)
2189 		__raid56_parity_recover(rbio);
2190 	/*
2191 	 * our rbio has been added to the list of
2192 	 * rbios that will be handled after the
2193 	 * current lock owner is done
2194 	 */
2195 	return 0;
2196 
2197 }
2198 
2199 static void rmw_work(struct btrfs_work *work)
2200 {
2201 	struct btrfs_raid_bio *rbio;
2202 
2203 	rbio = container_of(work, struct btrfs_raid_bio, work);
2204 	raid56_rmw_stripe(rbio);
2205 }
2206 
2207 static void read_rebuild_work(struct btrfs_work *work)
2208 {
2209 	struct btrfs_raid_bio *rbio;
2210 
2211 	rbio = container_of(work, struct btrfs_raid_bio, work);
2212 	__raid56_parity_recover(rbio);
2213 }
2214 
2215 /*
2216  * The following code is used to scrub/replace the parity stripe
2217  *
2218  * Note: We need to make sure that all the pages added into the scrub/replace
2219  * raid bio are correct and will not change during the scrub/replace, i.e.
2220  * those pages only hold metadata or file data protected by a checksum.
2221  */
2222 
2223 struct btrfs_raid_bio *
2224 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
2225 			       struct btrfs_bio *bbio, u64 stripe_len,
2226 			       struct btrfs_device *scrub_dev,
2227 			       unsigned long *dbitmap, int stripe_nsectors)
2228 {
2229 	struct btrfs_raid_bio *rbio;
2230 	int i;
2231 
2232 	rbio = alloc_rbio(root, bbio, stripe_len);
2233 	if (IS_ERR(rbio))
2234 		return NULL;
2235 	bio_list_add(&rbio->bio_list, bio);
2236 	/*
2237 	 * This is a special bio which is used to hold the completion handler
2238 	 * and make the scrub rbio look similar to the other rbio types
2239 	 */
2240 	ASSERT(!bio->bi_iter.bi_size);
2241 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2242 
2243 	for (i = 0; i < rbio->real_stripes; i++) {
2244 		if (bbio->stripes[i].dev == scrub_dev) {
2245 			rbio->scrubp = i;
2246 			break;
2247 		}
2248 	}
2249 
2250 	/* For now we only support a sectorsize equal to the page size */
2251 	ASSERT(root->sectorsize == PAGE_SIZE);
2252 	ASSERT(rbio->stripe_npages == stripe_nsectors);
2253 	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2254 
2255 	return rbio;
2256 }
2257 
2258 /* Used for both parity scrub and missing. */
2259 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2260 			    u64 logical)
2261 {
2262 	int stripe_offset;
2263 	int index;
2264 
2265 	ASSERT(logical >= rbio->bbio->raid_map[0]);
2266 	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2267 				rbio->stripe_len * rbio->nr_data);
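	/*
	 * bio_pages[] is indexed by the page's offset within the data
	 * portion of the full stripe.  For example, assuming 4K pages and a
	 * 64K stripe_len, a logical address 72K past raid_map[0] gives
	 * index == 18, i.e. page 2 of data stripe 1.
	 */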
2268 	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2269 	index = stripe_offset >> PAGE_CACHE_SHIFT;
2270 	rbio->bio_pages[index] = page;
2271 }
2272 
2273 /*
2274  * We only scrub the parity for the horizontal stripes where we have correct
2275  * data, so we don't need to allocate pages for every stripe.
2276  */
2277 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2278 {
2279 	int i;
2280 	int bit;
2281 	int index;
2282 	struct page *page;
2283 
2284 	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2285 		for (i = 0; i < rbio->real_stripes; i++) {
2286 			index = i * rbio->stripe_npages + bit;
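			/*
			 * stripe_pages[] is laid out stripe after stripe, so
			 * page 'bit' of stripe 'i' lives at this index.
			 */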
2287 			if (rbio->stripe_pages[index])
2288 				continue;
2289 
2290 			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2291 			if (!page)
2292 				return -ENOMEM;
2293 			rbio->stripe_pages[index] = page;
2294 			ClearPageUptodate(page);
2295 		}
2296 	}
2297 	return 0;
2298 }
2299 
2300 /*
2301  * end io function used by finish_parity_scrub.  When we finally
2302  * get here, we've written out the repaired parity
2303  */
2304 static void raid_write_parity_end_io(struct bio *bio)
2305 {
2306 	struct btrfs_raid_bio *rbio = bio->bi_private;
2307 	int err = bio->bi_error;
2308 
2309 	if (bio->bi_error)
2310 		fail_bio_stripe(rbio, bio);
2311 
2312 	bio_put(bio);
2313 
2314 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2315 		return;
2316 
2317 	err = 0;
2318 
2319 	if (atomic_read(&rbio->error))
2320 		err = -EIO;
2321 
2322 	rbio_orig_end_io(rbio, err);
2323 }
2324 
2325 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2326 					 int need_check)
2327 {
2328 	struct btrfs_bio *bbio = rbio->bbio;
2329 	void *pointers[rbio->real_stripes];
2330 	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
2331 	int nr_data = rbio->nr_data;
2332 	int stripe;
2333 	int pagenr;
2334 	int p_stripe = -1;
2335 	int q_stripe = -1;
2336 	struct page *p_page = NULL;
2337 	struct page *q_page = NULL;
2338 	struct bio_list bio_list;
2339 	struct bio *bio;
2340 	int is_replace = 0;
2341 	int ret;
2342 
2343 	bio_list_init(&bio_list);
2344 
2345 	if (rbio->real_stripes - rbio->nr_data == 1) {
2346 		p_stripe = rbio->real_stripes - 1;
2347 	} else if (rbio->real_stripes - rbio->nr_data == 2) {
2348 		p_stripe = rbio->real_stripes - 2;
2349 		q_stripe = rbio->real_stripes - 1;
2350 	} else {
2351 		BUG();
2352 	}
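	/*
	 * real_stripes - nr_data is the number of parity stripes: one for
	 * RAID5 (P only), two for RAID6 (P and Q).
	 */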
2353 
2354 	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2355 		is_replace = 1;
2356 		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2357 	}
2358 
2359 	/*
2360 	 * The higher layers (the scrubber) are unlikely to
2361 	 * use this area of the disk again soon, so don't
2362 	 * cache it.
2363 	 */
2364 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2365 
2366 	if (!need_check)
2367 		goto writeback;
2368 
2369 	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2370 	if (!p_page)
2371 		goto cleanup;
2372 	SetPageUptodate(p_page);
2373 
2374 	if (q_stripe != -1) {
2375 		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2376 		if (!q_page) {
2377 			__free_page(p_page);
2378 			goto cleanup;
2379 		}
2380 		SetPageUptodate(q_page);
2381 	}
2382 
2383 	atomic_set(&rbio->error, 0);
2384 
2385 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2386 		struct page *p;
2387 		void *parity;
2388 		/* first collect one page from each data stripe */
2389 		for (stripe = 0; stripe < nr_data; stripe++) {
2390 			p = page_in_rbio(rbio, stripe, pagenr, 0);
2391 			pointers[stripe] = kmap(p);
2392 		}
2393 
2394 		/* then add the parity stripe */
2395 		pointers[stripe++] = kmap(p_page);
2396 
2397 		if (q_stripe != -1) {
2398 
2399 			/*
2400 			 * raid6, add the qstripe and call the
2401 			 * library function to fill in our p/q
2402 			 */
2403 			pointers[stripe++] = kmap(q_page);
2404 
2405 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2406 						pointers);
2407 		} else {
2408 			/* raid5 */
2409 			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2410 			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
2411 		}
2412 
2413 		/* Check the scrubbing parity and repair it */
2414 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2415 		parity = kmap(p);
2416 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
2417 			memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
2418 		else
2419 			/* Parity is correct, no need to write it back */
2420 			bitmap_clear(rbio->dbitmap, pagenr, 1);
2421 		kunmap(p);
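		/*
		 * Pages cleared from dbitmap above are skipped by the
		 * writeback loop below, so only parity pages that actually
		 * changed get rewritten.
		 */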
2422 
2423 		for (stripe = 0; stripe < nr_data; stripe++)
2424 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2425 		kunmap(p_page);
2426 	}
2427 
2428 	__free_page(p_page);
2429 	if (q_page)
2430 		__free_page(q_page);
2431 
2432 writeback:
2433 	/*
2434 	 * time to start writing.  Make bios for the parity pages that
2435 	 * still need repair (the bits left set in dbitmap).  Ignore
2436 	 * everything else.
2437 	 */
2438 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2439 		struct page *page;
2440 
2441 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2442 		ret = rbio_add_io_page(rbio, &bio_list,
2443 			       page, rbio->scrubp, pagenr, rbio->stripe_len);
2444 		if (ret)
2445 			goto cleanup;
2446 	}
2447 
2448 	if (!is_replace)
2449 		goto submit_write;
2450 
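	/*
	 * Device replace: queue the same scrubbed parity pages for the
	 * replacement target via bbio->tgtdev_map[].  pbitmap was copied
	 * before any bits were cleared, so the new device gets every page
	 * we checked, not just the ones we had to repair.
	 */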
2451 	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2452 		struct page *page;
2453 
2454 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2455 		ret = rbio_add_io_page(rbio, &bio_list, page,
2456 				       bbio->tgtdev_map[rbio->scrubp],
2457 				       pagenr, rbio->stripe_len);
2458 		if (ret)
2459 			goto cleanup;
2460 	}
2461 
2462 submit_write:
2463 	nr_data = bio_list_size(&bio_list);
2464 	if (!nr_data) {
2465 		/* Every parity page was correct */
2466 		rbio_orig_end_io(rbio, 0);
2467 		return;
2468 	}
2469 
2470 	atomic_set(&rbio->stripes_pending, nr_data);
2471 
2472 	while (1) {
2473 		bio = bio_list_pop(&bio_list);
2474 		if (!bio)
2475 			break;
2476 
2477 		bio->bi_private = rbio;
2478 		bio->bi_end_io = raid_write_parity_end_io;
2479 		submit_bio(WRITE, bio);
2480 	}
2481 	return;
2482 
2483 cleanup:
2484 	rbio_orig_end_io(rbio, -EIO);
2485 }
2486 
2487 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2488 {
2489 	if (stripe >= 0 && stripe < rbio->nr_data)
2490 		return 1;
2491 	return 0;
2492 }
2493 
2494 /*
2495  * While we're doing the parity check and repair, we could have errors
2496  * in reading pages off the disk.  This checks for errors and if we're
2497  * not able to read the page it'll trigger parity reconstruction.  The
2498  * parity scrub will be finished after we've reconstructed the failed
2499  * stripes
2500  */
2501 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2502 {
2503 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2504 		goto cleanup;
2505 
2506 	if (rbio->faila >= 0 || rbio->failb >= 0) {
2507 		int dfail = 0, failp = -1;
2508 
2509 		if (is_data_stripe(rbio, rbio->faila))
2510 			dfail++;
2511 		else if (is_parity_stripe(rbio->faila))
2512 			failp = rbio->faila;
2513 
2514 		if (is_data_stripe(rbio, rbio->failb))
2515 			dfail++;
2516 		else if (is_parity_stripe(rbio->failb))
2517 			failp = rbio->failb;
2518 
2519 		/*
2520 		 * Because we cannot use the parity being scrubbed to
2521 		 * repair data, our repair capability is reduced by one.
2522 		 * (In the case of RAID5, we cannot repair anything.)
2523 		 */
2524 		if (dfail > rbio->bbio->max_errors - 1)
2525 			goto cleanup;
2526 
2527 		/*
2528 		 * If all the data is good and only the parity is bad,
2529 		 * just repair the parity.
2530 		 */
2531 		if (dfail == 0) {
2532 			finish_parity_scrub(rbio, 0);
2533 			return;
2534 		}
2535 
2536 		/*
2537 		 * Getting here means we have one corrupted data stripe and
2538 		 * one corrupted parity on RAID6.  If the corrupted parity
2539 		 * is the one being scrubbed, we can use the other parity to
2540 		 * repair the data; otherwise we cannot repair the data stripe.
2541 		 */
2542 		if (failp != rbio->scrubp)
2543 			goto cleanup;
2544 
2545 		__raid_recover_end_io(rbio);
2546 	} else {
2547 		finish_parity_scrub(rbio, 1);
2548 	}
2549 	return;
2550 
2551 cleanup:
2552 	rbio_orig_end_io(rbio, -EIO);
2553 }
2554 
2555 /*
2556  * end io for the read phase of the parity scrub.  All the bios here are
2557  * physical stripe bios we've read from the disk so we can recalculate the
2558  * parity of the stripe.
2559  *
2560  * This will usually kick off finish_parity_scrub once all the bios are read
2561  * in, but it may trigger parity reconstruction if we had any errors along the way
2562  */
2563 static void raid56_parity_scrub_end_io(struct bio *bio)
2564 {
2565 	struct btrfs_raid_bio *rbio = bio->bi_private;
2566 
2567 	if (bio->bi_error)
2568 		fail_bio_stripe(rbio, bio);
2569 	else
2570 		set_bio_pages_uptodate(bio);
2571 
2572 	bio_put(bio);
2573 
2574 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2575 		return;
2576 
2577 	/*
2578 	 * this will normally call finish_rmw to start our write
2579 	 * but if there are any failed stripes we'll reconstruct
2580 	 * from parity first
2581 	 */
2582 	validate_rbio_for_parity_scrub(rbio);
2583 }
2584 
2585 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2586 {
2587 	int bios_to_read = 0;
2588 	struct bio_list bio_list;
2589 	int ret;
2590 	int pagenr;
2591 	int stripe;
2592 	struct bio *bio;
2593 
2594 	ret = alloc_rbio_essential_pages(rbio);
2595 	if (ret)
2596 		goto cleanup;
2597 
2598 	bio_list_init(&bio_list);
2599 
2600 	atomic_set(&rbio->error, 0);
2601 	/*
2602 	 * build a list of bios to read all the missing parts of this
2603 	 * stripe
2604 	 */
2605 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2606 		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2607 			struct page *page;
2608 			/*
2609 			 * we want to find all the pages missing from
2610 			 * the rbio and read them from the disk.  If
2611 			 * page_in_rbio finds a page in the bio list
2612 			 * we don't need to read it off the stripe.
2613 			 */
2614 			page = page_in_rbio(rbio, stripe, pagenr, 1);
2615 			if (page)
2616 				continue;
2617 
2618 			page = rbio_stripe_page(rbio, stripe, pagenr);
2619 			/*
2620 			 * the bio cache may have handed us an uptodate
2621 			 * page.  If so, be happy and use it
2622 			 */
2623 			if (PageUptodate(page))
2624 				continue;
2625 
2626 			ret = rbio_add_io_page(rbio, &bio_list, page,
2627 				       stripe, pagenr, rbio->stripe_len);
2628 			if (ret)
2629 				goto cleanup;
2630 		}
2631 	}
2632 
2633 	bios_to_read = bio_list_size(&bio_list);
2634 	if (!bios_to_read) {
2635 		/*
2636 		 * this can happen if others have merged with
2637 		 * us, it means there is nothing left to read.
2638 		 * But if there are missing devices it may not be
2639 		 * safe to do the full stripe write yet.
2640 		 */
2641 		goto finish;
2642 	}
2643 
2644 	/*
2645 	 * the bbio may be freed once we submit the last bio.  Make sure
2646 	 * not to touch it after that
2647 	 */
2648 	atomic_set(&rbio->stripes_pending, bios_to_read);
2649 	while (1) {
2650 		bio = bio_list_pop(&bio_list);
2651 		if (!bio)
2652 			break;
2653 
2654 		bio->bi_private = rbio;
2655 		bio->bi_end_io = raid56_parity_scrub_end_io;
2656 
2657 		btrfs_bio_wq_end_io(rbio->fs_info, bio,
2658 				    BTRFS_WQ_ENDIO_RAID56);
2659 
2660 		submit_bio(READ, bio);
2661 	}
2662 	/* the actual write will happen once the reads are done */
2663 	return;
2664 
2665 cleanup:
2666 	rbio_orig_end_io(rbio, -EIO);
2667 	return;
2668 
2669 finish:
2670 	validate_rbio_for_parity_scrub(rbio);
2671 }
2672 
2673 static void scrub_parity_work(struct btrfs_work *work)
2674 {
2675 	struct btrfs_raid_bio *rbio;
2676 
2677 	rbio = container_of(work, struct btrfs_raid_bio, work);
2678 	raid56_parity_scrub_stripe(rbio);
2679 }
2680 
2681 static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2682 {
2683 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2684 			scrub_parity_work, NULL, NULL);
2685 
2686 	btrfs_queue_work(rbio->fs_info->rmw_workers,
2687 			 &rbio->work);
2688 }
2689 
2690 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2691 {
2692 	if (!lock_stripe_add(rbio))
2693 		async_scrub_parity(rbio);
2694 }
2695 
2696 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2697 
2698 struct btrfs_raid_bio *
2699 raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
2700 			  struct btrfs_bio *bbio, u64 length)
2701 {
2702 	struct btrfs_raid_bio *rbio;
2703 
2704 	rbio = alloc_rbio(root, bbio, length);
2705 	if (IS_ERR(rbio))
2706 		return NULL;
2707 
2708 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2709 	bio_list_add(&rbio->bio_list, bio);
2710 	/*
2711 	 * This is a special bio which is used to hold the completion handler
2712 	 * and make the missing rbio look similar to the other rbio types
2713 	 */
2714 	ASSERT(!bio->bi_iter.bi_size);
2715 
2716 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2717 	if (rbio->faila == -1) {
2718 		BUG();
2719 		kfree(rbio);
2720 		return NULL;
2721 	}
2722 
2723 	return rbio;
2724 }
2725 
2726 static void missing_raid56_work(struct btrfs_work *work)
2727 {
2728 	struct btrfs_raid_bio *rbio;
2729 
2730 	rbio = container_of(work, struct btrfs_raid_bio, work);
2731 	__raid56_parity_recover(rbio);
2732 }
2733 
2734 static void async_missing_raid56(struct btrfs_raid_bio *rbio)
2735 {
2736 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2737 			missing_raid56_work, NULL, NULL);
2738 
2739 	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2740 }
2741 
2742 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2743 {
2744 	if (!lock_stripe_add(rbio))
2745 		async_missing_raid56(rbio);
2746 }
2747