/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation of the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the same 2 started one after another.
 */

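/*
 * Illustrative usage sketch of the interface below. This is not taken from
 * an in-tree caller; the choice of extent_root and the full key range are
 * assumptions:
 *
 *	struct btrfs_key key_start = { 0 };
 *	struct btrfs_key key_end = {
 *		.objectid = (u64)-1, .type = (u8)-1, .offset = (u64)-1
 *	};
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(fs_info->extent_root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);
 *
 * A caller that does not care about completion would instead call
 * btrfs_reada_detach(rc) and let the readahead finish in the background.
 */
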
#define MAX_IN_FLIGHT 6

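/*
 * Links one readahead request (reada_control) to one extent. The recorded
 * generation is compared against the on-disk generation when the extent is
 * read, so that stale branches are not followed.
 */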
struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

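/*
 * One tree block scheduled for readahead. It is referenced from
 * fs_info->reada_tree and from the per-device reada_extents trees, keeps a
 * list of the readahead requests (extctl) interested in it, and records the
 * zones of all mirrors it can be read from.
 */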
struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	int			err;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	int			scheduled;
};

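/*
 * A per-device range of the filesystem, its bounds taken from the block
 * group that contains it. Zones order the reads of one device: the device
 * works through one zone at a time, while the 'locked' flag keeps peer
 * devices from reading the other side of the same mirror simultaneously.
 */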
struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};

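/*
 * Work item to run the readahead state machine from a worker thread, so
 * that the caller of reada_start_machine is not blocked.
 */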
struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation);

/* recurses */
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re, struct extent_buffer *eb,
			     u64 start, int err)
{
	int level = 0;
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct list_head list;

	if (eb)
		level = btrfs_header_level(eb);

	spin_lock(&re->lock);
	/*
	 * Just take the full list from the extent; afterwards we
	 * don't need the lock anymore.
	 */
	list_replace_init(&re->extctl, &list);
	re->scheduled = 0;
	spin_unlock(&re->lock);

	/*
	 * This is the error case: the extent buffer has not been
	 * read correctly. We won't access anything from it and
	 * just clean up our data structures. Effectively this will
	 * cut the branch below this node from read ahead.
	 */
	if (err)
		goto cleanup;

	/*
	 * FIXME: currently we just set nritems to 0 if this is a leaf,
	 * effectively ignoring the content. As a next step we could
	 * trigger more readahead depending on the content, e.g.
	 * fetch the checksums for the extents in the leaf.
	 */
	if (!level)
		goto cleanup;

	nritems = btrfs_header_nritems(eb);
	generation = btrfs_header_generation(eb);
	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * If the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(fs_info,
					    "generation mismatch for (%llu,%d,%llu) %llu != %llu",
					    key.objectid, key.type, key.offset,
					    rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key, n_gen);
		}
	}

cleanup:
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
}

/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O.
 */
int btree_readahead_hook(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, u64 start, int err)
{
	int ret = 0;
	struct reada_extent *re;

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree,
			       start >> PAGE_SHIFT);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);
	if (!re) {
		ret = -1;
		goto start_machine;
	}

	__readahead_hook(fs_info, re, eb, start, err);
	reada_extent_put(fs_info, re);	/* our ref */

start_machine:
	reada_start_machine(fs_info);
	return ret;
}

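/*
 * Find the zone on @dev that covers @logical, or create it from the bounds
 * of the containing block group. Returns the zone with a reference held,
 * or NULL on failure.
 */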
static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
					  struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_SHIFT, 1);
	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
		kref_get(&zone->refcnt);
		spin_unlock(&fs_info->reada_lock);
		return zone;
	}

	spin_unlock(&fs_info->reada_lock);

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (!zone)
		return NULL;

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_SHIFT, 1);
		if (ret == 1 && logical >= zone->start && logical <= zone->end)
			kref_get(&zone->refcnt);
		else
			zone = NULL;
	}
	spin_unlock(&fs_info->reada_lock);

	return zone;
}

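/*
 * Look up the reada_extent for @logical, or create it and register it in
 * fs_info->reada_tree and the per-device reada_extents trees. Returns the
 * extent with one reference held, or NULL on failure. If another thread
 * created it concurrently, the existing extent is returned instead.
 */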
static struct reada_extent *reada_find_extent(struct btrfs_root *root,
					      u64 logical,
					      struct btrfs_key *top)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u32 blocksize;
	u64 length;
	int real_stripes;
	int nzones = 0;
	unsigned long index = logical >> PAGE_SHIFT;
	int dev_replace_is_ongoing;
	int have_zone = 0;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_KERNEL);
	if (!re)
		return NULL;

	blocksize = root->nodesize;
	re->logical = logical;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = blocksize;
	ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
			      &bbio, 0);
	if (ret || !bbio || length < blocksize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(root->fs_info,
			   "readahead: more than %d copies not supported",
			   BTRFS_MAX_MIRRORS);
		goto error;
	}

	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	for (nzones = 0; nzones < real_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;

		/* cannot read ahead on missing device. */
		if (!dev->bdev)
			continue;

		zone = reada_find_zone(fs_info, dev, logical, bbio);
		if (!zone)
			continue;

		re->zones[re->nzones++] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		BUG_ON(!re_exist);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
		goto error;
	}
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		dev = re->zones[nzones]->device;

		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev)
			continue;

		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--nzones >= 0) {
				dev = re->zones[nzones]->device;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			BUG_ON(fs_info == NULL);
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
			goto error;
		}
		have_zone = 1;
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	if (!have_zone)
		goto error;

	btrfs_put_bbio(bbio);
	return re;

error:
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		struct reada_zone *zone;

		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	btrfs_put_bbio(bbio);
	kfree(re);
	return re_exist;
}

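/*
 * Drop one reference on @re. When the last reference is dropped, the extent
 * is removed from all radix trees, the zone element counts are decremented
 * and the extent is freed.
 */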
static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

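/*
 * Attach @rc to the reada_extent at @logical, creating the extent if
 * needed. The reference taken by reada_find_extent is intentionally kept;
 * it is dropped when the extctl record is processed in __readahead_hook.
 */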
static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation)
{
	struct btrfs_root *root = rc->root;
	struct reada_extent *re;
	struct reada_extctl *rec;

	re = reada_find_extent(root, logical, top); /* takes one ref */
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec) {
		reada_extent_put(root->fs_info, re);
		return -ENOMEM;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;

		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}

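/*
 * Issue one readahead from the current zone of @dev, picking a new zone if
 * necessary. Returns 1 if a read was dispatched, 0 if there was nothing to
 * do for this device.
 */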
static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *dev)
{
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	int ret;
	int i;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME: currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up.
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_SHIFT, 1);
	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					dev->reada_next >> PAGE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + fs_info->tree_root->nodesize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	spin_lock(&re->lock);
	if (re->scheduled || list_empty(&re->extctl)) {
		spin_unlock(&re->lock);
		reada_extent_put(fs_info, re);
		return 0;
	}
	re->scheduled = 1;
	spin_unlock(&re->lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info->extent_root, logical,
			mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info, re, NULL, logical, ret);
	else if (eb)
		__readahead_hook(fs_info, re, eb, eb->start, ret);

	if (eb)
		free_extent_buffer(eb);

	atomic_dec(&dev->reada_in_flight);
	reada_extent_put(fs_info, re);

	return 1;
}

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);

	atomic_dec(&fs_info->reada_works_cnt);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
		mutex_unlock(&fs_devices->device_list_mutex);
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we break the loop above after 10000 iterations and
	 * enqueue the remainder to workers to finish it. This distributes
	 * the load to the cores.
	 */
	for (i = 0; i < 2; ++i) {
		reada_start_machine(fs_info);
		if (atomic_read(&fs_info->reada_works_cnt) >
		    BTRFS_MAX_MIRRORS * 2)
			break;
	}
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, btrfs_readahead_helper,
			reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
	atomic_inc(&fs_info->reada_works_cnt);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;

			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			pr_debug("  zone %llu-%llu elems %llu locked %d devs",
				    zone->start, zone->end, zone->elems,
				    zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				pr_cont(" %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				pr_cont(" curr off %llu",
					device->reada_next - zone->start);
			pr_cont("\n");
			index = (zone->end >> PAGE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			pr_debug("  re: logical %llu size %u empty %d scheduled %d",
				re->logical, fs_info->tree_root->nodesize,
				list_empty(&re->extctl), re->scheduled);

			for (i = 0; i < re->nzones; ++i) {
				pr_cont(" zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					pr_cont(" %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			pr_cont("\n");
			index = (re->logical >> PAGE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled) {
			index = (re->logical >> PAGE_SHIFT) + 1;
			continue;
		}
		pr_debug("re: logical %llu size %u list empty %d scheduled %d",
			re->logical, fs_info->tree_root->nodesize,
			list_empty(&re->extctl), re->scheduled);
		for (i = 0; i < re->nzones; ++i) {
			pr_cont(" zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				pr_cont(" %lld",
				       re->zones[i]->devs[j]->devid);
			}
		}
		pr_cont("\n");
		index = (re->logical >> PAGE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int ret;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->root = root;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	ret = reada_add_block(rc, start, &max_key, generation);
	if (ret) {
		kfree(rc);
		return ERR_PTR(ret);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->root->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(rc->root->fs_info,
			  atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->root->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   (HZ + 9) / 10);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}