/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation of the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the same 2 started one after another.
 */
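
/*
 * Example usage (an illustrative sketch, not taken from an in-tree caller;
 * the all-zero/all-ones key range below is hypothetical): start a readahead
 * of the whole tree, then block until it completes. A caller that does not
 * want to wait would call btrfs_reada_detach(rc) instead of
 * btrfs_reada_wait(rc).
 *
 *	struct btrfs_key key_start = { 0 };
 *	struct btrfs_key key_end = {
 *		.objectid = (u64)-1, .type = (u8)-1, .offset = (u64)-1
 *	};
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (IS_ERR(rc))
 *		return PTR_ERR(rc);
 *	btrfs_reada_wait(rc);
 */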

#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	u32			blocksize;
	int			err;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	struct btrfs_device	*scheduled_for;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation);

/* recurses */
/* in case of err, eb might be NULL */
static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			    u64 start, int err)
{
	int level = 0;
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct reada_extent *re;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head list;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct btrfs_device *for_dev;

	if (eb)
		level = btrfs_header_level(eb);

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (!re)
		return -1;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	for_dev = re->scheduled_for;
	re->scheduled_for = NULL;
	spin_unlock(&re->lock);

	if (err == 0) {
		nritems = level ? btrfs_header_nritems(eb) : 0;
		generation = btrfs_header_generation(eb);
		/*
		 * FIXME: currently we just set nritems to 0 if this is a leaf,
		 * effectively ignoring the content. As a next step we could
		 * trigger more readahead depending on the content, e.g.
		 * fetch the checksums for the extents in the leaf.
		 */
	} else {
		/*
		 * this is the error case, the extent buffer has not been
		 * read correctly. We won't access anything from it and
		 * just clean up our data structures. Effectively this will
		 * cut the branch below this node from read ahead.
		 */
		nritems = 0;
		generation = 0;
	}

	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				printk(KERN_DEBUG "generation mismatch for "
						"(%llu,%d,%llu) %llu != %llu\n",
				       key.objectid, key.type, key.offset,
				       rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key,
						level - 1, n_gen);
		}
	}
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
	reada_extent_put(fs_info, re);	/* our ref */
	if (for_dev)
		atomic_dec(&for_dev->reada_in_flight);

	return 0;
}

/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O
 */
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			 u64 start, int err)
{
	int ret;

	ret = __readahead_hook(root, eb, start, err);

	reada_start_machine(root->fs_info);

	return ret;
}

static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
					  struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_CACHE_SHIFT, 1);
	if (ret == 1)
		kref_get(&zone->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (ret == 1) {
		if (logical >= zone->start && logical < zone->end)
			return zone;
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_NOFS);
	if (!zone)
		return NULL;

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_CACHE_SHIFT, 1);
		if (ret == 1)
			kref_get(&zone->refcnt);
	}
	spin_unlock(&fs_info->reada_lock);

	return zone;
}

static struct reada_extent *reada_find_extent(struct btrfs_root *root,
					      u64 logical,
					      struct btrfs_key *top, int level)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u32 blocksize;
	u64 length;
	int nzones = 0;
	int i;
	unsigned long index = logical >> PAGE_CACHE_SHIFT;
	int dev_replace_is_ongoing;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_NOFS);
	if (!re)
		return NULL;

	blocksize = btrfs_level_size(root, level);
	re->logical = logical;
	re->blocksize = blocksize;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = blocksize;
	ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
			      &bbio, 0);
	if (ret || !bbio || length < blocksize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		printk(KERN_ERR "btrfs readahead: more than %d copies not "
				"supported", BTRFS_MAX_MIRRORS);
		goto error;
	}

	for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;
		zone = reada_find_zone(fs_info, dev, logical, bbio);
		if (!zone)
			break;

		re->zones[nzones] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	re->nzones = nzones;
	if (nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_lock(&fs_info->dev_replace);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		BUG_ON(!re_exist);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (i = 0; i < nzones; ++i) {
		dev = bbio->stripes[i].dev;
		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev) {
			/* cannot read ahead on missing device */
			continue;
		}
		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--i >= 0) {
				dev = bbio->stripes[i].dev;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			BUG_ON(fs_info == NULL);
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_unlock(&fs_info->dev_replace);
			goto error;
		}
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	kfree(bbio);
	return re;

error:
	while (nzones) {
		struct reada_zone *zone;

		--nzones;
		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	kfree(bbio);
	kfree(re);
	return re_exist;
}

static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/* no fs_info->reada_lock needed, as this can't be
			 * the last ref */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->scheduled_for)
		atomic_dec(&re->scheduled_for->reada_in_flight);

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_CACHE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation)
{
	struct btrfs_root *root = rc->root;
	struct reada_extent *re;
	struct reada_extctl *rec;

	re = reada_find_extent(root, logical, top, level); /* takes one ref */
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_NOFS);
	if (!rec) {
		reada_extent_put(root->fs_info, re);
		return -1;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;
		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}

static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *dev)
{
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	u32 blocksize;
	int ret;
	int i;
	int need_kick = 0;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + re->blocksize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;
	blocksize = re->blocksize;

	spin_lock(&re->lock);
	if (re->scheduled_for == NULL) {
		re->scheduled_for = dev;
		need_kick = 1;
	}
	spin_unlock(&re->lock);

	reada_extent_put(fs_info, re);

	if (!need_kick)
		return 0;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
			 mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info->extent_root, NULL, logical, ret);
	else if (eb)
		__readahead_hook(fs_info->extent_root, eb, eb->start, ret);

	if (eb)
		free_extent_buffer(eb);

	return 1;
}

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we broke the loop above after 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i)
		reada_start_machine(fs_info);
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	rmw->work.func = reada_start_machine_worker;
	rmw->fs_info = fs_info;

	btrfs_queue_worker(&fs_info->readahead_workers, &rmw->work);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;
			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
				"%d devs", zone->start, zone->end, zone->elems,
				zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				printk(KERN_CONT " curr off %llu",
					device->reada_next - zone->start);
			printk(KERN_CONT "\n");
			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG
				"  re: logical %llu size %u empty %d for %lld",
				re->logical, re->blocksize,
				list_empty(&re->extctl), re->scheduled_for ?
				re->scheduled_for->devid : -1);

			for (i = 0; i < re->nzones; ++i) {
				printk(KERN_CONT " zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					printk(KERN_CONT " %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			printk(KERN_CONT "\n");
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled_for) {
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			continue;
		}
		printk(KERN_DEBUG
			"re: logical %llu size %u list empty %d for %lld",
			re->logical, re->blocksize, list_empty(&re->extctl),
			re->scheduled_for ? re->scheduled_for->devid : -1);
		for (i = 0; i < re->nzones; ++i) {
			printk(KERN_CONT " zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		printk(KERN_CONT "\n");
		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
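/*
 * btrfs_reada_add - trigger a readahead of the half-open key range
 * [*key_start, *key_end) on the given tree root.
 *
 * Returns a handle to pass to btrfs_reada_wait() or btrfs_reada_detach(),
 * or an ERR_PTR on allocation failure.
 */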
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int level;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->root = root;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	level = btrfs_header_level(node);
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	if (reada_add_block(rc, start, &max_key, level, generation)) {
		kfree(rc);
		return ERR_PTR(-ENOMEM);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(rc->root->fs_info,
			  atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event(rc->wait, atomic_read(&rc->elems) == 0);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}