1 /*
2  * Copyright (C) 2011 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/sched.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/rbtree.h>
24 #include <linux/slab.h>
25 #include <linux/workqueue.h>
26 #include "ctree.h"
27 #include "volumes.h"
28 #include "disk-io.h"
29 #include "transaction.h"
30 
31 #undef DEBUG
32 
33 /*
34  * This is the implementation for the generic read ahead framework.
35  *
36  * To trigger a readahead, btrfs_reada_add must be called. It will start
37  * a read ahead for the given range [start, end) on tree root. The returned
38  * handle can either be used to wait on the readahead to finish
39  * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
40  *
41  * The read ahead works as follows:
42  * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
43  * reada_start_machine will then search for extents to prefetch and trigger
44  * some reads. When a read finishes for a node, all contained node/leaf
45  * pointers that lie in the given range will also be enqueued. The reads will
46  * be triggered in sequential order, thus giving a big win over a naive
47  * enumeration. It will also make use of multi-device layouts. Each disk
48  * will have its own read pointer and all disks will be utilized in parallel.
49  * Also, no two disks will read both sides of a mirror simultaneously, as this
50  * would waste seeking capacity. Instead both disks will read different parts
51  * of the filesystem.
52  * Any number of readaheads can be started in parallel. The read order will be
53  * determined globally, i.e. 2 parallel readaheads will normally finish faster
54  * than the same 2 started one after another.
55  */
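/*
 * A minimal usage sketch of this interface (a hypothetical caller; assumes
 * an already-looked-up, valid root and elides error handling):
 *
 *	struct btrfs_key start = { .objectid = 0, .type = 0, .offset = 0 };
 *	struct btrfs_key end = { .objectid = (u64)-1, .type = (u8)-1,
 *				 .offset = (u64)-1 };
 *	struct reada_control *rc = btrfs_reada_add(root, &start, &end);
 *
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);	(or: btrfs_reada_detach(rc))
 */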
56 
57 #define MAX_IN_FLIGHT 6	/* max readahead requests in flight per device */
58 
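/*
 * one pending readahead request (rc) attached to an extent, carrying the
 * generation the referencing node pointer expects the extent to have
 */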
59 struct reada_extctl {
60 	struct list_head	list;
61 	struct reada_control	*rc;
62 	u64			generation;
63 };
64 
65 struct reada_extent {
66 	u64			logical;
67 	struct btrfs_key	top;
68 	u32			blocksize;
69 	int			err;
70 	struct list_head	extctl;
71 	struct kref		refcnt;
72 	spinlock_t		lock;
73 	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
74 	int			nzones;
75 	struct btrfs_device	*scheduled_for;
76 };
77 
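/*
 * the portion of a block group that lies on one device; readahead is
 * scheduled zone by zone so that each disk can read sequentially
 */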
78 struct reada_zone {
79 	u64			start;
80 	u64			end;
81 	u64			elems;
82 	struct list_head	list;
83 	spinlock_t		lock;
84 	int			locked;
85 	struct btrfs_device	*device;
86 	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
87 							   * self */
88 	int			ndevs;
89 	struct kref		refcnt;
90 };
91 
92 struct reada_machine_work {
93 	struct btrfs_work	work;
94 	struct btrfs_fs_info	*fs_info;
95 };
96 
97 static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
98 static void reada_control_release(struct kref *kref);
99 static void reada_zone_release(struct kref *kref);
100 static void reada_start_machine(struct btrfs_fs_info *fs_info);
101 static void __reada_start_machine(struct btrfs_fs_info *fs_info);
102 
103 static int reada_add_block(struct reada_control *rc, u64 logical,
104 			   struct btrfs_key *top, int level, u64 generation);
105 
106 /* recurses */
107 /* in case of err, eb might be NULL */
108 static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
109 			    u64 start, int err)
110 {
111 	int level = 0;
112 	int nritems;
113 	int i;
114 	u64 bytenr;
115 	u64 generation;
116 	struct reada_extent *re;
117 	struct btrfs_fs_info *fs_info = root->fs_info;
118 	struct list_head list;
119 	unsigned long index = start >> PAGE_CACHE_SHIFT;
120 	struct btrfs_device *for_dev;
121 
122 	if (eb)
123 		level = btrfs_header_level(eb);
124 
125 	/* find extent */
126 	spin_lock(&fs_info->reada_lock);
127 	re = radix_tree_lookup(&fs_info->reada_tree, index);
128 	if (re)
129 		kref_get(&re->refcnt);
130 	spin_unlock(&fs_info->reada_lock);
131 
132 	if (!re)
133 		return -1;
134 
135 	spin_lock(&re->lock);
136 	/*
137 	 * just take the full list from the extent. afterwards we
138 	 * don't need the lock anymore
139 	 */
140 	list_replace_init(&re->extctl, &list);
141 	for_dev = re->scheduled_for;
142 	re->scheduled_for = NULL;
143 	spin_unlock(&re->lock);
144 
145 	if (err == 0) {
146 		nritems = level ? btrfs_header_nritems(eb) : 0;
147 		generation = btrfs_header_generation(eb);
148 		/*
149 		 * FIXME: currently we just set nritems to 0 if this is a leaf,
150 		 * effectively ignoring the content. In a next step we could
151 		 * trigger more readahead depending on the content, e.g.
152 		 * fetch the checksums for the extents in the leaf.
153 		 */
154 	} else {
155 		/*
156 		 * this is the error case, the extent buffer has not been
157 		 * read correctly. We won't access anything from it and
158 		 * just clean up our data structures. Effectively this will
159 		 * cut the branch below this node from read ahead.
160 		 */
161 		nritems = 0;
162 		generation = 0;
163 	}
164 
165 	for (i = 0; i < nritems; i++) {
166 		struct reada_extctl *rec;
167 		u64 n_gen;
168 		struct btrfs_key key;
169 		struct btrfs_key next_key;
170 
171 		btrfs_node_key_to_cpu(eb, &key, i);
172 		if (i + 1 < nritems)
173 			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
174 		else
175 			next_key = re->top;
176 		bytenr = btrfs_node_blockptr(eb, i);
177 		n_gen = btrfs_node_ptr_generation(eb, i);
178 
179 		list_for_each_entry(rec, &list, list) {
180 			struct reada_control *rc = rec->rc;
181 
182 			/*
183 			 * if the generation doesn't match, just ignore this
184 			 * extctl. This will probably cut off a branch from
185 			 * prefetch. Alternatively one could start a new (sub-)
186 			 * prefetch for this branch, starting again from root.
187 			 * FIXME: move the generation check out of this loop
188 			 */
189 #ifdef DEBUG
190 			if (rec->generation != generation) {
191 				printk(KERN_DEBUG "generation mismatch for "
192 						"(%llu,%d,%llu) %llu != %llu\n",
193 				       key.objectid, key.type, key.offset,
194 				       rec->generation, generation);
195 			}
196 #endif
197 			if (rec->generation == generation &&
198 			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
199 			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
200 				reada_add_block(rc, bytenr, &next_key,
201 						level - 1, n_gen);
202 		}
203 	}
204 	/*
205 	 * free extctl records
206 	 */
207 	while (!list_empty(&list)) {
208 		struct reada_control *rc;
209 		struct reada_extctl *rec;
210 
211 		rec = list_first_entry(&list, struct reada_extctl, list);
212 		list_del(&rec->list);
213 		rc = rec->rc;
214 		kfree(rec);
215 
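		/*
		 * take a temporary ref so that dropping the "one ref for
		 * having elements" below cannot free rc before wake_up
		 * dereferences rc->wait
		 */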
216 		kref_get(&rc->refcnt);
217 		if (atomic_dec_and_test(&rc->elems)) {
218 			kref_put(&rc->refcnt, reada_control_release);
219 			wake_up(&rc->wait);
220 		}
221 		kref_put(&rc->refcnt, reada_control_release);
222 
223 		reada_extent_put(fs_info, re);	/* one ref for each entry */
224 	}
225 	reada_extent_put(fs_info, re);	/* our ref */
226 	if (for_dev)
227 		atomic_dec(&for_dev->reada_in_flight);
228 
229 	return 0;
230 }
231 
232 /*
233  * start is passed separately in case eb is NULL, which may be the case with
234  * failed I/O
235  */
236 int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
237 			 u64 start, int err)
238 {
239 	int ret;
240 
241 	ret = __readahead_hook(root, eb, start, err);
242 
243 	reada_start_machine(root->fs_info);
244 
245 	return ret;
246 }
247 
248 static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
249 					  struct btrfs_device *dev, u64 logical,
250 					  struct btrfs_bio *bbio)
251 {
252 	int ret;
253 	struct reada_zone *zone;
254 	struct btrfs_block_group_cache *cache = NULL;
255 	u64 start;
256 	u64 end;
257 	int i;
258 
259 	zone = NULL;
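	/*
	 * zones are indexed by their end offset (see the radix_tree_insert
	 * below), so a gang lookup starting at logical finds the zone that
	 * contains logical, if one exists
	 */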
260 	spin_lock(&fs_info->reada_lock);
261 	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
262 				     logical >> PAGE_CACHE_SHIFT, 1);
263 	if (ret == 1)
264 		kref_get(&zone->refcnt);
265 	spin_unlock(&fs_info->reada_lock);
266 
267 	if (ret == 1) {
268 		if (logical >= zone->start && logical < zone->end)
269 			return zone;
270 		spin_lock(&fs_info->reada_lock);
271 		kref_put(&zone->refcnt, reada_zone_release);
272 		spin_unlock(&fs_info->reada_lock);
273 	}
274 
275 	cache = btrfs_lookup_block_group(fs_info, logical);
276 	if (!cache)
277 		return NULL;
278 
279 	start = cache->key.objectid;
280 	end = start + cache->key.offset - 1;
281 	btrfs_put_block_group(cache);
282 
283 	zone = kzalloc(sizeof(*zone), GFP_NOFS);
284 	if (!zone)
285 		return NULL;
286 
287 	zone->start = start;
288 	zone->end = end;
289 	INIT_LIST_HEAD(&zone->list);
290 	spin_lock_init(&zone->lock);
291 	zone->locked = 0;
292 	kref_init(&zone->refcnt);
293 	zone->elems = 0;
294 	zone->device = dev; /* our device always sits at index 0 */
295 	for (i = 0; i < bbio->num_stripes; ++i) {
296 		/* bounds have already been checked */
297 		zone->devs[i] = bbio->stripes[i].dev;
298 	}
299 	zone->ndevs = bbio->num_stripes;
300 
301 	spin_lock(&fs_info->reada_lock);
302 	ret = radix_tree_insert(&dev->reada_zones,
303 				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
304 				zone);
305 
306 	if (ret == -EEXIST) {
307 		kfree(zone);
308 		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
309 					     logical >> PAGE_CACHE_SHIFT, 1);
310 		if (ret == 1)
311 			kref_get(&zone->refcnt);
312 	}
313 	spin_unlock(&fs_info->reada_lock);
314 
315 	return zone;
316 }
317 
318 static struct reada_extent *reada_find_extent(struct btrfs_root *root,
319 					      u64 logical,
320 					      struct btrfs_key *top, int level)
321 {
322 	int ret;
323 	struct reada_extent *re = NULL;
324 	struct reada_extent *re_exist = NULL;
325 	struct btrfs_fs_info *fs_info = root->fs_info;
326 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
327 	struct btrfs_bio *bbio = NULL;
328 	struct btrfs_device *dev;
329 	struct btrfs_device *prev_dev;
330 	u32 blocksize;
331 	u64 length;
332 	int nzones = 0;
333 	int i;
334 	unsigned long index = logical >> PAGE_CACHE_SHIFT;
335 
336 	spin_lock(&fs_info->reada_lock);
337 	re = radix_tree_lookup(&fs_info->reada_tree, index);
338 	if (re)
339 		kref_get(&re->refcnt);
340 	spin_unlock(&fs_info->reada_lock);
341 
342 	if (re)
343 		return re;
344 
345 	re = kzalloc(sizeof(*re), GFP_NOFS);
346 	if (!re)
347 		return NULL;
348 
349 	blocksize = btrfs_level_size(root, level);
350 	re->logical = logical;
351 	re->blocksize = blocksize;
352 	re->top = *top;
353 	INIT_LIST_HEAD(&re->extctl);
354 	spin_lock_init(&re->lock);
355 	kref_init(&re->refcnt);
356 
357 	/*
358 	 * map block: a WRITE mapping is requested so bbio lists all mirrors
359 	 */
360 	length = blocksize;
361 	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, &bbio, 0);
362 	if (ret || !bbio || length < blocksize)
363 		goto error;
364 
365 	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
366 		printk(KERN_ERR "btrfs readahead: more than %d copies not "
367 				"supported", BTRFS_MAX_MIRRORS);
368 		goto error;
369 	}
370 
371 	for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
372 		struct reada_zone *zone;
373 
374 		dev = bbio->stripes[nzones].dev;
375 		zone = reada_find_zone(fs_info, dev, logical, bbio);
376 		if (!zone)
377 			break;
378 
379 		re->zones[nzones] = zone;
380 		spin_lock(&zone->lock);
381 		if (!zone->elems)
382 			kref_get(&zone->refcnt);
383 		++zone->elems;
384 		spin_unlock(&zone->lock);
385 		spin_lock(&fs_info->reada_lock);
386 		kref_put(&zone->refcnt, reada_zone_release);
387 		spin_unlock(&fs_info->reada_lock);
388 	}
389 	re->nzones = nzones;
390 	if (nzones == 0) {
391 		/* not a single zone found, error and out */
392 		goto error;
393 	}
394 
395 	/* insert extent in reada_tree + all per-device trees, all or nothing */
396 	spin_lock(&fs_info->reada_lock);
397 	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
398 	if (ret == -EEXIST) {
399 		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
400 		BUG_ON(!re_exist);
401 		kref_get(&re_exist->refcnt);
402 		spin_unlock(&fs_info->reada_lock);
403 		goto error;
404 	}
405 	if (ret) {
406 		spin_unlock(&fs_info->reada_lock);
407 		goto error;
408 	}
409 	prev_dev = NULL;
410 	for (i = 0; i < nzones; ++i) {
411 		dev = bbio->stripes[i].dev;
412 		if (dev == prev_dev) {
413 			/*
414 			 * in case of DUP, just add the first zone. As both
415 			 * are on the same device, there's nothing to gain
416 			 * from adding both.
417 			 * Also, it wouldn't work, as the tree is per device
418 			 * and adding would fail with EEXIST
419 			 */
420 			continue;
421 		}
422 		prev_dev = dev;
423 		ret = radix_tree_insert(&dev->reada_extents, index, re);
424 		if (ret) {
425 			while (--i >= 0) {
426 				dev = bbio->stripes[i].dev;
427 				BUG_ON(dev == NULL);
428 				radix_tree_delete(&dev->reada_extents, index);
429 			}
430 			BUG_ON(fs_info == NULL);
431 			radix_tree_delete(&fs_info->reada_tree, index);
432 			spin_unlock(&fs_info->reada_lock);
433 			goto error;
434 		}
435 	}
436 	spin_unlock(&fs_info->reada_lock);
437 
438 	kfree(bbio);
439 	return re;
440 
441 error:
442 	while (nzones) {
443 		struct reada_zone *zone;
444 
445 		--nzones;
446 		zone = re->zones[nzones];
447 		kref_get(&zone->refcnt);
448 		spin_lock(&zone->lock);
449 		--zone->elems;
450 		if (zone->elems == 0) {
451 			/*
452 			 * no fs_info->reada_lock needed, as this can't be
453 			 * the last ref
454 			 */
455 			kref_put(&zone->refcnt, reada_zone_release);
456 		}
457 		spin_unlock(&zone->lock);
458 
459 		spin_lock(&fs_info->reada_lock);
460 		kref_put(&zone->refcnt, reada_zone_release);
461 		spin_unlock(&fs_info->reada_lock);
462 	}
463 	kfree(bbio);
464 	kfree(re);
465 	return re_exist;
466 }
467 
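/*
 * no-op release function: lets reada_extent_put detect the last ref via
 * kref_put while still holding reada_lock, so teardown can happen inline
 */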
468 static void reada_kref_dummy(struct kref *kr)
469 {
470 }
471 
472 static void reada_extent_put(struct btrfs_fs_info *fs_info,
473 			     struct reada_extent *re)
474 {
475 	int i;
476 	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;
477 
478 	spin_lock(&fs_info->reada_lock);
479 	if (!kref_put(&re->refcnt, reada_kref_dummy)) {
480 		spin_unlock(&fs_info->reada_lock);
481 		return;
482 	}
483 
484 	radix_tree_delete(&fs_info->reada_tree, index);
485 	for (i = 0; i < re->nzones; ++i) {
486 		struct reada_zone *zone = re->zones[i];
487 
488 		radix_tree_delete(&zone->device->reada_extents, index);
489 	}
490 
491 	spin_unlock(&fs_info->reada_lock);
492 
493 	for (i = 0; i < re->nzones; ++i) {
494 		struct reada_zone *zone = re->zones[i];
495 
496 		kref_get(&zone->refcnt);
497 		spin_lock(&zone->lock);
498 		--zone->elems;
499 		if (zone->elems == 0) {
500 			/* no fs_info->reada_lock needed, as this can't be
501 			 * the last ref */
502 			kref_put(&zone->refcnt, reada_zone_release);
503 		}
504 		spin_unlock(&zone->lock);
505 
506 		spin_lock(&fs_info->reada_lock);
507 		kref_put(&zone->refcnt, reada_zone_release);
508 		spin_unlock(&fs_info->reada_lock);
509 	}
510 	if (re->scheduled_for)
511 		atomic_dec(&re->scheduled_for->reada_in_flight);
512 
513 	kfree(re);
514 }
515 
516 static void reada_zone_release(struct kref *kref)
517 {
518 	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
519 
520 	radix_tree_delete(&zone->device->reada_zones,
521 			  zone->end >> PAGE_CACHE_SHIFT);
522 
523 	kfree(zone);
524 }
525 
526 static void reada_control_release(struct kref *kref)
527 {
528 	struct reada_control *rc = container_of(kref, struct reada_control,
529 						refcnt);
530 
531 	kfree(rc);
532 }
533 
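/*
 * queue the block for readahead on behalf of rc; the extent ref taken by
 * reada_find_extent is kept and dropped per extctl entry in
 * __readahead_hook
 */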
534 static int reada_add_block(struct reada_control *rc, u64 logical,
535 			   struct btrfs_key *top, int level, u64 generation)
536 {
537 	struct btrfs_root *root = rc->root;
538 	struct reada_extent *re;
539 	struct reada_extctl *rec;
540 
541 	re = reada_find_extent(root, logical, top, level); /* takes one ref */
542 	if (!re)
543 		return -1;
544 
545 	rec = kzalloc(sizeof(*rec), GFP_NOFS);
546 	if (!rec) {
547 		reada_extent_put(root->fs_info, re);
548 		return -1;
549 	}
550 
551 	rec->rc = rc;
552 	rec->generation = generation;
553 	atomic_inc(&rc->elems);
554 
555 	spin_lock(&re->lock);
556 	list_add_tail(&rec->list, &re->extctl);
557 	spin_unlock(&re->lock);
558 
559 	/* leave the ref on the extent */
560 
561 	return 0;
562 }
563 
564 /*
565  * called with fs_info->reada_lock held
566  */
567 static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
568 {
569 	int i;
570 	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;
571 
572 	for (i = 0; i < zone->ndevs; ++i) {
573 		struct reada_zone *peer;
574 		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
575 		if (peer && peer->device != zone->device)
576 			peer->locked = lock;
577 	}
578 }
579 
580 /*
581  * called with fs_info->reada_lock held
582  */
583 static int reada_pick_zone(struct btrfs_device *dev)
584 {
585 	struct reada_zone *top_zone = NULL;
586 	struct reada_zone *top_locked_zone = NULL;
587 	u64 top_elems = 0;
588 	u64 top_locked_elems = 0;
589 	unsigned long index = 0;
590 	int ret;
591 
592 	if (dev->reada_curr_zone) {
593 		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
594 		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
595 		dev->reada_curr_zone = NULL;
596 	}
597 	/* pick the zone with the most elements, preferring unlocked zones */
598 	while (1) {
599 		struct reada_zone *zone;
600 
601 		ret = radix_tree_gang_lookup(&dev->reada_zones,
602 					     (void **)&zone, index, 1);
603 		if (ret == 0)
604 			break;
605 		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
606 		if (zone->locked) {
607 			if (zone->elems > top_locked_elems) {
608 				top_locked_elems = zone->elems;
609 				top_locked_zone = zone;
610 			}
611 		} else {
612 			if (zone->elems > top_elems) {
613 				top_elems = zone->elems;
614 				top_zone = zone;
615 			}
616 		}
617 	}
618 	if (top_zone)
619 		dev->reada_curr_zone = top_zone;
620 	else if (top_locked_zone)
621 		dev->reada_curr_zone = top_locked_zone;
622 	else
623 		return 0;
624 
625 	dev->reada_next = dev->reada_curr_zone->start;
626 	kref_get(&dev->reada_curr_zone->refcnt);
627 	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);
628 
629 	return 1;
630 }
631 
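/*
 * pick up the next extent in the device's current zone (switching zones
 * when it is exhausted) and issue a single read; returns 1 if a read was
 * triggered, 0 otherwise
 */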
632 static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
633 				   struct btrfs_device *dev)
634 {
635 	struct reada_extent *re = NULL;
636 	int mirror_num = 0;
637 	struct extent_buffer *eb = NULL;
638 	u64 logical;
639 	u32 blocksize;
640 	int ret;
641 	int i;
642 	int need_kick = 0;
643 
644 	spin_lock(&fs_info->reada_lock);
645 	if (dev->reada_curr_zone == NULL) {
646 		ret = reada_pick_zone(dev);
647 		if (!ret) {
648 			spin_unlock(&fs_info->reada_lock);
649 			return 0;
650 		}
651 	}
652 	/*
653 	 * FIXME currently we issue the reads one extent at a time. If we have
654 	 * a contiguous block of extents, we could also coalesce them or use
655 	 * plugging to speed things up
656 	 */
657 	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
658 				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
659 	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
660 		ret = reada_pick_zone(dev);
661 		if (!ret) {
662 			spin_unlock(&fs_info->reada_lock);
663 			return 0;
664 		}
665 		re = NULL;
666 		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
667 					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
668 	}
669 	if (ret == 0) {
670 		spin_unlock(&fs_info->reada_lock);
671 		return 0;
672 	}
673 	dev->reada_next = re->logical + re->blocksize;
674 	kref_get(&re->refcnt);
675 
676 	spin_unlock(&fs_info->reada_lock);
677 
678 	/*
679 	 * find mirror num
680 	 */
681 	for (i = 0; i < re->nzones; ++i) {
682 		if (re->zones[i]->device == dev) {
683 			mirror_num = i + 1;
684 			break;
685 		}
686 	}
687 	logical = re->logical;
688 	blocksize = re->blocksize;
689 
690 	spin_lock(&re->lock);
691 	if (re->scheduled_for == NULL) {
692 		re->scheduled_for = dev;
693 		need_kick = 1;
694 	}
695 	spin_unlock(&re->lock);
696 
697 	reada_extent_put(fs_info, re);
698 
699 	if (!need_kick)
700 		return 0;
701 
702 	atomic_inc(&dev->reada_in_flight);
703 	ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
704 			 mirror_num, &eb);
705 	if (ret)
706 		__readahead_hook(fs_info->extent_root, NULL, logical, ret);
707 	else if (eb)
708 		__readahead_hook(fs_info->extent_root, eb, eb->start, ret);
709 
710 	if (eb)
711 		free_extent_buffer(eb);
712 
713 	return 1;
714 
715 }
716 
717 static void reada_start_machine_worker(struct btrfs_work *work)
718 {
719 	struct reada_machine_work *rmw;
720 	struct btrfs_fs_info *fs_info;
721 
722 	rmw = container_of(work, struct reada_machine_work, work);
723 	fs_info = rmw->fs_info;
724 
725 	kfree(rmw);
726 
727 	__reada_start_machine(fs_info);
728 }
729 
730 static void __reada_start_machine(struct btrfs_fs_info *fs_info)
731 {
732 	struct btrfs_device *device;
733 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
734 	u64 enqueued;
735 	u64 total = 0;
736 	int i;
737 
738 	do {
739 		enqueued = 0;
740 		list_for_each_entry(device, &fs_devices->devices, dev_list) {
741 			if (atomic_read(&device->reada_in_flight) <
742 			    MAX_IN_FLIGHT)
743 				enqueued += reada_start_machine_dev(fs_info,
744 								    device);
745 		}
746 		total += enqueued;
747 	} while (enqueued && total < 10000);
748 
749 	if (enqueued == 0)
750 		return;
751 
752 	/*
753 	 * If everything is already in the cache, this is effectively single
754 	 * threaded. To a) not hold the caller for too long and b) to utilize
755 	 * more cores, we broke the loop above after 10000 iterations and now
756 	 * enqueue to workers to finish it. This will distribute the load to
757 	 * the cores.
758 	 */
759 	for (i = 0; i < 2; ++i)
760 		reada_start_machine(fs_info);
761 }
762 
763 static void reada_start_machine(struct btrfs_fs_info *fs_info)
764 {
765 	struct reada_machine_work *rmw;
766 
767 	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
768 	if (!rmw) {
769 		/* FIXME we cannot handle this properly right now */
770 		BUG();
771 	}
772 	rmw->work.func = reada_start_machine_worker;
773 	rmw->fs_info = fs_info;
774 
775 	btrfs_queue_worker(&fs_info->readahead_workers, &rmw->work);
776 }
777 
778 #ifdef DEBUG
779 static void dump_devs(struct btrfs_fs_info *fs_info, int all)
780 {
781 	struct btrfs_device *device;
782 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
783 	unsigned long index;
784 	int ret;
785 	int i;
786 	int j;
787 	int cnt;
788 
789 	spin_lock(&fs_info->reada_lock);
790 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
791 		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
792 			atomic_read(&device->reada_in_flight));
793 		index = 0;
794 		while (1) {
795 			struct reada_zone *zone;
796 			ret = radix_tree_gang_lookup(&device->reada_zones,
797 						     (void **)&zone, index, 1);
798 			if (ret == 0)
799 				break;
800 			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
801 				"%d devs", zone->start, zone->end, zone->elems,
802 				zone->locked);
803 			for (j = 0; j < zone->ndevs; ++j) {
804 				printk(KERN_CONT " %lld",
805 					zone->devs[j]->devid);
806 			}
807 			if (device->reada_curr_zone == zone)
808 				printk(KERN_CONT " curr off %llu",
809 					device->reada_next - zone->start);
810 			printk(KERN_CONT "\n");
811 			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
812 		}
813 		cnt = 0;
814 		index = 0;
815 		while (all) {
816 			struct reada_extent *re = NULL;
817 
818 			ret = radix_tree_gang_lookup(&device->reada_extents,
819 						     (void **)&re, index, 1);
820 			if (ret == 0)
821 				break;
822 			printk(KERN_DEBUG
823 				"  re: logical %llu size %u empty %d for %lld",
824 				re->logical, re->blocksize,
825 				list_empty(&re->extctl), re->scheduled_for ?
826 				re->scheduled_for->devid : -1);
827 
828 			for (i = 0; i < re->nzones; ++i) {
829 				printk(KERN_CONT " zone %llu-%llu devs",
830 					re->zones[i]->start,
831 					re->zones[i]->end);
832 				for (j = 0; j < re->zones[i]->ndevs; ++j) {
833 					printk(KERN_CONT " %lld",
834 						re->zones[i]->devs[j]->devid);
835 				}
836 			}
837 			printk(KERN_CONT "\n");
838 			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
839 			if (++cnt > 15)
840 				break;
841 		}
842 	}
843 
844 	index = 0;
845 	cnt = 0;
846 	while (all) {
847 		struct reada_extent *re = NULL;
848 
849 		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
850 					     index, 1);
851 		if (ret == 0)
852 			break;
853 		if (!re->scheduled_for) {
854 			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
855 			continue;
856 		}
857 		printk(KERN_DEBUG
858 			"re: logical %llu size %u list empty %d for %lld",
859 			re->logical, re->blocksize, list_empty(&re->extctl),
860 			re->scheduled_for ? re->scheduled_for->devid : -1);
861 		for (i = 0; i < re->nzones; ++i) {
862 			printk(KERN_CONT " zone %llu-%llu devs",
863 				re->zones[i]->start,
864 				re->zones[i]->end);
865 			for (j = 0; j < re->zones[i]->ndevs; ++j) {
866 				printk(KERN_CONT " %lld",
867 					re->zones[i]->devs[j]->devid);
868 			}
869 		}
875 		printk(KERN_CONT "\n");
876 		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
877 	}
878 	spin_unlock(&fs_info->reada_lock);
879 }
880 #endif
881 
882 /*
883  * interface
884  */
885 struct reada_control *btrfs_reada_add(struct btrfs_root *root,
886 			struct btrfs_key *key_start, struct btrfs_key *key_end)
887 {
888 	struct reada_control *rc;
889 	u64 start;
890 	u64 generation;
891 	int level;
892 	struct extent_buffer *node;
893 	static struct btrfs_key max_key = {
894 		.objectid = (u64)-1,
895 		.type = (u8)-1,
896 		.offset = (u64)-1
897 	};
898 
899 	rc = kzalloc(sizeof(*rc), GFP_NOFS);
900 	if (!rc)
901 		return ERR_PTR(-ENOMEM);
902 
903 	rc->root = root;
904 	rc->key_start = *key_start;
905 	rc->key_end = *key_end;
906 	atomic_set(&rc->elems, 0);
907 	init_waitqueue_head(&rc->wait);
908 	kref_init(&rc->refcnt);
909 	kref_get(&rc->refcnt); /* one ref for having elements */
910 
911 	node = btrfs_root_node(root);
912 	start = node->start;
913 	level = btrfs_header_level(node);
914 	generation = btrfs_header_generation(node);
915 	free_extent_buffer(node);
916 
917 	reada_add_block(rc, start, &max_key, level, generation);
918 
919 	reada_start_machine(root->fs_info);
920 
921 	return rc;
922 }
923 
924 #ifdef DEBUG
925 int btrfs_reada_wait(void *handle)
926 {
927 	struct reada_control *rc = handle;
928 
929 	while (atomic_read(&rc->elems)) {
930 		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
931 				   5 * HZ);
932 		dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
933 	}
934 
935 	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
936 
937 	kref_put(&rc->refcnt, reada_control_release);
938 
939 	return 0;
940 }
941 #else
942 int btrfs_reada_wait(void *handle)
943 {
944 	struct reada_control *rc = handle;
945 
946 	while (atomic_read(&rc->elems)) {
947 		wait_event(rc->wait, atomic_read(&rc->elems) == 0);
948 	}
949 
950 	kref_put(&rc->refcnt, reada_control_release);
951 
952 	return 0;
953 }
954 #endif
955 
956 void btrfs_reada_detach(void *handle)
957 {
958 	struct reada_control *rc = handle;
959 
960 	kref_put(&rc->refcnt, reada_control_release);
961 }
962