/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as
 * this would waste seeking capacity. Instead both disks will read different
 * parts of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will
 * be determined globally, i.e. 2 parallel readaheads will normally finish
 * faster than the 2 started one after another.
 */
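
/*
 * A minimal usage sketch of the interface above (illustrative only; the
 * key range shown spans the whole tree and error handling is trimmed).
 * A caller starts a readahead on a tree root, then either waits for it
 * to finish or detaches it to the background:
 *
 *	struct btrfs_key key_start = { .objectid = 0, .type = 0, .offset = 0 };
 *	struct btrfs_key key_end = { .objectid = (u64)-1, .type = (u8)-1,
 *				     .offset = (u64)-1 };
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);	(or btrfs_reada_detach(rc))
 */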

#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	u32			blocksize;
	int			err;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	struct btrfs_device	*scheduled_for;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};
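
/*
 * How the structures above relate (a summary of the code below): a
 * reada_control describes one readahead request; rc->elems counts its
 * outstanding reada_extctl records. Each reada_extctl hangs off the
 * extctl list of a reada_extent, which represents a single tree block
 * and is indexed in fs_info->reada_tree (and per device in
 * dev->reada_extents) by its page-shifted logical address. A
 * reada_extent in turn references up to BTRFS_MAX_MIRRORS reada_zones,
 * one per device holding a copy; zones live in dev->reada_zones, keyed
 * by zone end.
 */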

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation);

/* recurses */
/* in case of err, eb might be NULL */
static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			    u64 start, int err)
{
	int level = 0;
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct reada_extent *re;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head list;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct btrfs_device *for_dev;

	if (eb)
		level = btrfs_header_level(eb);

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (!re)
		return -1;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	for_dev = re->scheduled_for;
	re->scheduled_for = NULL;
	spin_unlock(&re->lock);

	if (err == 0) {
		nritems = level ? btrfs_header_nritems(eb) : 0;
		generation = btrfs_header_generation(eb);
		/*
		 * FIXME: currently we just set nritems to 0 if this is a leaf,
		 * effectively ignoring the content. In a next step we could
		 * trigger more readahead depending on the content, e.g.
		 * fetch the checksums for the extents in the leaf.
		 */
	} else {
		/*
		 * this is the error case, the extent buffer has not been
		 * read correctly. We won't access anything from it and
		 * just cleanup our data structures. Effectively this will
		 * cut the branch below this node from read ahead.
		 */
		nritems = 0;
		generation = 0;
	}

	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(root->fs_info,
					"generation mismatch for (%llu,%d,%llu) %llu != %llu",
					key.objectid, key.type, key.offset,
					rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key,
						level - 1, n_gen);
		}
	}
	/*
	 * free extctl records
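	 *
	 * each record dropped below releases one element count on its
	 * reada_control; the surrounding kref_get/kref_put pair keeps rc
	 * alive across the wake_up, while the inner kref_put drops the
	 * "have elements" ref taken in btrfs_reada_add once the count
	 * reaches zero.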
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
	reada_extent_put(fs_info, re);	/* our ref */
	if (for_dev)
		atomic_dec(&for_dev->reada_in_flight);

	return 0;
}

/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O
 */
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			 u64 start, int err)
{
	int ret;

	ret = __readahead_hook(root, eb, start, err);

	reada_start_machine(root->fs_info);

	return ret;
}

static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
					  struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
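	/*
	 * the zones radix tree is keyed by zone->end: starting the gang
	 * lookup at logical returns the first zone whose end is at or
	 * beyond logical, so a hit still has to be range-checked against
	 * zone->start below.
	 */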
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_CACHE_SHIFT, 1);
	if (ret == 1)
		kref_get(&zone->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (ret == 1) {
		if (logical >= zone->start && logical < zone->end)
			return zone;
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_NOFS);
	if (!zone)
		return NULL;

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_CACHE_SHIFT, 1);
		if (ret == 1)
			kref_get(&zone->refcnt);
	}
	spin_unlock(&fs_info->reada_lock);

	return zone;
}

static struct reada_extent *reada_find_extent(struct btrfs_root *root,
					      u64 logical,
					      struct btrfs_key *top, int level)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u32 blocksize;
	u64 length;
	int nzones = 0;
	int i;
	unsigned long index = logical >> PAGE_CACHE_SHIFT;
	int dev_replace_is_ongoing;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_NOFS);
	if (!re)
		return NULL;

	blocksize = root->nodesize;
	re->logical = logical;
	re->blocksize = blocksize;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = blocksize;
	ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
			      &bbio, 0);
	if (ret || !bbio || length < blocksize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(root->fs_info,
			  "readahead: more than %d copies not supported",
			  BTRFS_MAX_MIRRORS);
		goto error;
	}

	for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;
		zone = reada_find_zone(fs_info, dev, logical, bbio);
		if (!zone)
			break;

		re->zones[nzones] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	re->nzones = nzones;
	if (nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_lock(&fs_info->dev_replace);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		BUG_ON(!re_exist);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (i = 0; i < nzones; ++i) {
		dev = bbio->stripes[i].dev;
		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev) {
			/*
			 * cannot read ahead on a missing device, but for
			 * RAID5/6 REQ_GET_READ_MIRRORS returns 1, so don't
			 * skip the missing device in that case.
			 */
			if (nzones > 1)
				continue;
		}
		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--i >= 0) {
				dev = bbio->stripes[i].dev;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			BUG_ON(fs_info == NULL);
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_unlock(&fs_info->dev_replace);
			goto error;
		}
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	kfree(bbio);
	return re;

error:
	while (nzones) {
		struct reada_zone *zone;

		--nzones;
		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	kfree(bbio);
	kfree(re);
	return re_exist;
}

static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/* no fs_info->reada_lock needed, as this can't be
			 * the last ref */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->scheduled_for)
		atomic_dec(&re->scheduled_for->reada_in_flight);

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_CACHE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation)
{
	struct btrfs_root *root = rc->root;
	struct reada_extent *re;
	struct reada_extctl *rec;

	re = reada_find_extent(root, logical, top, level); /* takes one ref */
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_NOFS);
	if (!rec) {
		reada_extent_put(root->fs_info, re);
		return -1;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;

		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}

static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *dev)
{
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	u32 blocksize;
	int ret;
	int i;
	int need_kick = 0;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up
	 */
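	/*
	 * extents are keyed by their page-shifted logical start, so this
	 * gang lookup returns the next scheduled extent at or after
	 * dev->reada_next.
	 */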
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + re->blocksize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;
	blocksize = re->blocksize;

	spin_lock(&re->lock);
	if (re->scheduled_for == NULL) {
		re->scheduled_for = dev;
		need_kick = 1;
	}
	spin_unlock(&re->lock);

	reada_extent_put(fs_info, re);

	if (!need_kick)
		return 0;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
				       mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info->extent_root, NULL, logical, ret);
	else if (eb)
		__readahead_hook(fs_info->extent_root, eb, eb->start, ret);

	if (eb)
		free_extent_buffer(eb);

	return 1;
}

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we broke the loop above after 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i)
		reada_start_machine(fs_info);
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, btrfs_readahead_helper,
			reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;

			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
				"%d devs", zone->start, zone->end, zone->elems,
				zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				printk(KERN_CONT " curr off %llu",
					device->reada_next - zone->start);
			printk(KERN_CONT "\n");
			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG
				"  re: logical %llu size %u empty %d for %lld",
				re->logical, re->blocksize,
				list_empty(&re->extctl), re->scheduled_for ?
				re->scheduled_for->devid : -1);

			for (i = 0; i < re->nzones; ++i) {
				printk(KERN_CONT " zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					printk(KERN_CONT " %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			printk(KERN_CONT "\n");
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled_for) {
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			continue;
		}
		printk(KERN_DEBUG
			"re: logical %llu size %u list empty %d for %lld",
			re->logical, re->blocksize, list_empty(&re->extctl),
			re->scheduled_for ? re->scheduled_for->devid : -1);
		for (i = 0; i < re->nzones; ++i) {
			printk(KERN_CONT " zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		printk(KERN_CONT "\n");
		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int level;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->root = root;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	level = btrfs_header_level(node);
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	if (reada_add_block(rc, start, &max_key, level, generation)) {
		kfree(rc);
		return ERR_PTR(-ENOMEM);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(rc->root->fs_info,
			  atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems))
		wait_event(rc->wait, atomic_read(&rc->elems) == 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}