1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/kthread.h>
27 #include <asm/div64.h>
28 #include "compat.h"
29 #include "ctree.h"
30 #include "extent_map.h"
31 #include "disk-io.h"
32 #include "transaction.h"
33 #include "print-tree.h"
34 #include "volumes.h"
35 #include "async-thread.h"
36 #include "check-integrity.h"
37
38 static int init_first_rw_device(struct btrfs_trans_handle *trans,
39 struct btrfs_root *root,
40 struct btrfs_device *device);
41 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
42
43 static DEFINE_MUTEX(uuid_mutex);
44 static LIST_HEAD(fs_uuids);
45
lock_chunks(struct btrfs_root * root)46 static void lock_chunks(struct btrfs_root *root)
47 {
48 mutex_lock(&root->fs_info->chunk_mutex);
49 }
50
unlock_chunks(struct btrfs_root * root)51 static void unlock_chunks(struct btrfs_root *root)
52 {
53 mutex_unlock(&root->fs_info->chunk_mutex);
54 }
55
free_fs_devices(struct btrfs_fs_devices * fs_devices)56 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
57 {
58 struct btrfs_device *device;
59 WARN_ON(fs_devices->opened);
60 while (!list_empty(&fs_devices->devices)) {
61 device = list_entry(fs_devices->devices.next,
62 struct btrfs_device, dev_list);
63 list_del(&device->dev_list);
64 kfree(device->name);
65 kfree(device);
66 }
67 kfree(fs_devices);
68 }
69
btrfs_cleanup_fs_uuids(void)70 void btrfs_cleanup_fs_uuids(void)
71 {
72 struct btrfs_fs_devices *fs_devices;
73
74 while (!list_empty(&fs_uuids)) {
75 fs_devices = list_entry(fs_uuids.next,
76 struct btrfs_fs_devices, list);
77 list_del(&fs_devices->list);
78 free_fs_devices(fs_devices);
79 }
80 }
81
__find_device(struct list_head * head,u64 devid,u8 * uuid)82 static noinline struct btrfs_device *__find_device(struct list_head *head,
83 u64 devid, u8 *uuid)
84 {
85 struct btrfs_device *dev;
86
87 list_for_each_entry(dev, head, dev_list) {
88 if (dev->devid == devid &&
89 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
90 return dev;
91 }
92 }
93 return NULL;
94 }
95
find_fsid(u8 * fsid)96 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
97 {
98 struct btrfs_fs_devices *fs_devices;
99
100 list_for_each_entry(fs_devices, &fs_uuids, list) {
101 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
102 return fs_devices;
103 }
104 return NULL;
105 }
106
requeue_list(struct btrfs_pending_bios * pending_bios,struct bio * head,struct bio * tail)107 static void requeue_list(struct btrfs_pending_bios *pending_bios,
108 struct bio *head, struct bio *tail)
109 {
110
111 struct bio *old_head;
112
113 old_head = pending_bios->head;
114 pending_bios->head = head;
115 if (pending_bios->tail)
116 tail->bi_next = old_head;
117 else
118 pending_bios->tail = tail;
119 }
120
121 /*
122 * we try to collect pending bios for a device so we don't get a large
123 * number of procs sending bios down to the same device. This greatly
124 * improves the schedulers ability to collect and merge the bios.
125 *
126 * But, it also turns into a long list of bios to process and that is sure
127 * to eventually make the worker thread block. The solution here is to
128 * make some progress and then put this work struct back at the end of
129 * the list if the block device is congested. This way, multiple devices
130 * can make progress from a single worker thread.
131 */
run_scheduled_bios(struct btrfs_device * device)132 static noinline void run_scheduled_bios(struct btrfs_device *device)
133 {
134 struct bio *pending;
135 struct backing_dev_info *bdi;
136 struct btrfs_fs_info *fs_info;
137 struct btrfs_pending_bios *pending_bios;
138 struct bio *tail;
139 struct bio *cur;
140 int again = 0;
141 unsigned long num_run;
142 unsigned long batch_run = 0;
143 unsigned long limit;
144 unsigned long last_waited = 0;
145 int force_reg = 0;
146 int sync_pending = 0;
147 struct blk_plug plug;
148
149 /*
150 * this function runs all the bios we've collected for
151 * a particular device. We don't want to wander off to
152 * another device without first sending all of these down.
153 * So, setup a plug here and finish it off before we return
154 */
155 blk_start_plug(&plug);
156
157 bdi = blk_get_backing_dev_info(device->bdev);
158 fs_info = device->dev_root->fs_info;
159 limit = btrfs_async_submit_limit(fs_info);
160 limit = limit * 2 / 3;
161
162 loop:
163 spin_lock(&device->io_lock);
164
165 loop_lock:
166 num_run = 0;
167
168 /* take all the bios off the list at once and process them
169 * later on (without the lock held). But, remember the
170 * tail and other pointers so the bios can be properly reinserted
171 * into the list if we hit congestion
172 */
173 if (!force_reg && device->pending_sync_bios.head) {
174 pending_bios = &device->pending_sync_bios;
175 force_reg = 1;
176 } else {
177 pending_bios = &device->pending_bios;
178 force_reg = 0;
179 }
180
181 pending = pending_bios->head;
182 tail = pending_bios->tail;
183 WARN_ON(pending && !tail);
184
185 /*
186 * if pending was null this time around, no bios need processing
187 * at all and we can stop. Otherwise it'll loop back up again
188 * and do an additional check so no bios are missed.
189 *
190 * device->running_pending is used to synchronize with the
191 * schedule_bio code.
192 */
193 if (device->pending_sync_bios.head == NULL &&
194 device->pending_bios.head == NULL) {
195 again = 0;
196 device->running_pending = 0;
197 } else {
198 again = 1;
199 device->running_pending = 1;
200 }
201
202 pending_bios->head = NULL;
203 pending_bios->tail = NULL;
204
205 spin_unlock(&device->io_lock);
206
207 while (pending) {
208
209 rmb();
210 /* we want to work on both lists, but do more bios on the
211 * sync list than the regular list
212 */
213 if ((num_run > 32 &&
214 pending_bios != &device->pending_sync_bios &&
215 device->pending_sync_bios.head) ||
216 (num_run > 64 && pending_bios == &device->pending_sync_bios &&
217 device->pending_bios.head)) {
218 spin_lock(&device->io_lock);
219 requeue_list(pending_bios, pending, tail);
220 goto loop_lock;
221 }
222
223 cur = pending;
224 pending = pending->bi_next;
225 cur->bi_next = NULL;
226 atomic_dec(&fs_info->nr_async_bios);
227
228 if (atomic_read(&fs_info->nr_async_bios) < limit &&
229 waitqueue_active(&fs_info->async_submit_wait))
230 wake_up(&fs_info->async_submit_wait);
231
232 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
233
234 /*
235 * if we're doing the sync list, record that our
236 * plug has some sync requests on it
237 *
238 * If we're doing the regular list and there are
239 * sync requests sitting around, unplug before
240 * we add more
241 */
242 if (pending_bios == &device->pending_sync_bios) {
243 sync_pending = 1;
244 } else if (sync_pending) {
245 blk_finish_plug(&plug);
246 blk_start_plug(&plug);
247 sync_pending = 0;
248 }
249
250 btrfsic_submit_bio(cur->bi_rw, cur);
251 num_run++;
252 batch_run++;
253 if (need_resched())
254 cond_resched();
255
256 /*
257 * we made progress, there is more work to do and the bdi
258 * is now congested. Back off and let other work structs
259 * run instead
260 */
261 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
262 fs_info->fs_devices->open_devices > 1) {
263 struct io_context *ioc;
264
265 ioc = current->io_context;
266
267 /*
268 * the main goal here is that we don't want to
269 * block if we're going to be able to submit
270 * more requests without blocking.
271 *
272 * This code does two great things, it pokes into
273 * the elevator code from a filesystem _and_
274 * it makes assumptions about how batching works.
275 */
276 if (ioc && ioc->nr_batch_requests > 0 &&
277 time_before(jiffies, ioc->last_waited + HZ/50UL) &&
278 (last_waited == 0 ||
279 ioc->last_waited == last_waited)) {
280 /*
281 * we want to go through our batch of
282 * requests and stop. So, we copy out
283 * the ioc->last_waited time and test
284 * against it before looping
285 */
286 last_waited = ioc->last_waited;
287 if (need_resched())
288 cond_resched();
289 continue;
290 }
291 spin_lock(&device->io_lock);
292 requeue_list(pending_bios, pending, tail);
293 device->running_pending = 1;
294
295 spin_unlock(&device->io_lock);
296 btrfs_requeue_work(&device->work);
297 goto done;
298 }
299 /* unplug every 64 requests just for good measure */
300 if (batch_run % 64 == 0) {
301 blk_finish_plug(&plug);
302 blk_start_plug(&plug);
303 sync_pending = 0;
304 }
305 }
306
307 cond_resched();
308 if (again)
309 goto loop;
310
311 spin_lock(&device->io_lock);
312 if (device->pending_bios.head || device->pending_sync_bios.head)
313 goto loop_lock;
314 spin_unlock(&device->io_lock);
315
316 done:
317 blk_finish_plug(&plug);
318 }
319
pending_bios_fn(struct btrfs_work * work)320 static void pending_bios_fn(struct btrfs_work *work)
321 {
322 struct btrfs_device *device;
323
324 device = container_of(work, struct btrfs_device, work);
325 run_scheduled_bios(device);
326 }
327
device_list_add(const char * path,struct btrfs_super_block * disk_super,u64 devid,struct btrfs_fs_devices ** fs_devices_ret)328 static noinline int device_list_add(const char *path,
329 struct btrfs_super_block *disk_super,
330 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
331 {
332 struct btrfs_device *device;
333 struct btrfs_fs_devices *fs_devices;
334 u64 found_transid = btrfs_super_generation(disk_super);
335 char *name;
336
337 fs_devices = find_fsid(disk_super->fsid);
338 if (!fs_devices) {
339 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
340 if (!fs_devices)
341 return -ENOMEM;
342 INIT_LIST_HEAD(&fs_devices->devices);
343 INIT_LIST_HEAD(&fs_devices->alloc_list);
344 list_add(&fs_devices->list, &fs_uuids);
345 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
346 fs_devices->latest_devid = devid;
347 fs_devices->latest_trans = found_transid;
348 mutex_init(&fs_devices->device_list_mutex);
349 device = NULL;
350 } else {
351 device = __find_device(&fs_devices->devices, devid,
352 disk_super->dev_item.uuid);
353 }
354 if (!device) {
355 if (fs_devices->opened)
356 return -EBUSY;
357
358 device = kzalloc(sizeof(*device), GFP_NOFS);
359 if (!device) {
360 /* we can safely leave the fs_devices entry around */
361 return -ENOMEM;
362 }
363 device->devid = devid;
364 device->work.func = pending_bios_fn;
365 memcpy(device->uuid, disk_super->dev_item.uuid,
366 BTRFS_UUID_SIZE);
367 spin_lock_init(&device->io_lock);
368 device->name = kstrdup(path, GFP_NOFS);
369 if (!device->name) {
370 kfree(device);
371 return -ENOMEM;
372 }
373 INIT_LIST_HEAD(&device->dev_alloc_list);
374
375 /* init readahead state */
376 spin_lock_init(&device->reada_lock);
377 device->reada_curr_zone = NULL;
378 atomic_set(&device->reada_in_flight, 0);
379 device->reada_next = 0;
380 INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
381 INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
382
383 mutex_lock(&fs_devices->device_list_mutex);
384 list_add_rcu(&device->dev_list, &fs_devices->devices);
385 mutex_unlock(&fs_devices->device_list_mutex);
386
387 device->fs_devices = fs_devices;
388 fs_devices->num_devices++;
389 } else if (!device->name || strcmp(device->name, path)) {
390 name = kstrdup(path, GFP_NOFS);
391 if (!name)
392 return -ENOMEM;
393 kfree(device->name);
394 device->name = name;
395 if (device->missing) {
396 fs_devices->missing_devices--;
397 device->missing = 0;
398 }
399 }
400
401 if (found_transid > fs_devices->latest_trans) {
402 fs_devices->latest_devid = devid;
403 fs_devices->latest_trans = found_transid;
404 }
405 *fs_devices_ret = fs_devices;
406 return 0;
407 }
408
clone_fs_devices(struct btrfs_fs_devices * orig)409 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
410 {
411 struct btrfs_fs_devices *fs_devices;
412 struct btrfs_device *device;
413 struct btrfs_device *orig_dev;
414
415 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
416 if (!fs_devices)
417 return ERR_PTR(-ENOMEM);
418
419 INIT_LIST_HEAD(&fs_devices->devices);
420 INIT_LIST_HEAD(&fs_devices->alloc_list);
421 INIT_LIST_HEAD(&fs_devices->list);
422 mutex_init(&fs_devices->device_list_mutex);
423 fs_devices->latest_devid = orig->latest_devid;
424 fs_devices->latest_trans = orig->latest_trans;
425 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
426
427 /* We have held the volume lock, it is safe to get the devices. */
428 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
429 device = kzalloc(sizeof(*device), GFP_NOFS);
430 if (!device)
431 goto error;
432
433 device->name = kstrdup(orig_dev->name, GFP_NOFS);
434 if (!device->name) {
435 kfree(device);
436 goto error;
437 }
438
439 device->devid = orig_dev->devid;
440 device->work.func = pending_bios_fn;
441 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
442 spin_lock_init(&device->io_lock);
443 INIT_LIST_HEAD(&device->dev_list);
444 INIT_LIST_HEAD(&device->dev_alloc_list);
445
446 list_add(&device->dev_list, &fs_devices->devices);
447 device->fs_devices = fs_devices;
448 fs_devices->num_devices++;
449 }
450 return fs_devices;
451 error:
452 free_fs_devices(fs_devices);
453 return ERR_PTR(-ENOMEM);
454 }
455
btrfs_close_extra_devices(struct btrfs_fs_devices * fs_devices)456 void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
457 {
458 struct btrfs_device *device, *next;
459
460 struct block_device *latest_bdev = NULL;
461 u64 latest_devid = 0;
462 u64 latest_transid = 0;
463
464 mutex_lock(&uuid_mutex);
465 again:
466 /* This is the initialized path, it is safe to release the devices. */
467 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
468 if (device->in_fs_metadata) {
469 if (!latest_transid ||
470 device->generation > latest_transid) {
471 latest_devid = device->devid;
472 latest_transid = device->generation;
473 latest_bdev = device->bdev;
474 }
475 continue;
476 }
477
478 if (device->bdev) {
479 blkdev_put(device->bdev, device->mode);
480 device->bdev = NULL;
481 fs_devices->open_devices--;
482 }
483 if (device->writeable) {
484 list_del_init(&device->dev_alloc_list);
485 device->writeable = 0;
486 fs_devices->rw_devices--;
487 }
488 list_del_init(&device->dev_list);
489 fs_devices->num_devices--;
490 kfree(device->name);
491 kfree(device);
492 }
493
494 if (fs_devices->seed) {
495 fs_devices = fs_devices->seed;
496 goto again;
497 }
498
499 fs_devices->latest_bdev = latest_bdev;
500 fs_devices->latest_devid = latest_devid;
501 fs_devices->latest_trans = latest_transid;
502
503 mutex_unlock(&uuid_mutex);
504 }
505
__free_device(struct work_struct * work)506 static void __free_device(struct work_struct *work)
507 {
508 struct btrfs_device *device;
509
510 device = container_of(work, struct btrfs_device, rcu_work);
511
512 if (device->bdev)
513 blkdev_put(device->bdev, device->mode);
514
515 kfree(device->name);
516 kfree(device);
517 }
518
free_device(struct rcu_head * head)519 static void free_device(struct rcu_head *head)
520 {
521 struct btrfs_device *device;
522
523 device = container_of(head, struct btrfs_device, rcu);
524
525 INIT_WORK(&device->rcu_work, __free_device);
526 schedule_work(&device->rcu_work);
527 }
528
__btrfs_close_devices(struct btrfs_fs_devices * fs_devices)529 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
530 {
531 struct btrfs_device *device;
532
533 if (--fs_devices->opened > 0)
534 return 0;
535
536 mutex_lock(&fs_devices->device_list_mutex);
537 list_for_each_entry(device, &fs_devices->devices, dev_list) {
538 struct btrfs_device *new_device;
539
540 if (device->bdev)
541 fs_devices->open_devices--;
542
543 if (device->writeable) {
544 list_del_init(&device->dev_alloc_list);
545 fs_devices->rw_devices--;
546 }
547
548 if (device->can_discard)
549 fs_devices->num_can_discard--;
550
551 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
552 BUG_ON(!new_device); /* -ENOMEM */
553 memcpy(new_device, device, sizeof(*new_device));
554 new_device->name = kstrdup(device->name, GFP_NOFS);
555 BUG_ON(device->name && !new_device->name); /* -ENOMEM */
556 new_device->bdev = NULL;
557 new_device->writeable = 0;
558 new_device->in_fs_metadata = 0;
559 new_device->can_discard = 0;
560 spin_lock_init(&new_device->io_lock);
561 list_replace_rcu(&device->dev_list, &new_device->dev_list);
562
563 call_rcu(&device->rcu, free_device);
564 }
565 mutex_unlock(&fs_devices->device_list_mutex);
566
567 WARN_ON(fs_devices->open_devices);
568 WARN_ON(fs_devices->rw_devices);
569 fs_devices->opened = 0;
570 fs_devices->seeding = 0;
571
572 return 0;
573 }
574
btrfs_close_devices(struct btrfs_fs_devices * fs_devices)575 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
576 {
577 struct btrfs_fs_devices *seed_devices = NULL;
578 int ret;
579
580 mutex_lock(&uuid_mutex);
581 ret = __btrfs_close_devices(fs_devices);
582 if (!fs_devices->opened) {
583 seed_devices = fs_devices->seed;
584 fs_devices->seed = NULL;
585 }
586 mutex_unlock(&uuid_mutex);
587
588 while (seed_devices) {
589 fs_devices = seed_devices;
590 seed_devices = fs_devices->seed;
591 __btrfs_close_devices(fs_devices);
592 free_fs_devices(fs_devices);
593 }
594 /*
595 * Wait for rcu kworkers under __btrfs_close_devices
596 * to finish all blkdev_puts so device is really
597 * free when umount is done.
598 */
599 rcu_barrier();
600 return ret;
601 }
602
__btrfs_open_devices(struct btrfs_fs_devices * fs_devices,fmode_t flags,void * holder)603 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
604 fmode_t flags, void *holder)
605 {
606 struct request_queue *q;
607 struct block_device *bdev;
608 struct list_head *head = &fs_devices->devices;
609 struct btrfs_device *device;
610 struct block_device *latest_bdev = NULL;
611 struct buffer_head *bh;
612 struct btrfs_super_block *disk_super;
613 u64 latest_devid = 0;
614 u64 latest_transid = 0;
615 u64 devid;
616 int seeding = 1;
617 int ret = 0;
618
619 flags |= FMODE_EXCL;
620
621 list_for_each_entry(device, head, dev_list) {
622 if (device->bdev)
623 continue;
624 if (!device->name)
625 continue;
626
627 bdev = blkdev_get_by_path(device->name, flags, holder);
628 if (IS_ERR(bdev)) {
629 printk(KERN_INFO "open %s failed\n", device->name);
630 goto error;
631 }
632 filemap_write_and_wait(bdev->bd_inode->i_mapping);
633 invalidate_bdev(bdev);
634 set_blocksize(bdev, 4096);
635
636 bh = btrfs_read_dev_super(bdev);
637 if (!bh)
638 goto error_close;
639
640 disk_super = (struct btrfs_super_block *)bh->b_data;
641 devid = btrfs_stack_device_id(&disk_super->dev_item);
642 if (devid != device->devid)
643 goto error_brelse;
644
645 if (memcmp(device->uuid, disk_super->dev_item.uuid,
646 BTRFS_UUID_SIZE))
647 goto error_brelse;
648
649 device->generation = btrfs_super_generation(disk_super);
650 if (!latest_transid || device->generation > latest_transid) {
651 latest_devid = devid;
652 latest_transid = device->generation;
653 latest_bdev = bdev;
654 }
655
656 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
657 device->writeable = 0;
658 } else {
659 device->writeable = !bdev_read_only(bdev);
660 seeding = 0;
661 }
662
663 q = bdev_get_queue(bdev);
664 if (blk_queue_discard(q)) {
665 device->can_discard = 1;
666 fs_devices->num_can_discard++;
667 }
668
669 device->bdev = bdev;
670 device->in_fs_metadata = 0;
671 device->mode = flags;
672
673 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
674 fs_devices->rotating = 1;
675
676 fs_devices->open_devices++;
677 if (device->writeable) {
678 fs_devices->rw_devices++;
679 list_add(&device->dev_alloc_list,
680 &fs_devices->alloc_list);
681 }
682 brelse(bh);
683 continue;
684
685 error_brelse:
686 brelse(bh);
687 error_close:
688 blkdev_put(bdev, flags);
689 error:
690 continue;
691 }
692 if (fs_devices->open_devices == 0) {
693 ret = -EINVAL;
694 goto out;
695 }
696 fs_devices->seeding = seeding;
697 fs_devices->opened = 1;
698 fs_devices->latest_bdev = latest_bdev;
699 fs_devices->latest_devid = latest_devid;
700 fs_devices->latest_trans = latest_transid;
701 fs_devices->total_rw_bytes = 0;
702 out:
703 return ret;
704 }
705
btrfs_open_devices(struct btrfs_fs_devices * fs_devices,fmode_t flags,void * holder)706 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
707 fmode_t flags, void *holder)
708 {
709 int ret;
710
711 mutex_lock(&uuid_mutex);
712 if (fs_devices->opened) {
713 fs_devices->opened++;
714 ret = 0;
715 } else {
716 ret = __btrfs_open_devices(fs_devices, flags, holder);
717 }
718 mutex_unlock(&uuid_mutex);
719 return ret;
720 }
721
btrfs_scan_one_device(const char * path,fmode_t flags,void * holder,struct btrfs_fs_devices ** fs_devices_ret)722 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
723 struct btrfs_fs_devices **fs_devices_ret)
724 {
725 struct btrfs_super_block *disk_super;
726 struct block_device *bdev;
727 struct buffer_head *bh;
728 int ret;
729 u64 devid;
730 u64 transid;
731
732 flags |= FMODE_EXCL;
733 bdev = blkdev_get_by_path(path, flags, holder);
734
735 if (IS_ERR(bdev)) {
736 ret = PTR_ERR(bdev);
737 goto error;
738 }
739
740 mutex_lock(&uuid_mutex);
741 ret = set_blocksize(bdev, 4096);
742 if (ret)
743 goto error_close;
744 bh = btrfs_read_dev_super(bdev);
745 if (!bh) {
746 ret = -EINVAL;
747 goto error_close;
748 }
749 disk_super = (struct btrfs_super_block *)bh->b_data;
750 devid = btrfs_stack_device_id(&disk_super->dev_item);
751 transid = btrfs_super_generation(disk_super);
752 if (disk_super->label[0])
753 printk(KERN_INFO "device label %s ", disk_super->label);
754 else
755 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
756 printk(KERN_CONT "devid %llu transid %llu %s\n",
757 (unsigned long long)devid, (unsigned long long)transid, path);
758 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
759
760 brelse(bh);
761 error_close:
762 mutex_unlock(&uuid_mutex);
763 blkdev_put(bdev, flags);
764 error:
765 return ret;
766 }
767
768 /* helper to account the used device space in the range */
btrfs_account_dev_extents_size(struct btrfs_device * device,u64 start,u64 end,u64 * length)769 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
770 u64 end, u64 *length)
771 {
772 struct btrfs_key key;
773 struct btrfs_root *root = device->dev_root;
774 struct btrfs_dev_extent *dev_extent;
775 struct btrfs_path *path;
776 u64 extent_end;
777 int ret;
778 int slot;
779 struct extent_buffer *l;
780
781 *length = 0;
782
783 if (start >= device->total_bytes)
784 return 0;
785
786 path = btrfs_alloc_path();
787 if (!path)
788 return -ENOMEM;
789 path->reada = 2;
790
791 key.objectid = device->devid;
792 key.offset = start;
793 key.type = BTRFS_DEV_EXTENT_KEY;
794
795 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
796 if (ret < 0)
797 goto out;
798 if (ret > 0) {
799 ret = btrfs_previous_item(root, path, key.objectid, key.type);
800 if (ret < 0)
801 goto out;
802 }
803
804 while (1) {
805 l = path->nodes[0];
806 slot = path->slots[0];
807 if (slot >= btrfs_header_nritems(l)) {
808 ret = btrfs_next_leaf(root, path);
809 if (ret == 0)
810 continue;
811 if (ret < 0)
812 goto out;
813
814 break;
815 }
816 btrfs_item_key_to_cpu(l, &key, slot);
817
818 if (key.objectid < device->devid)
819 goto next;
820
821 if (key.objectid > device->devid)
822 break;
823
824 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
825 goto next;
826
827 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
828 extent_end = key.offset + btrfs_dev_extent_length(l,
829 dev_extent);
830 if (key.offset <= start && extent_end > end) {
831 *length = end - start + 1;
832 break;
833 } else if (key.offset <= start && extent_end > start)
834 *length += extent_end - start;
835 else if (key.offset > start && extent_end <= end)
836 *length += extent_end - key.offset;
837 else if (key.offset > start && key.offset <= end) {
838 *length += end - key.offset + 1;
839 break;
840 } else if (key.offset > end)
841 break;
842
843 next:
844 path->slots[0]++;
845 }
846 ret = 0;
847 out:
848 btrfs_free_path(path);
849 return ret;
850 }
851
852 /*
853 * find_free_dev_extent - find free space in the specified device
854 * @device: the device which we search the free space in
855 * @num_bytes: the size of the free space that we need
856 * @start: store the start of the free space.
857 * @len: the size of the free space. that we find, or the size of the max
858 * free space if we don't find suitable free space
859 *
860 * this uses a pretty simple search, the expectation is that it is
861 * called very infrequently and that a given device has a small number
862 * of extents
863 *
864 * @start is used to store the start of the free space if we find. But if we
865 * don't find suitable free space, it will be used to store the start position
866 * of the max free space.
867 *
868 * @len is used to store the size of the free space that we find.
869 * But if we don't find suitable free space, it is used to store the size of
870 * the max free space.
871 */
find_free_dev_extent(struct btrfs_device * device,u64 num_bytes,u64 * start,u64 * len)872 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
873 u64 *start, u64 *len)
874 {
875 struct btrfs_key key;
876 struct btrfs_root *root = device->dev_root;
877 struct btrfs_dev_extent *dev_extent;
878 struct btrfs_path *path;
879 u64 hole_size;
880 u64 max_hole_start;
881 u64 max_hole_size;
882 u64 extent_end;
883 u64 search_start;
884 u64 search_end = device->total_bytes;
885 int ret;
886 int slot;
887 struct extent_buffer *l;
888
889 /* FIXME use last free of some kind */
890
891 /* we don't want to overwrite the superblock on the drive,
892 * so we make sure to start at an offset of at least 1MB
893 */
894 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
895
896 max_hole_start = search_start;
897 max_hole_size = 0;
898 hole_size = 0;
899
900 if (search_start >= search_end) {
901 ret = -ENOSPC;
902 goto error;
903 }
904
905 path = btrfs_alloc_path();
906 if (!path) {
907 ret = -ENOMEM;
908 goto error;
909 }
910 path->reada = 2;
911
912 key.objectid = device->devid;
913 key.offset = search_start;
914 key.type = BTRFS_DEV_EXTENT_KEY;
915
916 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
917 if (ret < 0)
918 goto out;
919 if (ret > 0) {
920 ret = btrfs_previous_item(root, path, key.objectid, key.type);
921 if (ret < 0)
922 goto out;
923 }
924
925 while (1) {
926 l = path->nodes[0];
927 slot = path->slots[0];
928 if (slot >= btrfs_header_nritems(l)) {
929 ret = btrfs_next_leaf(root, path);
930 if (ret == 0)
931 continue;
932 if (ret < 0)
933 goto out;
934
935 break;
936 }
937 btrfs_item_key_to_cpu(l, &key, slot);
938
939 if (key.objectid < device->devid)
940 goto next;
941
942 if (key.objectid > device->devid)
943 break;
944
945 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
946 goto next;
947
948 if (key.offset > search_start) {
949 hole_size = key.offset - search_start;
950
951 if (hole_size > max_hole_size) {
952 max_hole_start = search_start;
953 max_hole_size = hole_size;
954 }
955
956 /*
957 * If this free space is greater than which we need,
958 * it must be the max free space that we have found
959 * until now, so max_hole_start must point to the start
960 * of this free space and the length of this free space
961 * is stored in max_hole_size. Thus, we return
962 * max_hole_start and max_hole_size and go back to the
963 * caller.
964 */
965 if (hole_size >= num_bytes) {
966 ret = 0;
967 goto out;
968 }
969 }
970
971 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
972 extent_end = key.offset + btrfs_dev_extent_length(l,
973 dev_extent);
974 if (extent_end > search_start)
975 search_start = extent_end;
976 next:
977 path->slots[0]++;
978 cond_resched();
979 }
980
981 /*
982 * At this point, search_start should be the end of
983 * allocated dev extents, and when shrinking the device,
984 * search_end may be smaller than search_start.
985 */
986 if (search_end > search_start)
987 hole_size = search_end - search_start;
988
989 if (hole_size > max_hole_size) {
990 max_hole_start = search_start;
991 max_hole_size = hole_size;
992 }
993
994 /* See above. */
995 if (hole_size < num_bytes)
996 ret = -ENOSPC;
997 else
998 ret = 0;
999
1000 out:
1001 btrfs_free_path(path);
1002 error:
1003 *start = max_hole_start;
1004 if (len)
1005 *len = max_hole_size;
1006 return ret;
1007 }
1008
btrfs_free_dev_extent(struct btrfs_trans_handle * trans,struct btrfs_device * device,u64 start)1009 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1010 struct btrfs_device *device,
1011 u64 start)
1012 {
1013 int ret;
1014 struct btrfs_path *path;
1015 struct btrfs_root *root = device->dev_root;
1016 struct btrfs_key key;
1017 struct btrfs_key found_key;
1018 struct extent_buffer *leaf = NULL;
1019 struct btrfs_dev_extent *extent = NULL;
1020
1021 path = btrfs_alloc_path();
1022 if (!path)
1023 return -ENOMEM;
1024
1025 key.objectid = device->devid;
1026 key.offset = start;
1027 key.type = BTRFS_DEV_EXTENT_KEY;
1028 again:
1029 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1030 if (ret > 0) {
1031 ret = btrfs_previous_item(root, path, key.objectid,
1032 BTRFS_DEV_EXTENT_KEY);
1033 if (ret)
1034 goto out;
1035 leaf = path->nodes[0];
1036 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1037 extent = btrfs_item_ptr(leaf, path->slots[0],
1038 struct btrfs_dev_extent);
1039 BUG_ON(found_key.offset > start || found_key.offset +
1040 btrfs_dev_extent_length(leaf, extent) < start);
1041 key = found_key;
1042 btrfs_release_path(path);
1043 goto again;
1044 } else if (ret == 0) {
1045 leaf = path->nodes[0];
1046 extent = btrfs_item_ptr(leaf, path->slots[0],
1047 struct btrfs_dev_extent);
1048 } else {
1049 btrfs_error(root->fs_info, ret, "Slot search failed");
1050 goto out;
1051 }
1052
1053 if (device->bytes_used > 0) {
1054 u64 len = btrfs_dev_extent_length(leaf, extent);
1055 device->bytes_used -= len;
1056 spin_lock(&root->fs_info->free_chunk_lock);
1057 root->fs_info->free_chunk_space += len;
1058 spin_unlock(&root->fs_info->free_chunk_lock);
1059 }
1060 ret = btrfs_del_item(trans, root, path);
1061 if (ret) {
1062 btrfs_error(root->fs_info, ret,
1063 "Failed to remove dev extent item");
1064 }
1065 out:
1066 btrfs_free_path(path);
1067 return ret;
1068 }
1069
btrfs_alloc_dev_extent(struct btrfs_trans_handle * trans,struct btrfs_device * device,u64 chunk_tree,u64 chunk_objectid,u64 chunk_offset,u64 start,u64 num_bytes)1070 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1071 struct btrfs_device *device,
1072 u64 chunk_tree, u64 chunk_objectid,
1073 u64 chunk_offset, u64 start, u64 num_bytes)
1074 {
1075 int ret;
1076 struct btrfs_path *path;
1077 struct btrfs_root *root = device->dev_root;
1078 struct btrfs_dev_extent *extent;
1079 struct extent_buffer *leaf;
1080 struct btrfs_key key;
1081
1082 WARN_ON(!device->in_fs_metadata);
1083 path = btrfs_alloc_path();
1084 if (!path)
1085 return -ENOMEM;
1086
1087 key.objectid = device->devid;
1088 key.offset = start;
1089 key.type = BTRFS_DEV_EXTENT_KEY;
1090 ret = btrfs_insert_empty_item(trans, root, path, &key,
1091 sizeof(*extent));
1092 if (ret)
1093 goto out;
1094
1095 leaf = path->nodes[0];
1096 extent = btrfs_item_ptr(leaf, path->slots[0],
1097 struct btrfs_dev_extent);
1098 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1099 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1100 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1101
1102 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1103 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1104 BTRFS_UUID_SIZE);
1105
1106 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1107 btrfs_mark_buffer_dirty(leaf);
1108 out:
1109 btrfs_free_path(path);
1110 return ret;
1111 }
1112
find_next_chunk(struct btrfs_root * root,u64 objectid,u64 * offset)1113 static noinline int find_next_chunk(struct btrfs_root *root,
1114 u64 objectid, u64 *offset)
1115 {
1116 struct btrfs_path *path;
1117 int ret;
1118 struct btrfs_key key;
1119 struct btrfs_chunk *chunk;
1120 struct btrfs_key found_key;
1121
1122 path = btrfs_alloc_path();
1123 if (!path)
1124 return -ENOMEM;
1125
1126 key.objectid = objectid;
1127 key.offset = (u64)-1;
1128 key.type = BTRFS_CHUNK_ITEM_KEY;
1129
1130 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1131 if (ret < 0)
1132 goto error;
1133
1134 BUG_ON(ret == 0); /* Corruption */
1135
1136 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1137 if (ret) {
1138 *offset = 0;
1139 } else {
1140 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1141 path->slots[0]);
1142 if (found_key.objectid != objectid)
1143 *offset = 0;
1144 else {
1145 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1146 struct btrfs_chunk);
1147 *offset = found_key.offset +
1148 btrfs_chunk_length(path->nodes[0], chunk);
1149 }
1150 }
1151 ret = 0;
1152 error:
1153 btrfs_free_path(path);
1154 return ret;
1155 }
1156
find_next_devid(struct btrfs_root * root,u64 * objectid)1157 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1158 {
1159 int ret;
1160 struct btrfs_key key;
1161 struct btrfs_key found_key;
1162 struct btrfs_path *path;
1163
1164 root = root->fs_info->chunk_root;
1165
1166 path = btrfs_alloc_path();
1167 if (!path)
1168 return -ENOMEM;
1169
1170 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1171 key.type = BTRFS_DEV_ITEM_KEY;
1172 key.offset = (u64)-1;
1173
1174 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1175 if (ret < 0)
1176 goto error;
1177
1178 BUG_ON(ret == 0); /* Corruption */
1179
1180 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1181 BTRFS_DEV_ITEM_KEY);
1182 if (ret) {
1183 *objectid = 1;
1184 } else {
1185 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1186 path->slots[0]);
1187 *objectid = found_key.offset + 1;
1188 }
1189 ret = 0;
1190 error:
1191 btrfs_free_path(path);
1192 return ret;
1193 }
1194
1195 /*
1196 * the device information is stored in the chunk root
1197 * the btrfs_device struct should be fully filled in
1198 */
btrfs_add_device(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct btrfs_device * device)1199 int btrfs_add_device(struct btrfs_trans_handle *trans,
1200 struct btrfs_root *root,
1201 struct btrfs_device *device)
1202 {
1203 int ret;
1204 struct btrfs_path *path;
1205 struct btrfs_dev_item *dev_item;
1206 struct extent_buffer *leaf;
1207 struct btrfs_key key;
1208 unsigned long ptr;
1209
1210 root = root->fs_info->chunk_root;
1211
1212 path = btrfs_alloc_path();
1213 if (!path)
1214 return -ENOMEM;
1215
1216 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1217 key.type = BTRFS_DEV_ITEM_KEY;
1218 key.offset = device->devid;
1219
1220 ret = btrfs_insert_empty_item(trans, root, path, &key,
1221 sizeof(*dev_item));
1222 if (ret)
1223 goto out;
1224
1225 leaf = path->nodes[0];
1226 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1227
1228 btrfs_set_device_id(leaf, dev_item, device->devid);
1229 btrfs_set_device_generation(leaf, dev_item, 0);
1230 btrfs_set_device_type(leaf, dev_item, device->type);
1231 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1232 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1233 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1234 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1235 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1236 btrfs_set_device_group(leaf, dev_item, 0);
1237 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1238 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1239 btrfs_set_device_start_offset(leaf, dev_item, 0);
1240
1241 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1242 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1243 ptr = (unsigned long)btrfs_device_fsid(dev_item);
1244 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1245 btrfs_mark_buffer_dirty(leaf);
1246
1247 ret = 0;
1248 out:
1249 btrfs_free_path(path);
1250 return ret;
1251 }
1252
btrfs_rm_dev_item(struct btrfs_root * root,struct btrfs_device * device)1253 static int btrfs_rm_dev_item(struct btrfs_root *root,
1254 struct btrfs_device *device)
1255 {
1256 int ret;
1257 struct btrfs_path *path;
1258 struct btrfs_key key;
1259 struct btrfs_trans_handle *trans;
1260
1261 root = root->fs_info->chunk_root;
1262
1263 path = btrfs_alloc_path();
1264 if (!path)
1265 return -ENOMEM;
1266
1267 trans = btrfs_start_transaction(root, 0);
1268 if (IS_ERR(trans)) {
1269 btrfs_free_path(path);
1270 return PTR_ERR(trans);
1271 }
1272 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1273 key.type = BTRFS_DEV_ITEM_KEY;
1274 key.offset = device->devid;
1275 lock_chunks(root);
1276
1277 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1278 if (ret < 0)
1279 goto out;
1280
1281 if (ret > 0) {
1282 ret = -ENOENT;
1283 goto out;
1284 }
1285
1286 ret = btrfs_del_item(trans, root, path);
1287 if (ret)
1288 goto out;
1289 out:
1290 btrfs_free_path(path);
1291 unlock_chunks(root);
1292 btrfs_commit_transaction(trans, root);
1293 return ret;
1294 }
1295
btrfs_rm_device(struct btrfs_root * root,char * device_path)1296 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1297 {
1298 struct btrfs_device *device;
1299 struct btrfs_device *next_device;
1300 struct block_device *bdev;
1301 struct buffer_head *bh = NULL;
1302 struct btrfs_super_block *disk_super;
1303 struct btrfs_fs_devices *cur_devices;
1304 u64 all_avail;
1305 u64 devid;
1306 u64 num_devices;
1307 u8 *dev_uuid;
1308 int ret = 0;
1309 bool clear_super = false;
1310
1311 mutex_lock(&uuid_mutex);
1312
1313 all_avail = root->fs_info->avail_data_alloc_bits |
1314 root->fs_info->avail_system_alloc_bits |
1315 root->fs_info->avail_metadata_alloc_bits;
1316
1317 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1318 root->fs_info->fs_devices->num_devices <= 4) {
1319 printk(KERN_ERR "btrfs: unable to go below four devices "
1320 "on raid10\n");
1321 ret = -EINVAL;
1322 goto out;
1323 }
1324
1325 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1326 root->fs_info->fs_devices->num_devices <= 2) {
1327 printk(KERN_ERR "btrfs: unable to go below two "
1328 "devices on raid1\n");
1329 ret = -EINVAL;
1330 goto out;
1331 }
1332
1333 if (strcmp(device_path, "missing") == 0) {
1334 struct list_head *devices;
1335 struct btrfs_device *tmp;
1336
1337 device = NULL;
1338 devices = &root->fs_info->fs_devices->devices;
1339 /*
1340 * It is safe to read the devices since the volume_mutex
1341 * is held.
1342 */
1343 list_for_each_entry(tmp, devices, dev_list) {
1344 if (tmp->in_fs_metadata && !tmp->bdev) {
1345 device = tmp;
1346 break;
1347 }
1348 }
1349 bdev = NULL;
1350 bh = NULL;
1351 disk_super = NULL;
1352 if (!device) {
1353 printk(KERN_ERR "btrfs: no missing devices found to "
1354 "remove\n");
1355 goto out;
1356 }
1357 } else {
1358 bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
1359 root->fs_info->bdev_holder);
1360 if (IS_ERR(bdev)) {
1361 ret = PTR_ERR(bdev);
1362 goto out;
1363 }
1364
1365 set_blocksize(bdev, 4096);
1366 invalidate_bdev(bdev);
1367 bh = btrfs_read_dev_super(bdev);
1368 if (!bh) {
1369 ret = -EINVAL;
1370 goto error_close;
1371 }
1372 disk_super = (struct btrfs_super_block *)bh->b_data;
1373 devid = btrfs_stack_device_id(&disk_super->dev_item);
1374 dev_uuid = disk_super->dev_item.uuid;
1375 device = btrfs_find_device(root, devid, dev_uuid,
1376 disk_super->fsid);
1377 if (!device) {
1378 ret = -ENOENT;
1379 goto error_brelse;
1380 }
1381 }
1382
1383 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1384 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1385 "device\n");
1386 ret = -EINVAL;
1387 goto error_brelse;
1388 }
1389
1390 if (device->writeable) {
1391 lock_chunks(root);
1392 list_del_init(&device->dev_alloc_list);
1393 unlock_chunks(root);
1394 root->fs_info->fs_devices->rw_devices--;
1395 clear_super = true;
1396 }
1397
1398 ret = btrfs_shrink_device(device, 0);
1399 if (ret)
1400 goto error_undo;
1401
1402 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1403 if (ret)
1404 goto error_undo;
1405
1406 spin_lock(&root->fs_info->free_chunk_lock);
1407 root->fs_info->free_chunk_space = device->total_bytes -
1408 device->bytes_used;
1409 spin_unlock(&root->fs_info->free_chunk_lock);
1410
1411 device->in_fs_metadata = 0;
1412 btrfs_scrub_cancel_dev(root, device);
1413
1414 /*
1415 * the device list mutex makes sure that we don't change
1416 * the device list while someone else is writing out all
1417 * the device supers.
1418 */
1419
1420 cur_devices = device->fs_devices;
1421 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1422 list_del_rcu(&device->dev_list);
1423
1424 device->fs_devices->num_devices--;
1425
1426 if (device->missing)
1427 root->fs_info->fs_devices->missing_devices--;
1428
1429 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1430 struct btrfs_device, dev_list);
1431 if (device->bdev == root->fs_info->sb->s_bdev)
1432 root->fs_info->sb->s_bdev = next_device->bdev;
1433 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1434 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1435
1436 if (device->bdev)
1437 device->fs_devices->open_devices--;
1438
1439 call_rcu(&device->rcu, free_device);
1440 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1441
1442 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1443 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1444
1445 if (cur_devices->open_devices == 0) {
1446 struct btrfs_fs_devices *fs_devices;
1447 fs_devices = root->fs_info->fs_devices;
1448 while (fs_devices) {
1449 if (fs_devices->seed == cur_devices)
1450 break;
1451 fs_devices = fs_devices->seed;
1452 }
1453 fs_devices->seed = cur_devices->seed;
1454 cur_devices->seed = NULL;
1455 lock_chunks(root);
1456 __btrfs_close_devices(cur_devices);
1457 unlock_chunks(root);
1458 free_fs_devices(cur_devices);
1459 }
1460
1461 /*
1462 * at this point, the device is zero sized. We want to
1463 * remove it from the devices list and zero out the old super
1464 */
1465 if (clear_super) {
1466 /* make sure this device isn't detected as part of
1467 * the FS anymore
1468 */
1469 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1470 set_buffer_dirty(bh);
1471 sync_dirty_buffer(bh);
1472 }
1473
1474 ret = 0;
1475
1476 error_brelse:
1477 brelse(bh);
1478 error_close:
1479 if (bdev)
1480 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1481 out:
1482 mutex_unlock(&uuid_mutex);
1483 return ret;
1484 error_undo:
1485 if (device->writeable) {
1486 lock_chunks(root);
1487 list_add(&device->dev_alloc_list,
1488 &root->fs_info->fs_devices->alloc_list);
1489 unlock_chunks(root);
1490 root->fs_info->fs_devices->rw_devices++;
1491 }
1492 goto error_brelse;
1493 }
1494
1495 /*
1496 * does all the dirty work required for changing file system's UUID.
1497 */
btrfs_prepare_sprout(struct btrfs_root * root)1498 static int btrfs_prepare_sprout(struct btrfs_root *root)
1499 {
1500 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1501 struct btrfs_fs_devices *old_devices;
1502 struct btrfs_fs_devices *seed_devices;
1503 struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1504 struct btrfs_device *device;
1505 u64 super_flags;
1506
1507 BUG_ON(!mutex_is_locked(&uuid_mutex));
1508 if (!fs_devices->seeding)
1509 return -EINVAL;
1510
1511 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1512 if (!seed_devices)
1513 return -ENOMEM;
1514
1515 old_devices = clone_fs_devices(fs_devices);
1516 if (IS_ERR(old_devices)) {
1517 kfree(seed_devices);
1518 return PTR_ERR(old_devices);
1519 }
1520
1521 list_add(&old_devices->list, &fs_uuids);
1522
1523 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1524 seed_devices->opened = 1;
1525 INIT_LIST_HEAD(&seed_devices->devices);
1526 INIT_LIST_HEAD(&seed_devices->alloc_list);
1527 mutex_init(&seed_devices->device_list_mutex);
1528
1529 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1530 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1531 synchronize_rcu);
1532 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1533
1534 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1535 list_for_each_entry(device, &seed_devices->devices, dev_list) {
1536 device->fs_devices = seed_devices;
1537 }
1538
1539 fs_devices->seeding = 0;
1540 fs_devices->num_devices = 0;
1541 fs_devices->open_devices = 0;
1542 fs_devices->seed = seed_devices;
1543
1544 generate_random_uuid(fs_devices->fsid);
1545 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1546 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1547 super_flags = btrfs_super_flags(disk_super) &
1548 ~BTRFS_SUPER_FLAG_SEEDING;
1549 btrfs_set_super_flags(disk_super, super_flags);
1550
1551 return 0;
1552 }
1553
1554 /*
1555 * strore the expected generation for seed devices in device items.
1556 */
btrfs_finish_sprout(struct btrfs_trans_handle * trans,struct btrfs_root * root)1557 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1558 struct btrfs_root *root)
1559 {
1560 struct btrfs_path *path;
1561 struct extent_buffer *leaf;
1562 struct btrfs_dev_item *dev_item;
1563 struct btrfs_device *device;
1564 struct btrfs_key key;
1565 u8 fs_uuid[BTRFS_UUID_SIZE];
1566 u8 dev_uuid[BTRFS_UUID_SIZE];
1567 u64 devid;
1568 int ret;
1569
1570 path = btrfs_alloc_path();
1571 if (!path)
1572 return -ENOMEM;
1573
1574 root = root->fs_info->chunk_root;
1575 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1576 key.offset = 0;
1577 key.type = BTRFS_DEV_ITEM_KEY;
1578
1579 while (1) {
1580 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1581 if (ret < 0)
1582 goto error;
1583
1584 leaf = path->nodes[0];
1585 next_slot:
1586 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1587 ret = btrfs_next_leaf(root, path);
1588 if (ret > 0)
1589 break;
1590 if (ret < 0)
1591 goto error;
1592 leaf = path->nodes[0];
1593 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1594 btrfs_release_path(path);
1595 continue;
1596 }
1597
1598 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1599 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1600 key.type != BTRFS_DEV_ITEM_KEY)
1601 break;
1602
1603 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1604 struct btrfs_dev_item);
1605 devid = btrfs_device_id(leaf, dev_item);
1606 read_extent_buffer(leaf, dev_uuid,
1607 (unsigned long)btrfs_device_uuid(dev_item),
1608 BTRFS_UUID_SIZE);
1609 read_extent_buffer(leaf, fs_uuid,
1610 (unsigned long)btrfs_device_fsid(dev_item),
1611 BTRFS_UUID_SIZE);
1612 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1613 BUG_ON(!device); /* Logic error */
1614
1615 if (device->fs_devices->seeding) {
1616 btrfs_set_device_generation(leaf, dev_item,
1617 device->generation);
1618 btrfs_mark_buffer_dirty(leaf);
1619 }
1620
1621 path->slots[0]++;
1622 goto next_slot;
1623 }
1624 ret = 0;
1625 error:
1626 btrfs_free_path(path);
1627 return ret;
1628 }
1629
btrfs_init_new_device(struct btrfs_root * root,char * device_path)1630 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1631 {
1632 struct request_queue *q;
1633 struct btrfs_trans_handle *trans;
1634 struct btrfs_device *device;
1635 struct block_device *bdev;
1636 struct list_head *devices;
1637 struct super_block *sb = root->fs_info->sb;
1638 u64 total_bytes;
1639 int seeding_dev = 0;
1640 int ret = 0;
1641
1642 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1643 return -EINVAL;
1644
1645 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1646 root->fs_info->bdev_holder);
1647 if (IS_ERR(bdev))
1648 return PTR_ERR(bdev);
1649
1650 if (root->fs_info->fs_devices->seeding) {
1651 seeding_dev = 1;
1652 down_write(&sb->s_umount);
1653 mutex_lock(&uuid_mutex);
1654 }
1655
1656 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1657
1658 devices = &root->fs_info->fs_devices->devices;
1659 /*
1660 * we have the volume lock, so we don't need the extra
1661 * device list mutex while reading the list here.
1662 */
1663 list_for_each_entry(device, devices, dev_list) {
1664 if (device->bdev == bdev) {
1665 ret = -EEXIST;
1666 goto error;
1667 }
1668 }
1669
1670 device = kzalloc(sizeof(*device), GFP_NOFS);
1671 if (!device) {
1672 /* we can safely leave the fs_devices entry around */
1673 ret = -ENOMEM;
1674 goto error;
1675 }
1676
1677 device->name = kstrdup(device_path, GFP_NOFS);
1678 if (!device->name) {
1679 kfree(device);
1680 ret = -ENOMEM;
1681 goto error;
1682 }
1683
1684 ret = find_next_devid(root, &device->devid);
1685 if (ret) {
1686 kfree(device->name);
1687 kfree(device);
1688 goto error;
1689 }
1690
1691 trans = btrfs_start_transaction(root, 0);
1692 if (IS_ERR(trans)) {
1693 kfree(device->name);
1694 kfree(device);
1695 ret = PTR_ERR(trans);
1696 goto error;
1697 }
1698
1699 lock_chunks(root);
1700
1701 q = bdev_get_queue(bdev);
1702 if (blk_queue_discard(q))
1703 device->can_discard = 1;
1704 device->writeable = 1;
1705 device->work.func = pending_bios_fn;
1706 generate_random_uuid(device->uuid);
1707 spin_lock_init(&device->io_lock);
1708 device->generation = trans->transid;
1709 device->io_width = root->sectorsize;
1710 device->io_align = root->sectorsize;
1711 device->sector_size = root->sectorsize;
1712 device->total_bytes = i_size_read(bdev->bd_inode);
1713 device->disk_total_bytes = device->total_bytes;
1714 device->dev_root = root->fs_info->dev_root;
1715 device->bdev = bdev;
1716 device->in_fs_metadata = 1;
1717 device->mode = FMODE_EXCL;
1718 set_blocksize(device->bdev, 4096);
1719
1720 if (seeding_dev) {
1721 sb->s_flags &= ~MS_RDONLY;
1722 ret = btrfs_prepare_sprout(root);
1723 BUG_ON(ret); /* -ENOMEM */
1724 }
1725
1726 device->fs_devices = root->fs_info->fs_devices;
1727
1728 /*
1729 * we don't want write_supers to jump in here with our device
1730 * half setup
1731 */
1732 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1733 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1734 list_add(&device->dev_alloc_list,
1735 &root->fs_info->fs_devices->alloc_list);
1736 root->fs_info->fs_devices->num_devices++;
1737 root->fs_info->fs_devices->open_devices++;
1738 root->fs_info->fs_devices->rw_devices++;
1739 if (device->can_discard)
1740 root->fs_info->fs_devices->num_can_discard++;
1741 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1742
1743 spin_lock(&root->fs_info->free_chunk_lock);
1744 root->fs_info->free_chunk_space += device->total_bytes;
1745 spin_unlock(&root->fs_info->free_chunk_lock);
1746
1747 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1748 root->fs_info->fs_devices->rotating = 1;
1749
1750 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1751 btrfs_set_super_total_bytes(root->fs_info->super_copy,
1752 total_bytes + device->total_bytes);
1753
1754 total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1755 btrfs_set_super_num_devices(root->fs_info->super_copy,
1756 total_bytes + 1);
1757 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1758
1759 if (seeding_dev) {
1760 ret = init_first_rw_device(trans, root, device);
1761 if (ret)
1762 goto error_trans;
1763 ret = btrfs_finish_sprout(trans, root);
1764 if (ret)
1765 goto error_trans;
1766 } else {
1767 ret = btrfs_add_device(trans, root, device);
1768 if (ret)
1769 goto error_trans;
1770 }
1771
1772 /*
1773 * we've got more storage, clear any full flags on the space
1774 * infos
1775 */
1776 btrfs_clear_space_info_full(root->fs_info);
1777
1778 unlock_chunks(root);
1779 ret = btrfs_commit_transaction(trans, root);
1780
1781 if (seeding_dev) {
1782 mutex_unlock(&uuid_mutex);
1783 up_write(&sb->s_umount);
1784
1785 if (ret) /* transaction commit */
1786 return ret;
1787
1788 ret = btrfs_relocate_sys_chunks(root);
1789 if (ret < 0)
1790 btrfs_error(root->fs_info, ret,
1791 "Failed to relocate sys chunks after "
1792 "device initialization. This can be fixed "
1793 "using the \"btrfs balance\" command.");
1794 }
1795
1796 return ret;
1797
1798 error_trans:
1799 unlock_chunks(root);
1800 btrfs_abort_transaction(trans, root, ret);
1801 btrfs_end_transaction(trans, root);
1802 kfree(device->name);
1803 kfree(device);
1804 error:
1805 blkdev_put(bdev, FMODE_EXCL);
1806 if (seeding_dev) {
1807 mutex_unlock(&uuid_mutex);
1808 up_write(&sb->s_umount);
1809 }
1810 return ret;
1811 }
1812
btrfs_update_device(struct btrfs_trans_handle * trans,struct btrfs_device * device)1813 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1814 struct btrfs_device *device)
1815 {
1816 int ret;
1817 struct btrfs_path *path;
1818 struct btrfs_root *root;
1819 struct btrfs_dev_item *dev_item;
1820 struct extent_buffer *leaf;
1821 struct btrfs_key key;
1822
1823 root = device->dev_root->fs_info->chunk_root;
1824
1825 path = btrfs_alloc_path();
1826 if (!path)
1827 return -ENOMEM;
1828
1829 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1830 key.type = BTRFS_DEV_ITEM_KEY;
1831 key.offset = device->devid;
1832
1833 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1834 if (ret < 0)
1835 goto out;
1836
1837 if (ret > 0) {
1838 ret = -ENOENT;
1839 goto out;
1840 }
1841
1842 leaf = path->nodes[0];
1843 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1844
1845 btrfs_set_device_id(leaf, dev_item, device->devid);
1846 btrfs_set_device_type(leaf, dev_item, device->type);
1847 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1848 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1849 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1850 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1851 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1852 btrfs_mark_buffer_dirty(leaf);
1853
1854 out:
1855 btrfs_free_path(path);
1856 return ret;
1857 }
1858
__btrfs_grow_device(struct btrfs_trans_handle * trans,struct btrfs_device * device,u64 new_size)1859 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1860 struct btrfs_device *device, u64 new_size)
1861 {
1862 struct btrfs_super_block *super_copy =
1863 device->dev_root->fs_info->super_copy;
1864 u64 old_total = btrfs_super_total_bytes(super_copy);
1865 u64 diff = new_size - device->total_bytes;
1866
1867 if (!device->writeable)
1868 return -EACCES;
1869 if (new_size <= device->total_bytes)
1870 return -EINVAL;
1871
1872 btrfs_set_super_total_bytes(super_copy, old_total + diff);
1873 device->fs_devices->total_rw_bytes += diff;
1874
1875 device->total_bytes = new_size;
1876 device->disk_total_bytes = new_size;
1877 btrfs_clear_space_info_full(device->dev_root->fs_info);
1878
1879 return btrfs_update_device(trans, device);
1880 }
1881
btrfs_grow_device(struct btrfs_trans_handle * trans,struct btrfs_device * device,u64 new_size)1882 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1883 struct btrfs_device *device, u64 new_size)
1884 {
1885 int ret;
1886 lock_chunks(device->dev_root);
1887 ret = __btrfs_grow_device(trans, device, new_size);
1888 unlock_chunks(device->dev_root);
1889 return ret;
1890 }
1891
btrfs_free_chunk(struct btrfs_trans_handle * trans,struct btrfs_root * root,u64 chunk_tree,u64 chunk_objectid,u64 chunk_offset)1892 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1893 struct btrfs_root *root,
1894 u64 chunk_tree, u64 chunk_objectid,
1895 u64 chunk_offset)
1896 {
1897 int ret;
1898 struct btrfs_path *path;
1899 struct btrfs_key key;
1900
1901 root = root->fs_info->chunk_root;
1902 path = btrfs_alloc_path();
1903 if (!path)
1904 return -ENOMEM;
1905
1906 key.objectid = chunk_objectid;
1907 key.offset = chunk_offset;
1908 key.type = BTRFS_CHUNK_ITEM_KEY;
1909
1910 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1911 if (ret < 0)
1912 goto out;
1913 else if (ret > 0) { /* Logic error or corruption */
1914 btrfs_error(root->fs_info, -ENOENT,
1915 "Failed lookup while freeing chunk.");
1916 ret = -ENOENT;
1917 goto out;
1918 }
1919
1920 ret = btrfs_del_item(trans, root, path);
1921 if (ret < 0)
1922 btrfs_error(root->fs_info, ret,
1923 "Failed to delete chunk item.");
1924 out:
1925 btrfs_free_path(path);
1926 return ret;
1927 }
1928
btrfs_del_sys_chunk(struct btrfs_root * root,u64 chunk_objectid,u64 chunk_offset)1929 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1930 chunk_offset)
1931 {
1932 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
1933 struct btrfs_disk_key *disk_key;
1934 struct btrfs_chunk *chunk;
1935 u8 *ptr;
1936 int ret = 0;
1937 u32 num_stripes;
1938 u32 array_size;
1939 u32 len = 0;
1940 u32 cur;
1941 struct btrfs_key key;
1942
1943 array_size = btrfs_super_sys_array_size(super_copy);
1944
1945 ptr = super_copy->sys_chunk_array;
1946 cur = 0;
1947
1948 while (cur < array_size) {
1949 disk_key = (struct btrfs_disk_key *)ptr;
1950 btrfs_disk_key_to_cpu(&key, disk_key);
1951
1952 len = sizeof(*disk_key);
1953
1954 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1955 chunk = (struct btrfs_chunk *)(ptr + len);
1956 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1957 len += btrfs_chunk_item_size(num_stripes);
1958 } else {
1959 ret = -EIO;
1960 break;
1961 }
1962 if (key.objectid == chunk_objectid &&
1963 key.offset == chunk_offset) {
1964 memmove(ptr, ptr + len, array_size - (cur + len));
1965 array_size -= len;
1966 btrfs_set_super_sys_array_size(super_copy, array_size);
1967 } else {
1968 ptr += len;
1969 cur += len;
1970 }
1971 }
1972 return ret;
1973 }
1974
btrfs_relocate_chunk(struct btrfs_root * root,u64 chunk_tree,u64 chunk_objectid,u64 chunk_offset)1975 static int btrfs_relocate_chunk(struct btrfs_root *root,
1976 u64 chunk_tree, u64 chunk_objectid,
1977 u64 chunk_offset)
1978 {
1979 struct extent_map_tree *em_tree;
1980 struct btrfs_root *extent_root;
1981 struct btrfs_trans_handle *trans;
1982 struct extent_map *em;
1983 struct map_lookup *map;
1984 int ret;
1985 int i;
1986
1987 root = root->fs_info->chunk_root;
1988 extent_root = root->fs_info->extent_root;
1989 em_tree = &root->fs_info->mapping_tree.map_tree;
1990
1991 ret = btrfs_can_relocate(extent_root, chunk_offset);
1992 if (ret)
1993 return -ENOSPC;
1994
1995 /* step one, relocate all the extents inside this chunk */
1996 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1997 if (ret)
1998 return ret;
1999
2000 trans = btrfs_start_transaction(root, 0);
2001 BUG_ON(IS_ERR(trans));
2002
2003 lock_chunks(root);
2004
2005 /*
2006 * step two, delete the device extents and the
2007 * chunk tree entries
2008 */
2009 read_lock(&em_tree->lock);
2010 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2011 read_unlock(&em_tree->lock);
2012
2013 BUG_ON(!em || em->start > chunk_offset ||
2014 em->start + em->len < chunk_offset);
2015 map = (struct map_lookup *)em->bdev;
2016
2017 for (i = 0; i < map->num_stripes; i++) {
2018 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2019 map->stripes[i].physical);
2020 BUG_ON(ret);
2021
2022 if (map->stripes[i].dev) {
2023 ret = btrfs_update_device(trans, map->stripes[i].dev);
2024 BUG_ON(ret);
2025 }
2026 }
2027 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2028 chunk_offset);
2029
2030 BUG_ON(ret);
2031
2032 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2033
2034 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2035 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2036 BUG_ON(ret);
2037 }
2038
2039 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2040 BUG_ON(ret);
2041
2042 write_lock(&em_tree->lock);
2043 remove_extent_mapping(em_tree, em);
2044 write_unlock(&em_tree->lock);
2045
2046 kfree(map);
2047 em->bdev = NULL;
2048
2049 /* once for the tree */
2050 free_extent_map(em);
2051 /* once for us */
2052 free_extent_map(em);
2053
2054 unlock_chunks(root);
2055 btrfs_end_transaction(trans, root);
2056 return 0;
2057 }
2058
btrfs_relocate_sys_chunks(struct btrfs_root * root)2059 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2060 {
2061 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2062 struct btrfs_path *path;
2063 struct extent_buffer *leaf;
2064 struct btrfs_chunk *chunk;
2065 struct btrfs_key key;
2066 struct btrfs_key found_key;
2067 u64 chunk_tree = chunk_root->root_key.objectid;
2068 u64 chunk_type;
2069 bool retried = false;
2070 int failed = 0;
2071 int ret;
2072
2073 path = btrfs_alloc_path();
2074 if (!path)
2075 return -ENOMEM;
2076
2077 again:
2078 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2079 key.offset = (u64)-1;
2080 key.type = BTRFS_CHUNK_ITEM_KEY;
2081
2082 while (1) {
2083 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2084 if (ret < 0)
2085 goto error;
2086 BUG_ON(ret == 0); /* Corruption */
2087
2088 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2089 key.type);
2090 if (ret < 0)
2091 goto error;
2092 if (ret > 0)
2093 break;
2094
2095 leaf = path->nodes[0];
2096 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2097
2098 chunk = btrfs_item_ptr(leaf, path->slots[0],
2099 struct btrfs_chunk);
2100 chunk_type = btrfs_chunk_type(leaf, chunk);
2101 btrfs_release_path(path);
2102
2103 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2104 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2105 found_key.objectid,
2106 found_key.offset);
2107 if (ret == -ENOSPC)
2108 failed++;
2109 else if (ret)
2110 BUG();
2111 }
2112
2113 if (found_key.offset == 0)
2114 break;
2115 key.offset = found_key.offset - 1;
2116 }
2117 ret = 0;
2118 if (failed && !retried) {
2119 failed = 0;
2120 retried = true;
2121 goto again;
2122 } else if (failed && retried) {
2123 WARN_ON(1);
2124 ret = -ENOSPC;
2125 }
2126 error:
2127 btrfs_free_path(path);
2128 return ret;
2129 }
2130
insert_balance_item(struct btrfs_root * root,struct btrfs_balance_control * bctl)2131 static int insert_balance_item(struct btrfs_root *root,
2132 struct btrfs_balance_control *bctl)
2133 {
2134 struct btrfs_trans_handle *trans;
2135 struct btrfs_balance_item *item;
2136 struct btrfs_disk_balance_args disk_bargs;
2137 struct btrfs_path *path;
2138 struct extent_buffer *leaf;
2139 struct btrfs_key key;
2140 int ret, err;
2141
2142 path = btrfs_alloc_path();
2143 if (!path)
2144 return -ENOMEM;
2145
2146 trans = btrfs_start_transaction(root, 0);
2147 if (IS_ERR(trans)) {
2148 btrfs_free_path(path);
2149 return PTR_ERR(trans);
2150 }
2151
2152 key.objectid = BTRFS_BALANCE_OBJECTID;
2153 key.type = BTRFS_BALANCE_ITEM_KEY;
2154 key.offset = 0;
2155
2156 ret = btrfs_insert_empty_item(trans, root, path, &key,
2157 sizeof(*item));
2158 if (ret)
2159 goto out;
2160
2161 leaf = path->nodes[0];
2162 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2163
2164 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2165
2166 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2167 btrfs_set_balance_data(leaf, item, &disk_bargs);
2168 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2169 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2170 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2171 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2172
2173 btrfs_set_balance_flags(leaf, item, bctl->flags);
2174
2175 btrfs_mark_buffer_dirty(leaf);
2176 out:
2177 btrfs_free_path(path);
2178 err = btrfs_commit_transaction(trans, root);
2179 if (err && !ret)
2180 ret = err;
2181 return ret;
2182 }
2183
del_balance_item(struct btrfs_root * root)2184 static int del_balance_item(struct btrfs_root *root)
2185 {
2186 struct btrfs_trans_handle *trans;
2187 struct btrfs_path *path;
2188 struct btrfs_key key;
2189 int ret, err;
2190
2191 path = btrfs_alloc_path();
2192 if (!path)
2193 return -ENOMEM;
2194
2195 trans = btrfs_start_transaction(root, 0);
2196 if (IS_ERR(trans)) {
2197 btrfs_free_path(path);
2198 return PTR_ERR(trans);
2199 }
2200
2201 key.objectid = BTRFS_BALANCE_OBJECTID;
2202 key.type = BTRFS_BALANCE_ITEM_KEY;
2203 key.offset = 0;
2204
2205 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2206 if (ret < 0)
2207 goto out;
2208 if (ret > 0) {
2209 ret = -ENOENT;
2210 goto out;
2211 }
2212
2213 ret = btrfs_del_item(trans, root, path);
2214 out:
2215 btrfs_free_path(path);
2216 err = btrfs_commit_transaction(trans, root);
2217 if (err && !ret)
2218 ret = err;
2219 return ret;
2220 }
2221
2222 /*
2223 * This is a heuristic used to reduce the number of chunks balanced on
2224 * resume after balance was interrupted.
2225 */
update_balance_args(struct btrfs_balance_control * bctl)2226 static void update_balance_args(struct btrfs_balance_control *bctl)
2227 {
2228 /*
2229 * Turn on soft mode for chunk types that were being converted.
2230 */
2231 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2232 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2233 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2234 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2235 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2236 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2237
2238 /*
2239 * Turn on usage filter if is not already used. The idea is
2240 * that chunks that we have already balanced should be
2241 * reasonably full. Don't do it for chunks that are being
2242 * converted - that will keep us from relocating unconverted
2243 * (albeit full) chunks.
2244 */
2245 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2246 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2247 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2248 bctl->data.usage = 90;
2249 }
2250 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2251 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2252 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2253 bctl->sys.usage = 90;
2254 }
2255 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2256 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2257 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2258 bctl->meta.usage = 90;
2259 }
2260 }
2261
2262 /*
2263 * Should be called with both balance and volume mutexes held to
2264 * serialize other volume operations (add_dev/rm_dev/resize) with
2265 * restriper. Same goes for unset_balance_control.
2266 */
set_balance_control(struct btrfs_balance_control * bctl)2267 static void set_balance_control(struct btrfs_balance_control *bctl)
2268 {
2269 struct btrfs_fs_info *fs_info = bctl->fs_info;
2270
2271 BUG_ON(fs_info->balance_ctl);
2272
2273 spin_lock(&fs_info->balance_lock);
2274 fs_info->balance_ctl = bctl;
2275 spin_unlock(&fs_info->balance_lock);
2276 }
2277
unset_balance_control(struct btrfs_fs_info * fs_info)2278 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2279 {
2280 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2281
2282 BUG_ON(!fs_info->balance_ctl);
2283
2284 spin_lock(&fs_info->balance_lock);
2285 fs_info->balance_ctl = NULL;
2286 spin_unlock(&fs_info->balance_lock);
2287
2288 kfree(bctl);
2289 }
2290
2291 /*
2292 * Balance filters. Return 1 if chunk should be filtered out
2293 * (should not be balanced).
2294 */
chunk_profiles_filter(u64 chunk_type,struct btrfs_balance_args * bargs)2295 static int chunk_profiles_filter(u64 chunk_type,
2296 struct btrfs_balance_args *bargs)
2297 {
2298 chunk_type = chunk_to_extended(chunk_type) &
2299 BTRFS_EXTENDED_PROFILE_MASK;
2300
2301 if (bargs->profiles & chunk_type)
2302 return 0;
2303
2304 return 1;
2305 }
2306
div_factor_fine(u64 num,int factor)2307 static u64 div_factor_fine(u64 num, int factor)
2308 {
2309 if (factor <= 0)
2310 return 0;
2311 if (factor >= 100)
2312 return num;
2313
2314 num *= factor;
2315 do_div(num, 100);
2316 return num;
2317 }
2318
chunk_usage_filter(struct btrfs_fs_info * fs_info,u64 chunk_offset,struct btrfs_balance_args * bargs)2319 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2320 struct btrfs_balance_args *bargs)
2321 {
2322 struct btrfs_block_group_cache *cache;
2323 u64 chunk_used, user_thresh;
2324 int ret = 1;
2325
2326 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2327 chunk_used = btrfs_block_group_used(&cache->item);
2328
2329 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2330 if (chunk_used < user_thresh)
2331 ret = 0;
2332
2333 btrfs_put_block_group(cache);
2334 return ret;
2335 }
2336
chunk_devid_filter(struct extent_buffer * leaf,struct btrfs_chunk * chunk,struct btrfs_balance_args * bargs)2337 static int chunk_devid_filter(struct extent_buffer *leaf,
2338 struct btrfs_chunk *chunk,
2339 struct btrfs_balance_args *bargs)
2340 {
2341 struct btrfs_stripe *stripe;
2342 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2343 int i;
2344
2345 for (i = 0; i < num_stripes; i++) {
2346 stripe = btrfs_stripe_nr(chunk, i);
2347 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2348 return 0;
2349 }
2350
2351 return 1;
2352 }
2353
2354 /* [pstart, pend) */
chunk_drange_filter(struct extent_buffer * leaf,struct btrfs_chunk * chunk,u64 chunk_offset,struct btrfs_balance_args * bargs)2355 static int chunk_drange_filter(struct extent_buffer *leaf,
2356 struct btrfs_chunk *chunk,
2357 u64 chunk_offset,
2358 struct btrfs_balance_args *bargs)
2359 {
2360 struct btrfs_stripe *stripe;
2361 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2362 u64 stripe_offset;
2363 u64 stripe_length;
2364 int factor;
2365 int i;
2366
2367 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2368 return 0;
2369
2370 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2371 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2372 factor = 2;
2373 else
2374 factor = 1;
2375 factor = num_stripes / factor;
2376
2377 for (i = 0; i < num_stripes; i++) {
2378 stripe = btrfs_stripe_nr(chunk, i);
2379 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2380 continue;
2381
2382 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2383 stripe_length = btrfs_chunk_length(leaf, chunk);
2384 do_div(stripe_length, factor);
2385
2386 if (stripe_offset < bargs->pend &&
2387 stripe_offset + stripe_length > bargs->pstart)
2388 return 0;
2389 }
2390
2391 return 1;
2392 }
2393
2394 /* [vstart, vend) */
chunk_vrange_filter(struct extent_buffer * leaf,struct btrfs_chunk * chunk,u64 chunk_offset,struct btrfs_balance_args * bargs)2395 static int chunk_vrange_filter(struct extent_buffer *leaf,
2396 struct btrfs_chunk *chunk,
2397 u64 chunk_offset,
2398 struct btrfs_balance_args *bargs)
2399 {
2400 if (chunk_offset < bargs->vend &&
2401 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2402 /* at least part of the chunk is inside this vrange */
2403 return 0;
2404
2405 return 1;
2406 }
2407
chunk_soft_convert_filter(u64 chunk_type,struct btrfs_balance_args * bargs)2408 static int chunk_soft_convert_filter(u64 chunk_type,
2409 struct btrfs_balance_args *bargs)
2410 {
2411 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2412 return 0;
2413
2414 chunk_type = chunk_to_extended(chunk_type) &
2415 BTRFS_EXTENDED_PROFILE_MASK;
2416
2417 if (bargs->target == chunk_type)
2418 return 1;
2419
2420 return 0;
2421 }
2422
should_balance_chunk(struct btrfs_root * root,struct extent_buffer * leaf,struct btrfs_chunk * chunk,u64 chunk_offset)2423 static int should_balance_chunk(struct btrfs_root *root,
2424 struct extent_buffer *leaf,
2425 struct btrfs_chunk *chunk, u64 chunk_offset)
2426 {
2427 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2428 struct btrfs_balance_args *bargs = NULL;
2429 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2430
2431 /* type filter */
2432 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2433 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2434 return 0;
2435 }
2436
2437 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2438 bargs = &bctl->data;
2439 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2440 bargs = &bctl->sys;
2441 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2442 bargs = &bctl->meta;
2443
2444 /* profiles filter */
2445 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2446 chunk_profiles_filter(chunk_type, bargs)) {
2447 return 0;
2448 }
2449
2450 /* usage filter */
2451 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2452 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2453 return 0;
2454 }
2455
2456 /* devid filter */
2457 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2458 chunk_devid_filter(leaf, chunk, bargs)) {
2459 return 0;
2460 }
2461
2462 /* drange filter, makes sense only with devid filter */
2463 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2464 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2465 return 0;
2466 }
2467
2468 /* vrange filter */
2469 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2470 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2471 return 0;
2472 }
2473
2474 /* soft profile changing mode */
2475 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2476 chunk_soft_convert_filter(chunk_type, bargs)) {
2477 return 0;
2478 }
2479
2480 return 1;
2481 }
2482
div_factor(u64 num,int factor)2483 static u64 div_factor(u64 num, int factor)
2484 {
2485 if (factor == 10)
2486 return num;
2487 num *= factor;
2488 do_div(num, 10);
2489 return num;
2490 }
2491
__btrfs_balance(struct btrfs_fs_info * fs_info)2492 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2493 {
2494 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2495 struct btrfs_root *chunk_root = fs_info->chunk_root;
2496 struct btrfs_root *dev_root = fs_info->dev_root;
2497 struct list_head *devices;
2498 struct btrfs_device *device;
2499 u64 old_size;
2500 u64 size_to_free;
2501 struct btrfs_chunk *chunk;
2502 struct btrfs_path *path;
2503 struct btrfs_key key;
2504 struct btrfs_key found_key;
2505 struct btrfs_trans_handle *trans;
2506 struct extent_buffer *leaf;
2507 int slot;
2508 int ret;
2509 int enospc_errors = 0;
2510 bool counting = true;
2511
2512 /* step one make some room on all the devices */
2513 devices = &fs_info->fs_devices->devices;
2514 list_for_each_entry(device, devices, dev_list) {
2515 old_size = device->total_bytes;
2516 size_to_free = div_factor(old_size, 1);
2517 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2518 if (!device->writeable ||
2519 device->total_bytes - device->bytes_used > size_to_free)
2520 continue;
2521
2522 ret = btrfs_shrink_device(device, old_size - size_to_free);
2523 if (ret == -ENOSPC)
2524 break;
2525 BUG_ON(ret);
2526
2527 trans = btrfs_start_transaction(dev_root, 0);
2528 BUG_ON(IS_ERR(trans));
2529
2530 ret = btrfs_grow_device(trans, device, old_size);
2531 BUG_ON(ret);
2532
2533 btrfs_end_transaction(trans, dev_root);
2534 }
2535
2536 /* step two, relocate all the chunks */
2537 path = btrfs_alloc_path();
2538 if (!path) {
2539 ret = -ENOMEM;
2540 goto error;
2541 }
2542
2543 /* zero out stat counters */
2544 spin_lock(&fs_info->balance_lock);
2545 memset(&bctl->stat, 0, sizeof(bctl->stat));
2546 spin_unlock(&fs_info->balance_lock);
2547 again:
2548 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2549 key.offset = (u64)-1;
2550 key.type = BTRFS_CHUNK_ITEM_KEY;
2551
2552 while (1) {
2553 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2554 atomic_read(&fs_info->balance_cancel_req)) {
2555 ret = -ECANCELED;
2556 goto error;
2557 }
2558
2559 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2560 if (ret < 0)
2561 goto error;
2562
2563 /*
2564 * this shouldn't happen, it means the last relocate
2565 * failed
2566 */
2567 if (ret == 0)
2568 BUG(); /* FIXME break ? */
2569
2570 ret = btrfs_previous_item(chunk_root, path, 0,
2571 BTRFS_CHUNK_ITEM_KEY);
2572 if (ret) {
2573 ret = 0;
2574 break;
2575 }
2576
2577 leaf = path->nodes[0];
2578 slot = path->slots[0];
2579 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2580
2581 if (found_key.objectid != key.objectid)
2582 break;
2583
2584 /* chunk zero is special */
2585 if (found_key.offset == 0)
2586 break;
2587
2588 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2589
2590 if (!counting) {
2591 spin_lock(&fs_info->balance_lock);
2592 bctl->stat.considered++;
2593 spin_unlock(&fs_info->balance_lock);
2594 }
2595
2596 ret = should_balance_chunk(chunk_root, leaf, chunk,
2597 found_key.offset);
2598 btrfs_release_path(path);
2599 if (!ret)
2600 goto loop;
2601
2602 if (counting) {
2603 spin_lock(&fs_info->balance_lock);
2604 bctl->stat.expected++;
2605 spin_unlock(&fs_info->balance_lock);
2606 goto loop;
2607 }
2608
2609 ret = btrfs_relocate_chunk(chunk_root,
2610 chunk_root->root_key.objectid,
2611 found_key.objectid,
2612 found_key.offset);
2613 if (ret && ret != -ENOSPC)
2614 goto error;
2615 if (ret == -ENOSPC) {
2616 enospc_errors++;
2617 } else {
2618 spin_lock(&fs_info->balance_lock);
2619 bctl->stat.completed++;
2620 spin_unlock(&fs_info->balance_lock);
2621 }
2622 loop:
2623 key.offset = found_key.offset - 1;
2624 }
2625
2626 if (counting) {
2627 btrfs_release_path(path);
2628 counting = false;
2629 goto again;
2630 }
2631 error:
2632 btrfs_free_path(path);
2633 if (enospc_errors) {
2634 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2635 enospc_errors);
2636 if (!ret)
2637 ret = -ENOSPC;
2638 }
2639
2640 return ret;
2641 }
2642
2643 /**
2644 * alloc_profile_is_valid - see if a given profile is valid and reduced
2645 * @flags: profile to validate
2646 * @extended: if true @flags is treated as an extended profile
2647 */
alloc_profile_is_valid(u64 flags,int extended)2648 static int alloc_profile_is_valid(u64 flags, int extended)
2649 {
2650 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2651 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2652
2653 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2654
2655 /* 1) check that all other bits are zeroed */
2656 if (flags & ~mask)
2657 return 0;
2658
2659 /* 2) see if profile is reduced */
2660 if (flags == 0)
2661 return !extended; /* "0" is valid for usual profiles */
2662
2663 /* true if exactly one bit set */
2664 return (flags & (flags - 1)) == 0;
2665 }
2666
balance_need_close(struct btrfs_fs_info * fs_info)2667 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2668 {
2669 /* cancel requested || normal exit path */
2670 return atomic_read(&fs_info->balance_cancel_req) ||
2671 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2672 atomic_read(&fs_info->balance_cancel_req) == 0);
2673 }
2674
__cancel_balance(struct btrfs_fs_info * fs_info)2675 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2676 {
2677 int ret;
2678
2679 unset_balance_control(fs_info);
2680 ret = del_balance_item(fs_info->tree_root);
2681 BUG_ON(ret);
2682 }
2683
2684 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2685 struct btrfs_ioctl_balance_args *bargs);
2686
2687 /*
2688 * Should be called with both balance and volume mutexes held
2689 */
btrfs_balance(struct btrfs_balance_control * bctl,struct btrfs_ioctl_balance_args * bargs)2690 int btrfs_balance(struct btrfs_balance_control *bctl,
2691 struct btrfs_ioctl_balance_args *bargs)
2692 {
2693 struct btrfs_fs_info *fs_info = bctl->fs_info;
2694 u64 allowed;
2695 int mixed = 0;
2696 int ret;
2697
2698 if (btrfs_fs_closing(fs_info) ||
2699 atomic_read(&fs_info->balance_pause_req) ||
2700 atomic_read(&fs_info->balance_cancel_req)) {
2701 ret = -EINVAL;
2702 goto out;
2703 }
2704
2705 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2706 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2707 mixed = 1;
2708
2709 /*
2710 * In case of mixed groups both data and meta should be picked,
2711 * and identical options should be given for both of them.
2712 */
2713 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2714 if (mixed && (bctl->flags & allowed)) {
2715 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2716 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2717 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2718 printk(KERN_ERR "btrfs: with mixed groups data and "
2719 "metadata balance options must be the same\n");
2720 ret = -EINVAL;
2721 goto out;
2722 }
2723 }
2724
2725 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2726 if (fs_info->fs_devices->num_devices == 1)
2727 allowed |= BTRFS_BLOCK_GROUP_DUP;
2728 else if (fs_info->fs_devices->num_devices < 4)
2729 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2730 else
2731 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2732 BTRFS_BLOCK_GROUP_RAID10);
2733
2734 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2735 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2736 (bctl->data.target & ~allowed))) {
2737 printk(KERN_ERR "btrfs: unable to start balance with target "
2738 "data profile %llu\n",
2739 (unsigned long long)bctl->data.target);
2740 ret = -EINVAL;
2741 goto out;
2742 }
2743 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2744 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2745 (bctl->meta.target & ~allowed))) {
2746 printk(KERN_ERR "btrfs: unable to start balance with target "
2747 "metadata profile %llu\n",
2748 (unsigned long long)bctl->meta.target);
2749 ret = -EINVAL;
2750 goto out;
2751 }
2752 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2753 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2754 (bctl->sys.target & ~allowed))) {
2755 printk(KERN_ERR "btrfs: unable to start balance with target "
2756 "system profile %llu\n",
2757 (unsigned long long)bctl->sys.target);
2758 ret = -EINVAL;
2759 goto out;
2760 }
2761
2762 /* allow dup'ed data chunks only in mixed mode */
2763 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2764 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2765 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2766 ret = -EINVAL;
2767 goto out;
2768 }
2769
2770 /* allow to reduce meta or sys integrity only if force set */
2771 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2772 BTRFS_BLOCK_GROUP_RAID10;
2773 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2774 (fs_info->avail_system_alloc_bits & allowed) &&
2775 !(bctl->sys.target & allowed)) ||
2776 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2777 (fs_info->avail_metadata_alloc_bits & allowed) &&
2778 !(bctl->meta.target & allowed))) {
2779 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2780 printk(KERN_INFO "btrfs: force reducing metadata "
2781 "integrity\n");
2782 } else {
2783 printk(KERN_ERR "btrfs: balance will reduce metadata "
2784 "integrity, use force if you want this\n");
2785 ret = -EINVAL;
2786 goto out;
2787 }
2788 }
2789
2790 ret = insert_balance_item(fs_info->tree_root, bctl);
2791 if (ret && ret != -EEXIST)
2792 goto out;
2793
2794 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2795 BUG_ON(ret == -EEXIST);
2796 set_balance_control(bctl);
2797 } else {
2798 BUG_ON(ret != -EEXIST);
2799 spin_lock(&fs_info->balance_lock);
2800 update_balance_args(bctl);
2801 spin_unlock(&fs_info->balance_lock);
2802 }
2803
2804 atomic_inc(&fs_info->balance_running);
2805 mutex_unlock(&fs_info->balance_mutex);
2806
2807 ret = __btrfs_balance(fs_info);
2808
2809 mutex_lock(&fs_info->balance_mutex);
2810 atomic_dec(&fs_info->balance_running);
2811
2812 if (bargs) {
2813 memset(bargs, 0, sizeof(*bargs));
2814 update_ioctl_balance_args(fs_info, 0, bargs);
2815 }
2816
2817 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2818 balance_need_close(fs_info)) {
2819 __cancel_balance(fs_info);
2820 }
2821
2822 wake_up(&fs_info->balance_wait_q);
2823
2824 return ret;
2825 out:
2826 if (bctl->flags & BTRFS_BALANCE_RESUME)
2827 __cancel_balance(fs_info);
2828 else
2829 kfree(bctl);
2830 return ret;
2831 }
2832
balance_kthread(void * data)2833 static int balance_kthread(void *data)
2834 {
2835 struct btrfs_balance_control *bctl =
2836 (struct btrfs_balance_control *)data;
2837 struct btrfs_fs_info *fs_info = bctl->fs_info;
2838 int ret = 0;
2839
2840 mutex_lock(&fs_info->volume_mutex);
2841 mutex_lock(&fs_info->balance_mutex);
2842
2843 set_balance_control(bctl);
2844
2845 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2846 printk(KERN_INFO "btrfs: force skipping balance\n");
2847 } else {
2848 printk(KERN_INFO "btrfs: continuing balance\n");
2849 ret = btrfs_balance(bctl, NULL);
2850 }
2851
2852 mutex_unlock(&fs_info->balance_mutex);
2853 mutex_unlock(&fs_info->volume_mutex);
2854 return ret;
2855 }
2856
btrfs_recover_balance(struct btrfs_root * tree_root)2857 int btrfs_recover_balance(struct btrfs_root *tree_root)
2858 {
2859 struct task_struct *tsk;
2860 struct btrfs_balance_control *bctl;
2861 struct btrfs_balance_item *item;
2862 struct btrfs_disk_balance_args disk_bargs;
2863 struct btrfs_path *path;
2864 struct extent_buffer *leaf;
2865 struct btrfs_key key;
2866 int ret;
2867
2868 path = btrfs_alloc_path();
2869 if (!path)
2870 return -ENOMEM;
2871
2872 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2873 if (!bctl) {
2874 ret = -ENOMEM;
2875 goto out;
2876 }
2877
2878 key.objectid = BTRFS_BALANCE_OBJECTID;
2879 key.type = BTRFS_BALANCE_ITEM_KEY;
2880 key.offset = 0;
2881
2882 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2883 if (ret < 0)
2884 goto out_bctl;
2885 if (ret > 0) { /* ret = -ENOENT; */
2886 ret = 0;
2887 goto out_bctl;
2888 }
2889
2890 leaf = path->nodes[0];
2891 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2892
2893 bctl->fs_info = tree_root->fs_info;
2894 bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
2895
2896 btrfs_balance_data(leaf, item, &disk_bargs);
2897 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2898 btrfs_balance_meta(leaf, item, &disk_bargs);
2899 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2900 btrfs_balance_sys(leaf, item, &disk_bargs);
2901 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2902
2903 tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
2904 if (IS_ERR(tsk))
2905 ret = PTR_ERR(tsk);
2906 else
2907 goto out;
2908
2909 out_bctl:
2910 kfree(bctl);
2911 out:
2912 btrfs_free_path(path);
2913 return ret;
2914 }
2915
btrfs_pause_balance(struct btrfs_fs_info * fs_info)2916 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2917 {
2918 int ret = 0;
2919
2920 mutex_lock(&fs_info->balance_mutex);
2921 if (!fs_info->balance_ctl) {
2922 mutex_unlock(&fs_info->balance_mutex);
2923 return -ENOTCONN;
2924 }
2925
2926 if (atomic_read(&fs_info->balance_running)) {
2927 atomic_inc(&fs_info->balance_pause_req);
2928 mutex_unlock(&fs_info->balance_mutex);
2929
2930 wait_event(fs_info->balance_wait_q,
2931 atomic_read(&fs_info->balance_running) == 0);
2932
2933 mutex_lock(&fs_info->balance_mutex);
2934 /* we are good with balance_ctl ripped off from under us */
2935 BUG_ON(atomic_read(&fs_info->balance_running));
2936 atomic_dec(&fs_info->balance_pause_req);
2937 } else {
2938 ret = -ENOTCONN;
2939 }
2940
2941 mutex_unlock(&fs_info->balance_mutex);
2942 return ret;
2943 }
2944
btrfs_cancel_balance(struct btrfs_fs_info * fs_info)2945 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
2946 {
2947 mutex_lock(&fs_info->balance_mutex);
2948 if (!fs_info->balance_ctl) {
2949 mutex_unlock(&fs_info->balance_mutex);
2950 return -ENOTCONN;
2951 }
2952
2953 atomic_inc(&fs_info->balance_cancel_req);
2954 /*
2955 * if we are running just wait and return, balance item is
2956 * deleted in btrfs_balance in this case
2957 */
2958 if (atomic_read(&fs_info->balance_running)) {
2959 mutex_unlock(&fs_info->balance_mutex);
2960 wait_event(fs_info->balance_wait_q,
2961 atomic_read(&fs_info->balance_running) == 0);
2962 mutex_lock(&fs_info->balance_mutex);
2963 } else {
2964 /* __cancel_balance needs volume_mutex */
2965 mutex_unlock(&fs_info->balance_mutex);
2966 mutex_lock(&fs_info->volume_mutex);
2967 mutex_lock(&fs_info->balance_mutex);
2968
2969 if (fs_info->balance_ctl)
2970 __cancel_balance(fs_info);
2971
2972 mutex_unlock(&fs_info->volume_mutex);
2973 }
2974
2975 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
2976 atomic_dec(&fs_info->balance_cancel_req);
2977 mutex_unlock(&fs_info->balance_mutex);
2978 return 0;
2979 }
2980
2981 /*
2982 * shrinking a device means finding all of the device extents past
2983 * the new size, and then following the back refs to the chunks.
2984 * The chunk relocation code actually frees the device extent
2985 */
btrfs_shrink_device(struct btrfs_device * device,u64 new_size)2986 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2987 {
2988 struct btrfs_trans_handle *trans;
2989 struct btrfs_root *root = device->dev_root;
2990 struct btrfs_dev_extent *dev_extent = NULL;
2991 struct btrfs_path *path;
2992 u64 length;
2993 u64 chunk_tree;
2994 u64 chunk_objectid;
2995 u64 chunk_offset;
2996 int ret;
2997 int slot;
2998 int failed = 0;
2999 bool retried = false;
3000 struct extent_buffer *l;
3001 struct btrfs_key key;
3002 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3003 u64 old_total = btrfs_super_total_bytes(super_copy);
3004 u64 old_size = device->total_bytes;
3005 u64 diff = device->total_bytes - new_size;
3006
3007 if (new_size >= device->total_bytes)
3008 return -EINVAL;
3009
3010 path = btrfs_alloc_path();
3011 if (!path)
3012 return -ENOMEM;
3013
3014 path->reada = 2;
3015
3016 lock_chunks(root);
3017
3018 device->total_bytes = new_size;
3019 if (device->writeable) {
3020 device->fs_devices->total_rw_bytes -= diff;
3021 spin_lock(&root->fs_info->free_chunk_lock);
3022 root->fs_info->free_chunk_space -= diff;
3023 spin_unlock(&root->fs_info->free_chunk_lock);
3024 }
3025 unlock_chunks(root);
3026
3027 again:
3028 key.objectid = device->devid;
3029 key.offset = (u64)-1;
3030 key.type = BTRFS_DEV_EXTENT_KEY;
3031
3032 do {
3033 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3034 if (ret < 0)
3035 goto done;
3036
3037 ret = btrfs_previous_item(root, path, 0, key.type);
3038 if (ret < 0)
3039 goto done;
3040 if (ret) {
3041 ret = 0;
3042 btrfs_release_path(path);
3043 break;
3044 }
3045
3046 l = path->nodes[0];
3047 slot = path->slots[0];
3048 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3049
3050 if (key.objectid != device->devid) {
3051 btrfs_release_path(path);
3052 break;
3053 }
3054
3055 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3056 length = btrfs_dev_extent_length(l, dev_extent);
3057
3058 if (key.offset + length <= new_size) {
3059 btrfs_release_path(path);
3060 break;
3061 }
3062
3063 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3064 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3065 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3066 btrfs_release_path(path);
3067
3068 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3069 chunk_offset);
3070 if (ret && ret != -ENOSPC)
3071 goto done;
3072 if (ret == -ENOSPC)
3073 failed++;
3074 } while (key.offset-- > 0);
3075
3076 if (failed && !retried) {
3077 failed = 0;
3078 retried = true;
3079 goto again;
3080 } else if (failed && retried) {
3081 ret = -ENOSPC;
3082 lock_chunks(root);
3083
3084 device->total_bytes = old_size;
3085 if (device->writeable)
3086 device->fs_devices->total_rw_bytes += diff;
3087 spin_lock(&root->fs_info->free_chunk_lock);
3088 root->fs_info->free_chunk_space += diff;
3089 spin_unlock(&root->fs_info->free_chunk_lock);
3090 unlock_chunks(root);
3091 goto done;
3092 }
3093
3094 /* Shrinking succeeded, else we would be at "done". */
3095 trans = btrfs_start_transaction(root, 0);
3096 if (IS_ERR(trans)) {
3097 ret = PTR_ERR(trans);
3098 goto done;
3099 }
3100
3101 lock_chunks(root);
3102
3103 device->disk_total_bytes = new_size;
3104 /* Now btrfs_update_device() will change the on-disk size. */
3105 ret = btrfs_update_device(trans, device);
3106 if (ret) {
3107 unlock_chunks(root);
3108 btrfs_end_transaction(trans, root);
3109 goto done;
3110 }
3111 WARN_ON(diff > old_total);
3112 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3113 unlock_chunks(root);
3114 btrfs_end_transaction(trans, root);
3115 done:
3116 btrfs_free_path(path);
3117 return ret;
3118 }
3119
btrfs_add_system_chunk(struct btrfs_root * root,struct btrfs_key * key,struct btrfs_chunk * chunk,int item_size)3120 static int btrfs_add_system_chunk(struct btrfs_root *root,
3121 struct btrfs_key *key,
3122 struct btrfs_chunk *chunk, int item_size)
3123 {
3124 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3125 struct btrfs_disk_key disk_key;
3126 u32 array_size;
3127 u8 *ptr;
3128
3129 array_size = btrfs_super_sys_array_size(super_copy);
3130 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3131 return -EFBIG;
3132
3133 ptr = super_copy->sys_chunk_array + array_size;
3134 btrfs_cpu_key_to_disk(&disk_key, key);
3135 memcpy(ptr, &disk_key, sizeof(disk_key));
3136 ptr += sizeof(disk_key);
3137 memcpy(ptr, chunk, item_size);
3138 item_size += sizeof(disk_key);
3139 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3140 return 0;
3141 }
3142
3143 /*
3144 * sort the devices in descending order by max_avail, total_avail
3145 */
btrfs_cmp_device_info(const void * a,const void * b)3146 static int btrfs_cmp_device_info(const void *a, const void *b)
3147 {
3148 const struct btrfs_device_info *di_a = a;
3149 const struct btrfs_device_info *di_b = b;
3150
3151 if (di_a->max_avail > di_b->max_avail)
3152 return -1;
3153 if (di_a->max_avail < di_b->max_avail)
3154 return 1;
3155 if (di_a->total_avail > di_b->total_avail)
3156 return -1;
3157 if (di_a->total_avail < di_b->total_avail)
3158 return 1;
3159 return 0;
3160 }
3161
__btrfs_alloc_chunk(struct btrfs_trans_handle * trans,struct btrfs_root * extent_root,struct map_lookup ** map_ret,u64 * num_bytes_out,u64 * stripe_size_out,u64 start,u64 type)3162 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3163 struct btrfs_root *extent_root,
3164 struct map_lookup **map_ret,
3165 u64 *num_bytes_out, u64 *stripe_size_out,
3166 u64 start, u64 type)
3167 {
3168 struct btrfs_fs_info *info = extent_root->fs_info;
3169 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3170 struct list_head *cur;
3171 struct map_lookup *map = NULL;
3172 struct extent_map_tree *em_tree;
3173 struct extent_map *em;
3174 struct btrfs_device_info *devices_info = NULL;
3175 u64 total_avail;
3176 int num_stripes; /* total number of stripes to allocate */
3177 int sub_stripes; /* sub_stripes info for map */
3178 int dev_stripes; /* stripes per dev */
3179 int devs_max; /* max devs to use */
3180 int devs_min; /* min devs needed */
3181 int devs_increment; /* ndevs has to be a multiple of this */
3182 int ncopies; /* how many copies to data has */
3183 int ret;
3184 u64 max_stripe_size;
3185 u64 max_chunk_size;
3186 u64 stripe_size;
3187 u64 num_bytes;
3188 int ndevs;
3189 int i;
3190 int j;
3191
3192 BUG_ON(!alloc_profile_is_valid(type, 0));
3193
3194 if (list_empty(&fs_devices->alloc_list))
3195 return -ENOSPC;
3196
3197 sub_stripes = 1;
3198 dev_stripes = 1;
3199 devs_increment = 1;
3200 ncopies = 1;
3201 devs_max = 0; /* 0 == as many as possible */
3202 devs_min = 1;
3203
3204 /*
3205 * define the properties of each RAID type.
3206 * FIXME: move this to a global table and use it in all RAID
3207 * calculation code
3208 */
3209 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3210 dev_stripes = 2;
3211 ncopies = 2;
3212 devs_max = 1;
3213 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3214 devs_min = 2;
3215 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3216 devs_increment = 2;
3217 ncopies = 2;
3218 devs_max = 2;
3219 devs_min = 2;
3220 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3221 sub_stripes = 2;
3222 devs_increment = 2;
3223 ncopies = 2;
3224 devs_min = 4;
3225 } else {
3226 devs_max = 1;
3227 }
3228
3229 if (type & BTRFS_BLOCK_GROUP_DATA) {
3230 max_stripe_size = 1024 * 1024 * 1024;
3231 max_chunk_size = 10 * max_stripe_size;
3232 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3233 /* for larger filesystems, use larger metadata chunks */
3234 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3235 max_stripe_size = 1024 * 1024 * 1024;
3236 else
3237 max_stripe_size = 256 * 1024 * 1024;
3238 max_chunk_size = max_stripe_size;
3239 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3240 max_stripe_size = 32 * 1024 * 1024;
3241 max_chunk_size = 2 * max_stripe_size;
3242 } else {
3243 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3244 type);
3245 BUG_ON(1);
3246 }
3247
3248 /* we don't want a chunk larger than 10% of writeable space */
3249 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3250 max_chunk_size);
3251
3252 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3253 GFP_NOFS);
3254 if (!devices_info)
3255 return -ENOMEM;
3256
3257 cur = fs_devices->alloc_list.next;
3258
3259 /*
3260 * in the first pass through the devices list, we gather information
3261 * about the available holes on each device.
3262 */
3263 ndevs = 0;
3264 while (cur != &fs_devices->alloc_list) {
3265 struct btrfs_device *device;
3266 u64 max_avail;
3267 u64 dev_offset;
3268
3269 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3270
3271 cur = cur->next;
3272
3273 if (!device->writeable) {
3274 printk(KERN_ERR
3275 "btrfs: read-only device in alloc_list\n");
3276 WARN_ON(1);
3277 continue;
3278 }
3279
3280 if (!device->in_fs_metadata)
3281 continue;
3282
3283 if (device->total_bytes > device->bytes_used)
3284 total_avail = device->total_bytes - device->bytes_used;
3285 else
3286 total_avail = 0;
3287
3288 /* If there is no space on this device, skip it. */
3289 if (total_avail == 0)
3290 continue;
3291
3292 ret = find_free_dev_extent(device,
3293 max_stripe_size * dev_stripes,
3294 &dev_offset, &max_avail);
3295 if (ret && ret != -ENOSPC)
3296 goto error;
3297
3298 if (ret == 0)
3299 max_avail = max_stripe_size * dev_stripes;
3300
3301 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3302 continue;
3303
3304 devices_info[ndevs].dev_offset = dev_offset;
3305 devices_info[ndevs].max_avail = max_avail;
3306 devices_info[ndevs].total_avail = total_avail;
3307 devices_info[ndevs].dev = device;
3308 ++ndevs;
3309 }
3310
3311 /*
3312 * now sort the devices by hole size / available space
3313 */
3314 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3315 btrfs_cmp_device_info, NULL);
3316
3317 /* round down to number of usable stripes */
3318 ndevs -= ndevs % devs_increment;
3319
3320 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3321 ret = -ENOSPC;
3322 goto error;
3323 }
3324
3325 if (devs_max && ndevs > devs_max)
3326 ndevs = devs_max;
3327 /*
3328 * the primary goal is to maximize the number of stripes, so use as many
3329 * devices as possible, even if the stripes are not maximum sized.
3330 */
3331 stripe_size = devices_info[ndevs-1].max_avail;
3332 num_stripes = ndevs * dev_stripes;
3333
3334 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3335 stripe_size = max_chunk_size * ncopies;
3336 do_div(stripe_size, ndevs);
3337 }
3338
3339 do_div(stripe_size, dev_stripes);
3340
3341 /* align to BTRFS_STRIPE_LEN */
3342 do_div(stripe_size, BTRFS_STRIPE_LEN);
3343 stripe_size *= BTRFS_STRIPE_LEN;
3344
3345 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3346 if (!map) {
3347 ret = -ENOMEM;
3348 goto error;
3349 }
3350 map->num_stripes = num_stripes;
3351
3352 for (i = 0; i < ndevs; ++i) {
3353 for (j = 0; j < dev_stripes; ++j) {
3354 int s = i * dev_stripes + j;
3355 map->stripes[s].dev = devices_info[i].dev;
3356 map->stripes[s].physical = devices_info[i].dev_offset +
3357 j * stripe_size;
3358 }
3359 }
3360 map->sector_size = extent_root->sectorsize;
3361 map->stripe_len = BTRFS_STRIPE_LEN;
3362 map->io_align = BTRFS_STRIPE_LEN;
3363 map->io_width = BTRFS_STRIPE_LEN;
3364 map->type = type;
3365 map->sub_stripes = sub_stripes;
3366
3367 *map_ret = map;
3368 num_bytes = stripe_size * (num_stripes / ncopies);
3369
3370 *stripe_size_out = stripe_size;
3371 *num_bytes_out = num_bytes;
3372
3373 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3374
3375 em = alloc_extent_map();
3376 if (!em) {
3377 ret = -ENOMEM;
3378 goto error;
3379 }
3380 em->bdev = (struct block_device *)map;
3381 em->start = start;
3382 em->len = num_bytes;
3383 em->block_start = 0;
3384 em->block_len = em->len;
3385
3386 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3387 write_lock(&em_tree->lock);
3388 ret = add_extent_mapping(em_tree, em);
3389 write_unlock(&em_tree->lock);
3390 free_extent_map(em);
3391 if (ret)
3392 goto error;
3393
3394 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3395 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3396 start, num_bytes);
3397 if (ret)
3398 goto error;
3399
3400 for (i = 0; i < map->num_stripes; ++i) {
3401 struct btrfs_device *device;
3402 u64 dev_offset;
3403
3404 device = map->stripes[i].dev;
3405 dev_offset = map->stripes[i].physical;
3406
3407 ret = btrfs_alloc_dev_extent(trans, device,
3408 info->chunk_root->root_key.objectid,
3409 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3410 start, dev_offset, stripe_size);
3411 if (ret) {
3412 btrfs_abort_transaction(trans, extent_root, ret);
3413 goto error;
3414 }
3415 }
3416
3417 kfree(devices_info);
3418 return 0;
3419
3420 error:
3421 kfree(map);
3422 kfree(devices_info);
3423 return ret;
3424 }
3425
__finish_chunk_alloc(struct btrfs_trans_handle * trans,struct btrfs_root * extent_root,struct map_lookup * map,u64 chunk_offset,u64 chunk_size,u64 stripe_size)3426 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3427 struct btrfs_root *extent_root,
3428 struct map_lookup *map, u64 chunk_offset,
3429 u64 chunk_size, u64 stripe_size)
3430 {
3431 u64 dev_offset;
3432 struct btrfs_key key;
3433 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3434 struct btrfs_device *device;
3435 struct btrfs_chunk *chunk;
3436 struct btrfs_stripe *stripe;
3437 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3438 int index = 0;
3439 int ret;
3440
3441 chunk = kzalloc(item_size, GFP_NOFS);
3442 if (!chunk)
3443 return -ENOMEM;
3444
3445 index = 0;
3446 while (index < map->num_stripes) {
3447 device = map->stripes[index].dev;
3448 device->bytes_used += stripe_size;
3449 ret = btrfs_update_device(trans, device);
3450 if (ret)
3451 goto out_free;
3452 index++;
3453 }
3454
3455 spin_lock(&extent_root->fs_info->free_chunk_lock);
3456 extent_root->fs_info->free_chunk_space -= (stripe_size *
3457 map->num_stripes);
3458 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3459
3460 index = 0;
3461 stripe = &chunk->stripe;
3462 while (index < map->num_stripes) {
3463 device = map->stripes[index].dev;
3464 dev_offset = map->stripes[index].physical;
3465
3466 btrfs_set_stack_stripe_devid(stripe, device->devid);
3467 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3468 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3469 stripe++;
3470 index++;
3471 }
3472
3473 btrfs_set_stack_chunk_length(chunk, chunk_size);
3474 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3475 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3476 btrfs_set_stack_chunk_type(chunk, map->type);
3477 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3478 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3479 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3480 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3481 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3482
3483 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3484 key.type = BTRFS_CHUNK_ITEM_KEY;
3485 key.offset = chunk_offset;
3486
3487 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3488
3489 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3490 /*
3491 * TODO: Cleanup of inserted chunk root in case of
3492 * failure.
3493 */
3494 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3495 item_size);
3496 }
3497
3498 out_free:
3499 kfree(chunk);
3500 return ret;
3501 }
3502
3503 /*
3504 * Chunk allocation falls into two parts. The first part does works
3505 * that make the new allocated chunk useable, but not do any operation
3506 * that modifies the chunk tree. The second part does the works that
3507 * require modifying the chunk tree. This division is important for the
3508 * bootstrap process of adding storage to a seed btrfs.
3509 */
btrfs_alloc_chunk(struct btrfs_trans_handle * trans,struct btrfs_root * extent_root,u64 type)3510 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3511 struct btrfs_root *extent_root, u64 type)
3512 {
3513 u64 chunk_offset;
3514 u64 chunk_size;
3515 u64 stripe_size;
3516 struct map_lookup *map;
3517 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3518 int ret;
3519
3520 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3521 &chunk_offset);
3522 if (ret)
3523 return ret;
3524
3525 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3526 &stripe_size, chunk_offset, type);
3527 if (ret)
3528 return ret;
3529
3530 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3531 chunk_size, stripe_size);
3532 if (ret)
3533 return ret;
3534 return 0;
3535 }
3536
init_first_rw_device(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct btrfs_device * device)3537 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3538 struct btrfs_root *root,
3539 struct btrfs_device *device)
3540 {
3541 u64 chunk_offset;
3542 u64 sys_chunk_offset;
3543 u64 chunk_size;
3544 u64 sys_chunk_size;
3545 u64 stripe_size;
3546 u64 sys_stripe_size;
3547 u64 alloc_profile;
3548 struct map_lookup *map;
3549 struct map_lookup *sys_map;
3550 struct btrfs_fs_info *fs_info = root->fs_info;
3551 struct btrfs_root *extent_root = fs_info->extent_root;
3552 int ret;
3553
3554 ret = find_next_chunk(fs_info->chunk_root,
3555 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3556 if (ret)
3557 return ret;
3558
3559 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3560 fs_info->avail_metadata_alloc_bits;
3561 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3562
3563 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3564 &stripe_size, chunk_offset, alloc_profile);
3565 if (ret)
3566 return ret;
3567
3568 sys_chunk_offset = chunk_offset + chunk_size;
3569
3570 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3571 fs_info->avail_system_alloc_bits;
3572 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3573
3574 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3575 &sys_chunk_size, &sys_stripe_size,
3576 sys_chunk_offset, alloc_profile);
3577 if (ret)
3578 goto abort;
3579
3580 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3581 if (ret)
3582 goto abort;
3583
3584 /*
3585 * Modifying chunk tree needs allocating new blocks from both
3586 * system block group and metadata block group. So we only can
3587 * do operations require modifying the chunk tree after both
3588 * block groups were created.
3589 */
3590 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3591 chunk_size, stripe_size);
3592 if (ret)
3593 goto abort;
3594
3595 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3596 sys_chunk_offset, sys_chunk_size,
3597 sys_stripe_size);
3598 if (ret)
3599 goto abort;
3600
3601 return 0;
3602
3603 abort:
3604 btrfs_abort_transaction(trans, root, ret);
3605 return ret;
3606 }
3607
btrfs_chunk_readonly(struct btrfs_root * root,u64 chunk_offset)3608 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3609 {
3610 struct extent_map *em;
3611 struct map_lookup *map;
3612 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3613 int readonly = 0;
3614 int i;
3615
3616 read_lock(&map_tree->map_tree.lock);
3617 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3618 read_unlock(&map_tree->map_tree.lock);
3619 if (!em)
3620 return 1;
3621
3622 if (btrfs_test_opt(root, DEGRADED)) {
3623 free_extent_map(em);
3624 return 0;
3625 }
3626
3627 map = (struct map_lookup *)em->bdev;
3628 for (i = 0; i < map->num_stripes; i++) {
3629 if (!map->stripes[i].dev->writeable) {
3630 readonly = 1;
3631 break;
3632 }
3633 }
3634 free_extent_map(em);
3635 return readonly;
3636 }
3637
btrfs_mapping_init(struct btrfs_mapping_tree * tree)3638 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3639 {
3640 extent_map_tree_init(&tree->map_tree);
3641 }
3642
btrfs_mapping_tree_free(struct btrfs_mapping_tree * tree)3643 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3644 {
3645 struct extent_map *em;
3646
3647 while (1) {
3648 write_lock(&tree->map_tree.lock);
3649 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3650 if (em)
3651 remove_extent_mapping(&tree->map_tree, em);
3652 write_unlock(&tree->map_tree.lock);
3653 if (!em)
3654 break;
3655 kfree(em->bdev);
3656 /* once for us */
3657 free_extent_map(em);
3658 /* once for the tree */
3659 free_extent_map(em);
3660 }
3661 }
3662
btrfs_num_copies(struct btrfs_mapping_tree * map_tree,u64 logical,u64 len)3663 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3664 {
3665 struct extent_map *em;
3666 struct map_lookup *map;
3667 struct extent_map_tree *em_tree = &map_tree->map_tree;
3668 int ret;
3669
3670 read_lock(&em_tree->lock);
3671 em = lookup_extent_mapping(em_tree, logical, len);
3672 read_unlock(&em_tree->lock);
3673 BUG_ON(!em);
3674
3675 BUG_ON(em->start > logical || em->start + em->len < logical);
3676 map = (struct map_lookup *)em->bdev;
3677 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3678 ret = map->num_stripes;
3679 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3680 ret = map->sub_stripes;
3681 else
3682 ret = 1;
3683 free_extent_map(em);
3684 return ret;
3685 }
3686
find_live_mirror(struct map_lookup * map,int first,int num,int optimal)3687 static int find_live_mirror(struct map_lookup *map, int first, int num,
3688 int optimal)
3689 {
3690 int i;
3691 if (map->stripes[optimal].dev->bdev)
3692 return optimal;
3693 for (i = first; i < first + num; i++) {
3694 if (map->stripes[i].dev->bdev)
3695 return i;
3696 }
3697 /* we couldn't find one that doesn't fail. Just return something
3698 * and the io error handling code will clean up eventually
3699 */
3700 return optimal;
3701 }
3702
__btrfs_map_block(struct btrfs_mapping_tree * map_tree,int rw,u64 logical,u64 * length,struct btrfs_bio ** bbio_ret,int mirror_num)3703 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3704 u64 logical, u64 *length,
3705 struct btrfs_bio **bbio_ret,
3706 int mirror_num)
3707 {
3708 struct extent_map *em;
3709 struct map_lookup *map;
3710 struct extent_map_tree *em_tree = &map_tree->map_tree;
3711 u64 offset;
3712 u64 stripe_offset;
3713 u64 stripe_end_offset;
3714 u64 stripe_nr;
3715 u64 stripe_nr_orig;
3716 u64 stripe_nr_end;
3717 int stripe_index;
3718 int i;
3719 int ret = 0;
3720 int num_stripes;
3721 int max_errors = 0;
3722 struct btrfs_bio *bbio = NULL;
3723
3724 read_lock(&em_tree->lock);
3725 em = lookup_extent_mapping(em_tree, logical, *length);
3726 read_unlock(&em_tree->lock);
3727
3728 if (!em) {
3729 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3730 (unsigned long long)logical,
3731 (unsigned long long)*length);
3732 BUG();
3733 }
3734
3735 BUG_ON(em->start > logical || em->start + em->len < logical);
3736 map = (struct map_lookup *)em->bdev;
3737 offset = logical - em->start;
3738
3739 if (mirror_num > map->num_stripes)
3740 mirror_num = 0;
3741
3742 stripe_nr = offset;
3743 /*
3744 * stripe_nr counts the total number of stripes we have to stride
3745 * to get to this block
3746 */
3747 do_div(stripe_nr, map->stripe_len);
3748
3749 stripe_offset = stripe_nr * map->stripe_len;
3750 BUG_ON(offset < stripe_offset);
3751
3752 /* stripe_offset is the offset of this block in its stripe*/
3753 stripe_offset = offset - stripe_offset;
3754
3755 if (rw & REQ_DISCARD)
3756 *length = min_t(u64, em->len - offset, *length);
3757 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3758 /* we limit the length of each bio to what fits in a stripe */
3759 *length = min_t(u64, em->len - offset,
3760 map->stripe_len - stripe_offset);
3761 } else {
3762 *length = em->len - offset;
3763 }
3764
3765 if (!bbio_ret)
3766 goto out;
3767
3768 num_stripes = 1;
3769 stripe_index = 0;
3770 stripe_nr_orig = stripe_nr;
3771 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3772 (~(map->stripe_len - 1));
3773 do_div(stripe_nr_end, map->stripe_len);
3774 stripe_end_offset = stripe_nr_end * map->stripe_len -
3775 (offset + *length);
3776 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3777 if (rw & REQ_DISCARD)
3778 num_stripes = min_t(u64, map->num_stripes,
3779 stripe_nr_end - stripe_nr_orig);
3780 stripe_index = do_div(stripe_nr, map->num_stripes);
3781 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3782 if (rw & (REQ_WRITE | REQ_DISCARD))
3783 num_stripes = map->num_stripes;
3784 else if (mirror_num)
3785 stripe_index = mirror_num - 1;
3786 else {
3787 stripe_index = find_live_mirror(map, 0,
3788 map->num_stripes,
3789 current->pid % map->num_stripes);
3790 mirror_num = stripe_index + 1;
3791 }
3792
3793 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3794 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3795 num_stripes = map->num_stripes;
3796 } else if (mirror_num) {
3797 stripe_index = mirror_num - 1;
3798 } else {
3799 mirror_num = 1;
3800 }
3801
3802 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3803 int factor = map->num_stripes / map->sub_stripes;
3804
3805 stripe_index = do_div(stripe_nr, factor);
3806 stripe_index *= map->sub_stripes;
3807
3808 if (rw & REQ_WRITE)
3809 num_stripes = map->sub_stripes;
3810 else if (rw & REQ_DISCARD)
3811 num_stripes = min_t(u64, map->sub_stripes *
3812 (stripe_nr_end - stripe_nr_orig),
3813 map->num_stripes);
3814 else if (mirror_num)
3815 stripe_index += mirror_num - 1;
3816 else {
3817 int old_stripe_index = stripe_index;
3818 stripe_index = find_live_mirror(map, stripe_index,
3819 map->sub_stripes, stripe_index +
3820 current->pid % map->sub_stripes);
3821 mirror_num = stripe_index - old_stripe_index + 1;
3822 }
3823 } else {
3824 /*
3825 * after this do_div call, stripe_nr is the number of stripes
3826 * on this device we have to walk to find the data, and
3827 * stripe_index is the number of our device in the stripe array
3828 */
3829 stripe_index = do_div(stripe_nr, map->num_stripes);
3830 mirror_num = stripe_index + 1;
3831 }
3832 BUG_ON(stripe_index >= map->num_stripes);
3833
3834 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3835 if (!bbio) {
3836 ret = -ENOMEM;
3837 goto out;
3838 }
3839 atomic_set(&bbio->error, 0);
3840
3841 if (rw & REQ_DISCARD) {
3842 int factor = 0;
3843 int sub_stripes = 0;
3844 u64 stripes_per_dev = 0;
3845 u32 remaining_stripes = 0;
3846 u32 last_stripe = 0;
3847
3848 if (map->type &
3849 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3850 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3851 sub_stripes = 1;
3852 else
3853 sub_stripes = map->sub_stripes;
3854
3855 factor = map->num_stripes / sub_stripes;
3856 stripes_per_dev = div_u64_rem(stripe_nr_end -
3857 stripe_nr_orig,
3858 factor,
3859 &remaining_stripes);
3860 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3861 last_stripe *= sub_stripes;
3862 }
3863
3864 for (i = 0; i < num_stripes; i++) {
3865 bbio->stripes[i].physical =
3866 map->stripes[stripe_index].physical +
3867 stripe_offset + stripe_nr * map->stripe_len;
3868 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3869
3870 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3871 BTRFS_BLOCK_GROUP_RAID10)) {
3872 bbio->stripes[i].length = stripes_per_dev *
3873 map->stripe_len;
3874
3875 if (i / sub_stripes < remaining_stripes)
3876 bbio->stripes[i].length +=
3877 map->stripe_len;
3878
3879 /*
3880 * Special for the first stripe and
3881 * the last stripe:
3882 *
3883 * |-------|...|-------|
3884 * |----------|
3885 * off end_off
3886 */
3887 if (i < sub_stripes)
3888 bbio->stripes[i].length -=
3889 stripe_offset;
3890
3891 if (stripe_index >= last_stripe &&
3892 stripe_index <= (last_stripe +
3893 sub_stripes - 1))
3894 bbio->stripes[i].length -=
3895 stripe_end_offset;
3896
3897 if (i == sub_stripes - 1)
3898 stripe_offset = 0;
3899 } else
3900 bbio->stripes[i].length = *length;
3901
3902 stripe_index++;
3903 if (stripe_index == map->num_stripes) {
3904 /* This could only happen for RAID0/10 */
3905 stripe_index = 0;
3906 stripe_nr++;
3907 }
3908 }
3909 } else {
3910 for (i = 0; i < num_stripes; i++) {
3911 bbio->stripes[i].physical =
3912 map->stripes[stripe_index].physical +
3913 stripe_offset +
3914 stripe_nr * map->stripe_len;
3915 bbio->stripes[i].dev =
3916 map->stripes[stripe_index].dev;
3917 stripe_index++;
3918 }
3919 }
3920
3921 if (rw & REQ_WRITE) {
3922 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3923 BTRFS_BLOCK_GROUP_RAID10 |
3924 BTRFS_BLOCK_GROUP_DUP)) {
3925 max_errors = 1;
3926 }
3927 }
3928
3929 *bbio_ret = bbio;
3930 bbio->num_stripes = num_stripes;
3931 bbio->max_errors = max_errors;
3932 bbio->mirror_num = mirror_num;
3933 out:
3934 free_extent_map(em);
3935 return ret;
3936 }
3937
btrfs_map_block(struct btrfs_mapping_tree * map_tree,int rw,u64 logical,u64 * length,struct btrfs_bio ** bbio_ret,int mirror_num)3938 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3939 u64 logical, u64 *length,
3940 struct btrfs_bio **bbio_ret, int mirror_num)
3941 {
3942 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3943 mirror_num);
3944 }
3945
btrfs_rmap_block(struct btrfs_mapping_tree * map_tree,u64 chunk_start,u64 physical,u64 devid,u64 ** logical,int * naddrs,int * stripe_len)3946 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3947 u64 chunk_start, u64 physical, u64 devid,
3948 u64 **logical, int *naddrs, int *stripe_len)
3949 {
3950 struct extent_map_tree *em_tree = &map_tree->map_tree;
3951 struct extent_map *em;
3952 struct map_lookup *map;
3953 u64 *buf;
3954 u64 bytenr;
3955 u64 length;
3956 u64 stripe_nr;
3957 int i, j, nr = 0;
3958
3959 read_lock(&em_tree->lock);
3960 em = lookup_extent_mapping(em_tree, chunk_start, 1);
3961 read_unlock(&em_tree->lock);
3962
3963 BUG_ON(!em || em->start != chunk_start);
3964 map = (struct map_lookup *)em->bdev;
3965
3966 length = em->len;
3967 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3968 do_div(length, map->num_stripes / map->sub_stripes);
3969 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3970 do_div(length, map->num_stripes);
3971
3972 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3973 BUG_ON(!buf); /* -ENOMEM */
3974
3975 for (i = 0; i < map->num_stripes; i++) {
3976 if (devid && map->stripes[i].dev->devid != devid)
3977 continue;
3978 if (map->stripes[i].physical > physical ||
3979 map->stripes[i].physical + length <= physical)
3980 continue;
3981
3982 stripe_nr = physical - map->stripes[i].physical;
3983 do_div(stripe_nr, map->stripe_len);
3984
3985 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3986 stripe_nr = stripe_nr * map->num_stripes + i;
3987 do_div(stripe_nr, map->sub_stripes);
3988 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3989 stripe_nr = stripe_nr * map->num_stripes + i;
3990 }
3991 bytenr = chunk_start + stripe_nr * map->stripe_len;
3992 WARN_ON(nr >= map->num_stripes);
3993 for (j = 0; j < nr; j++) {
3994 if (buf[j] == bytenr)
3995 break;
3996 }
3997 if (j == nr) {
3998 WARN_ON(nr >= map->num_stripes);
3999 buf[nr++] = bytenr;
4000 }
4001 }
4002
4003 *logical = buf;
4004 *naddrs = nr;
4005 *stripe_len = map->stripe_len;
4006
4007 free_extent_map(em);
4008 return 0;
4009 }
4010
btrfs_end_bio(struct bio * bio,int err)4011 static void btrfs_end_bio(struct bio *bio, int err)
4012 {
4013 struct btrfs_bio *bbio = bio->bi_private;
4014 int is_orig_bio = 0;
4015
4016 if (err)
4017 atomic_inc(&bbio->error);
4018
4019 if (bio == bbio->orig_bio)
4020 is_orig_bio = 1;
4021
4022 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4023 if (!is_orig_bio) {
4024 bio_put(bio);
4025 bio = bbio->orig_bio;
4026 }
4027 bio->bi_private = bbio->private;
4028 bio->bi_end_io = bbio->end_io;
4029 bio->bi_bdev = (struct block_device *)
4030 (unsigned long)bbio->mirror_num;
4031 /* only send an error to the higher layers if it is
4032 * beyond the tolerance of the multi-bio
4033 */
4034 if (atomic_read(&bbio->error) > bbio->max_errors) {
4035 err = -EIO;
4036 } else {
4037 /*
4038 * this bio is actually up to date, we didn't
4039 * go over the max number of errors
4040 */
4041 set_bit(BIO_UPTODATE, &bio->bi_flags);
4042 err = 0;
4043 }
4044 kfree(bbio);
4045
4046 bio_endio(bio, err);
4047 } else if (!is_orig_bio) {
4048 bio_put(bio);
4049 }
4050 }
4051
4052 struct async_sched {
4053 struct bio *bio;
4054 int rw;
4055 struct btrfs_fs_info *info;
4056 struct btrfs_work work;
4057 };
4058
4059 /*
4060 * see run_scheduled_bios for a description of why bios are collected for
4061 * async submit.
4062 *
4063 * This will add one bio to the pending list for a device and make sure
4064 * the work struct is scheduled.
4065 */
schedule_bio(struct btrfs_root * root,struct btrfs_device * device,int rw,struct bio * bio)4066 static noinline void schedule_bio(struct btrfs_root *root,
4067 struct btrfs_device *device,
4068 int rw, struct bio *bio)
4069 {
4070 int should_queue = 1;
4071 struct btrfs_pending_bios *pending_bios;
4072
4073 /* don't bother with additional async steps for reads, right now */
4074 if (!(rw & REQ_WRITE)) {
4075 bio_get(bio);
4076 btrfsic_submit_bio(rw, bio);
4077 bio_put(bio);
4078 return;
4079 }
4080
4081 /*
4082 * nr_async_bios allows us to reliably return congestion to the
4083 * higher layers. Otherwise, the async bio makes it appear we have
4084 * made progress against dirty pages when we've really just put it
4085 * on a queue for later
4086 */
4087 atomic_inc(&root->fs_info->nr_async_bios);
4088 WARN_ON(bio->bi_next);
4089 bio->bi_next = NULL;
4090 bio->bi_rw |= rw;
4091
4092 spin_lock(&device->io_lock);
4093 if (bio->bi_rw & REQ_SYNC)
4094 pending_bios = &device->pending_sync_bios;
4095 else
4096 pending_bios = &device->pending_bios;
4097
4098 if (pending_bios->tail)
4099 pending_bios->tail->bi_next = bio;
4100
4101 pending_bios->tail = bio;
4102 if (!pending_bios->head)
4103 pending_bios->head = bio;
4104 if (device->running_pending)
4105 should_queue = 0;
4106
4107 spin_unlock(&device->io_lock);
4108
4109 if (should_queue)
4110 btrfs_queue_worker(&root->fs_info->submit_workers,
4111 &device->work);
4112 }
4113
btrfs_map_bio(struct btrfs_root * root,int rw,struct bio * bio,int mirror_num,int async_submit)4114 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4115 int mirror_num, int async_submit)
4116 {
4117 struct btrfs_mapping_tree *map_tree;
4118 struct btrfs_device *dev;
4119 struct bio *first_bio = bio;
4120 u64 logical = (u64)bio->bi_sector << 9;
4121 u64 length = 0;
4122 u64 map_length;
4123 int ret;
4124 int dev_nr = 0;
4125 int total_devs = 1;
4126 struct btrfs_bio *bbio = NULL;
4127
4128 length = bio->bi_size;
4129 map_tree = &root->fs_info->mapping_tree;
4130 map_length = length;
4131
4132 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4133 mirror_num);
4134 if (ret) /* -ENOMEM */
4135 return ret;
4136
4137 total_devs = bbio->num_stripes;
4138 if (map_length < length) {
4139 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
4140 "len %llu\n", (unsigned long long)logical,
4141 (unsigned long long)length,
4142 (unsigned long long)map_length);
4143 BUG();
4144 }
4145
4146 bbio->orig_bio = first_bio;
4147 bbio->private = first_bio->bi_private;
4148 bbio->end_io = first_bio->bi_end_io;
4149 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4150
4151 while (dev_nr < total_devs) {
4152 if (dev_nr < total_devs - 1) {
4153 bio = bio_clone(first_bio, GFP_NOFS);
4154 BUG_ON(!bio); /* -ENOMEM */
4155 } else {
4156 bio = first_bio;
4157 }
4158 bio->bi_private = bbio;
4159 bio->bi_end_io = btrfs_end_bio;
4160 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4161 dev = bbio->stripes[dev_nr].dev;
4162 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4163 pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu "
4164 "(%s id %llu), size=%u\n", rw,
4165 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4166 dev->name, dev->devid, bio->bi_size);
4167 bio->bi_bdev = dev->bdev;
4168 if (async_submit)
4169 schedule_bio(root, dev, rw, bio);
4170 else
4171 btrfsic_submit_bio(rw, bio);
4172 } else {
4173 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4174 bio->bi_sector = logical >> 9;
4175 bio_endio(bio, -EIO);
4176 }
4177 dev_nr++;
4178 }
4179 return 0;
4180 }
4181
btrfs_find_device(struct btrfs_root * root,u64 devid,u8 * uuid,u8 * fsid)4182 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4183 u8 *uuid, u8 *fsid)
4184 {
4185 struct btrfs_device *device;
4186 struct btrfs_fs_devices *cur_devices;
4187
4188 cur_devices = root->fs_info->fs_devices;
4189 while (cur_devices) {
4190 if (!fsid ||
4191 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4192 device = __find_device(&cur_devices->devices,
4193 devid, uuid);
4194 if (device)
4195 return device;
4196 }
4197 cur_devices = cur_devices->seed;
4198 }
4199 return NULL;
4200 }
4201
add_missing_dev(struct btrfs_root * root,u64 devid,u8 * dev_uuid)4202 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4203 u64 devid, u8 *dev_uuid)
4204 {
4205 struct btrfs_device *device;
4206 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4207
4208 device = kzalloc(sizeof(*device), GFP_NOFS);
4209 if (!device)
4210 return NULL;
4211 list_add(&device->dev_list,
4212 &fs_devices->devices);
4213 device->dev_root = root->fs_info->dev_root;
4214 device->devid = devid;
4215 device->work.func = pending_bios_fn;
4216 device->fs_devices = fs_devices;
4217 device->missing = 1;
4218 fs_devices->num_devices++;
4219 fs_devices->missing_devices++;
4220 spin_lock_init(&device->io_lock);
4221 INIT_LIST_HEAD(&device->dev_alloc_list);
4222 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4223 return device;
4224 }
4225
read_one_chunk(struct btrfs_root * root,struct btrfs_key * key,struct extent_buffer * leaf,struct btrfs_chunk * chunk)4226 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4227 struct extent_buffer *leaf,
4228 struct btrfs_chunk *chunk)
4229 {
4230 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4231 struct map_lookup *map;
4232 struct extent_map *em;
4233 u64 logical;
4234 u64 length;
4235 u64 devid;
4236 u8 uuid[BTRFS_UUID_SIZE];
4237 int num_stripes;
4238 int ret;
4239 int i;
4240
4241 logical = key->offset;
4242 length = btrfs_chunk_length(leaf, chunk);
4243
4244 read_lock(&map_tree->map_tree.lock);
4245 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4246 read_unlock(&map_tree->map_tree.lock);
4247
4248 /* already mapped? */
4249 if (em && em->start <= logical && em->start + em->len > logical) {
4250 free_extent_map(em);
4251 return 0;
4252 } else if (em) {
4253 free_extent_map(em);
4254 }
4255
4256 em = alloc_extent_map();
4257 if (!em)
4258 return -ENOMEM;
4259 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4260 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4261 if (!map) {
4262 free_extent_map(em);
4263 return -ENOMEM;
4264 }
4265
4266 em->bdev = (struct block_device *)map;
4267 em->start = logical;
4268 em->len = length;
4269 em->block_start = 0;
4270 em->block_len = em->len;
4271
4272 map->num_stripes = num_stripes;
4273 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4274 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4275 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4276 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4277 map->type = btrfs_chunk_type(leaf, chunk);
4278 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4279 for (i = 0; i < num_stripes; i++) {
4280 map->stripes[i].physical =
4281 btrfs_stripe_offset_nr(leaf, chunk, i);
4282 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4283 read_extent_buffer(leaf, uuid, (unsigned long)
4284 btrfs_stripe_dev_uuid_nr(chunk, i),
4285 BTRFS_UUID_SIZE);
4286 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4287 NULL);
4288 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4289 kfree(map);
4290 free_extent_map(em);
4291 return -EIO;
4292 }
4293 if (!map->stripes[i].dev) {
4294 map->stripes[i].dev =
4295 add_missing_dev(root, devid, uuid);
4296 if (!map->stripes[i].dev) {
4297 kfree(map);
4298 free_extent_map(em);
4299 return -EIO;
4300 }
4301 }
4302 map->stripes[i].dev->in_fs_metadata = 1;
4303 }
4304
4305 write_lock(&map_tree->map_tree.lock);
4306 ret = add_extent_mapping(&map_tree->map_tree, em);
4307 write_unlock(&map_tree->map_tree.lock);
4308 BUG_ON(ret); /* Tree corruption */
4309 free_extent_map(em);
4310
4311 return 0;
4312 }
4313
fill_device_from_item(struct extent_buffer * leaf,struct btrfs_dev_item * dev_item,struct btrfs_device * device)4314 static void fill_device_from_item(struct extent_buffer *leaf,
4315 struct btrfs_dev_item *dev_item,
4316 struct btrfs_device *device)
4317 {
4318 unsigned long ptr;
4319
4320 device->devid = btrfs_device_id(leaf, dev_item);
4321 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4322 device->total_bytes = device->disk_total_bytes;
4323 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4324 device->type = btrfs_device_type(leaf, dev_item);
4325 device->io_align = btrfs_device_io_align(leaf, dev_item);
4326 device->io_width = btrfs_device_io_width(leaf, dev_item);
4327 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4328
4329 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4330 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4331 }
4332
open_seed_devices(struct btrfs_root * root,u8 * fsid)4333 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4334 {
4335 struct btrfs_fs_devices *fs_devices;
4336 int ret;
4337
4338 BUG_ON(!mutex_is_locked(&uuid_mutex));
4339
4340 fs_devices = root->fs_info->fs_devices->seed;
4341 while (fs_devices) {
4342 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4343 ret = 0;
4344 goto out;
4345 }
4346 fs_devices = fs_devices->seed;
4347 }
4348
4349 fs_devices = find_fsid(fsid);
4350 if (!fs_devices) {
4351 ret = -ENOENT;
4352 goto out;
4353 }
4354
4355 fs_devices = clone_fs_devices(fs_devices);
4356 if (IS_ERR(fs_devices)) {
4357 ret = PTR_ERR(fs_devices);
4358 goto out;
4359 }
4360
4361 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4362 root->fs_info->bdev_holder);
4363 if (ret) {
4364 free_fs_devices(fs_devices);
4365 goto out;
4366 }
4367
4368 if (!fs_devices->seeding) {
4369 __btrfs_close_devices(fs_devices);
4370 free_fs_devices(fs_devices);
4371 ret = -EINVAL;
4372 goto out;
4373 }
4374
4375 fs_devices->seed = root->fs_info->fs_devices->seed;
4376 root->fs_info->fs_devices->seed = fs_devices;
4377 out:
4378 return ret;
4379 }
4380
read_one_dev(struct btrfs_root * root,struct extent_buffer * leaf,struct btrfs_dev_item * dev_item)4381 static int read_one_dev(struct btrfs_root *root,
4382 struct extent_buffer *leaf,
4383 struct btrfs_dev_item *dev_item)
4384 {
4385 struct btrfs_device *device;
4386 u64 devid;
4387 int ret;
4388 u8 fs_uuid[BTRFS_UUID_SIZE];
4389 u8 dev_uuid[BTRFS_UUID_SIZE];
4390
4391 devid = btrfs_device_id(leaf, dev_item);
4392 read_extent_buffer(leaf, dev_uuid,
4393 (unsigned long)btrfs_device_uuid(dev_item),
4394 BTRFS_UUID_SIZE);
4395 read_extent_buffer(leaf, fs_uuid,
4396 (unsigned long)btrfs_device_fsid(dev_item),
4397 BTRFS_UUID_SIZE);
4398
4399 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4400 ret = open_seed_devices(root, fs_uuid);
4401 if (ret && !btrfs_test_opt(root, DEGRADED))
4402 return ret;
4403 }
4404
4405 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4406 if (!device || !device->bdev) {
4407 if (!btrfs_test_opt(root, DEGRADED))
4408 return -EIO;
4409
4410 if (!device) {
4411 printk(KERN_WARNING "warning devid %llu missing\n",
4412 (unsigned long long)devid);
4413 device = add_missing_dev(root, devid, dev_uuid);
4414 if (!device)
4415 return -ENOMEM;
4416 } else if (!device->missing) {
4417 /*
4418 * this happens when a device that was properly setup
4419 * in the device info lists suddenly goes bad.
4420 * device->bdev is NULL, and so we have to set
4421 * device->missing to one here
4422 */
4423 root->fs_info->fs_devices->missing_devices++;
4424 device->missing = 1;
4425 }
4426 }
4427
4428 if (device->fs_devices != root->fs_info->fs_devices) {
4429 BUG_ON(device->writeable);
4430 if (device->generation !=
4431 btrfs_device_generation(leaf, dev_item))
4432 return -EINVAL;
4433 }
4434
4435 fill_device_from_item(leaf, dev_item, device);
4436 device->dev_root = root->fs_info->dev_root;
4437 device->in_fs_metadata = 1;
4438 if (device->writeable) {
4439 device->fs_devices->total_rw_bytes += device->total_bytes;
4440 spin_lock(&root->fs_info->free_chunk_lock);
4441 root->fs_info->free_chunk_space += device->total_bytes -
4442 device->bytes_used;
4443 spin_unlock(&root->fs_info->free_chunk_lock);
4444 }
4445 ret = 0;
4446 return ret;
4447 }
4448
btrfs_read_sys_array(struct btrfs_root * root)4449 int btrfs_read_sys_array(struct btrfs_root *root)
4450 {
4451 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4452 struct extent_buffer *sb;
4453 struct btrfs_disk_key *disk_key;
4454 struct btrfs_chunk *chunk;
4455 u8 *ptr;
4456 unsigned long sb_ptr;
4457 int ret = 0;
4458 u32 num_stripes;
4459 u32 array_size;
4460 u32 len = 0;
4461 u32 cur;
4462 struct btrfs_key key;
4463
4464 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4465 BTRFS_SUPER_INFO_SIZE);
4466 if (!sb)
4467 return -ENOMEM;
4468 btrfs_set_buffer_uptodate(sb);
4469 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4470 /*
4471 * The sb extent buffer is artifical and just used to read the system array.
4472 * btrfs_set_buffer_uptodate() call does not properly mark all it's
4473 * pages up-to-date when the page is larger: extent does not cover the
4474 * whole page and consequently check_page_uptodate does not find all
4475 * the page's extents up-to-date (the hole beyond sb),
4476 * write_extent_buffer then triggers a WARN_ON.
4477 *
4478 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
4479 * but sb spans only this function. Add an explicit SetPageUptodate call
4480 * to silence the warning eg. on PowerPC 64.
4481 */
4482 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4483 SetPageUptodate(sb->pages[0]);
4484
4485 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4486 array_size = btrfs_super_sys_array_size(super_copy);
4487
4488 ptr = super_copy->sys_chunk_array;
4489 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4490 cur = 0;
4491
4492 while (cur < array_size) {
4493 disk_key = (struct btrfs_disk_key *)ptr;
4494 btrfs_disk_key_to_cpu(&key, disk_key);
4495
4496 len = sizeof(*disk_key); ptr += len;
4497 sb_ptr += len;
4498 cur += len;
4499
4500 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4501 chunk = (struct btrfs_chunk *)sb_ptr;
4502 ret = read_one_chunk(root, &key, sb, chunk);
4503 if (ret)
4504 break;
4505 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4506 len = btrfs_chunk_item_size(num_stripes);
4507 } else {
4508 ret = -EIO;
4509 break;
4510 }
4511 ptr += len;
4512 sb_ptr += len;
4513 cur += len;
4514 }
4515 free_extent_buffer(sb);
4516 return ret;
4517 }
4518
btrfs_read_chunk_tree(struct btrfs_root * root)4519 int btrfs_read_chunk_tree(struct btrfs_root *root)
4520 {
4521 struct btrfs_path *path;
4522 struct extent_buffer *leaf;
4523 struct btrfs_key key;
4524 struct btrfs_key found_key;
4525 int ret;
4526 int slot;
4527
4528 root = root->fs_info->chunk_root;
4529
4530 path = btrfs_alloc_path();
4531 if (!path)
4532 return -ENOMEM;
4533
4534 mutex_lock(&uuid_mutex);
4535 lock_chunks(root);
4536
4537 /* first we search for all of the device items, and then we
4538 * read in all of the chunk items. This way we can create chunk
4539 * mappings that reference all of the devices that are afound
4540 */
4541 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4542 key.offset = 0;
4543 key.type = 0;
4544 again:
4545 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4546 if (ret < 0)
4547 goto error;
4548 while (1) {
4549 leaf = path->nodes[0];
4550 slot = path->slots[0];
4551 if (slot >= btrfs_header_nritems(leaf)) {
4552 ret = btrfs_next_leaf(root, path);
4553 if (ret == 0)
4554 continue;
4555 if (ret < 0)
4556 goto error;
4557 break;
4558 }
4559 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4560 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4561 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4562 break;
4563 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4564 struct btrfs_dev_item *dev_item;
4565 dev_item = btrfs_item_ptr(leaf, slot,
4566 struct btrfs_dev_item);
4567 ret = read_one_dev(root, leaf, dev_item);
4568 if (ret)
4569 goto error;
4570 }
4571 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4572 struct btrfs_chunk *chunk;
4573 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4574 ret = read_one_chunk(root, &found_key, leaf, chunk);
4575 if (ret)
4576 goto error;
4577 }
4578 path->slots[0]++;
4579 }
4580 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4581 key.objectid = 0;
4582 btrfs_release_path(path);
4583 goto again;
4584 }
4585 ret = 0;
4586 error:
4587 unlock_chunks(root);
4588 mutex_unlock(&uuid_mutex);
4589
4590 btrfs_free_path(path);
4591 return ret;
4592 }
4593