/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
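
/*
 * A minimal usage sketch for the helpers above, assuming the caller already
 * holds a valid btrfs_root: chunk allocation state is protected by
 * fs_info->chunk_mutex, so modifications are bracketed like this:
 *
 *	lock_chunks(root);
 *	... add/remove dev extents, resize the device, etc. ...
 *	unlock_chunks(root);
 */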

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}
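
/*
 * A minimal usage sketch for alloc_fs_devices(), based on the kerneldoc
 * above (hypothetical caller, not part of the original file):
 *
 *	struct btrfs_fs_devices *fs_devs;
 *
 *	fs_devs = alloc_fs_devices(NULL);   (NULL fsid: random UUID generated)
 *	if (IS_ERR(fs_devs))
 *		return PTR_ERR(fs_devs);
 *	...
 *	kfree(fs_devs);   (safe while the struct is not linked onto any list)
 */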

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_NOFS);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		printk(KERN_INFO "BTRFS: open %s failed\n", device_path);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (!*bh) {
		ret = -EINVAL;
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
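
/*
 * A minimal caller sketch for btrfs_get_bdev_and_sb() (hypothetical, for
 * illustration): on success the caller owns both the block device and the
 * superblock buffer head and must release them; on failure both out
 * parameters are NULL and nothing needs cleaning up.
 *
 *	struct block_device *bdev;
 *	struct buffer_head *bh;
 *	int ret;
 *
 *	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, holder, 0, &bdev, &bh);
 *	if (ret)
 *		return ret;
 *	... use (struct btrfs_super_block *)bh->b_data ...
 *	brelse(bh);
 *	blkdev_put(bdev, FMODE_READ);
 */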

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
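
/*
 * A sketch of how this worker is driven, assuming device->work has been
 * wired up to pending_bios_fn elsewhere: producers chain bios onto
 * device->pending_bios / device->pending_sync_bios under device->io_lock
 * and then queue the work item, e.g.
 *
 *	btrfs_queue_work(fs_info->submit_workers, &device->work);
 *
 * which eventually invokes pending_bios_fn() -> run_scheduled_bios().
 */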

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When the FS is already mounted:
		 * 1. If you are here and the device->name is NULL, that
		 *    means this device was missing at the time of FS mount.
		 * 2. If you are here and the device->name is different
		 *    from 'path', that means either
		 *      a. The same device disappeared and reappeared with a
		 *         different name, or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away, and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid.  We keep the
			 * one with the larger generation number, or the
			 * last-in if the generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	*fs_devices_ret = fs_devices;

	return ret;
}
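
/*
 * A minimal sketch of the return convention of device_list_add()
 * (hypothetical caller; btrfs_scan_one_device() below is the real one):
 *
 *	ret = device_list_add(path, disk_super, devid, &fs_devices);
 *	if (ret < 0)
 *		return ret;	(registration failed)
 *	if (ret > 0)
 *		ret = 0;	(first time this device was seen; log it)
 */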

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
			       struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->missing)
			fs_devices->missing_devices--;

		new_device = btrfs_alloc_device(NULL, &device->devid,
						device->uuid);
		BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

		/* Safe because we are under uuid_mutex */
		if (device->name) {
			name = rcu_string_strdup(device->name->str, GFP_NOFS);
			BUG_ON(!name); /* -ENOMEM */
			rcu_assign_pointer(new_device->name, name);
		}

		list_replace_rcu(&device->dev_list, &new_device->dev_list);
		new_device->fs_devices = device->fs_devices;

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					    &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	void *p;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;
	pgoff_t index;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	/* make sure our super fits in the device */
	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
		goto error_bdev_put;

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
		goto error_bdev_put;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_CACHE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
		goto error_bdev_put;

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_NOFS);

	if (IS_ERR_OR_NULL(page))
		goto error_bdev_put;

	p = kmap(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
		goto error_unmap;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
		} else {
			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
		}

		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

error_unmap:
	kunmap(page);
	page_cache_release(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
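
/*
 * A minimal sketch of scanning a device path into the registered-device
 * list (hypothetical caller; a device-scan request would follow roughly
 * this pattern):
 *
 *	struct btrfs_fs_devices *fs_devices;
 *	int ret;
 *
 *	ret = btrfs_scan_one_device("/dev/sdb", FMODE_READ, holder,
 *				    &fs_devices);
 *	if (ret)
 *		return ret;	(no usable btrfs signature found)
 */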

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
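
/*
 * A worked example of the overlap accounting above (illustrative numbers):
 * with one dev extent at [4096, 8192) and a query range [start, end] =
 * [0, 6143], the branch taken is key.offset > start && key.offset <= end,
 * so
 *
 *	*length += end - key.offset + 1 = 6143 - 4096 + 1 = 2048.
 *
 * The range is inclusive of 'end', hence the "+ 1" terms.
 */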

static int contains_pending_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct extent_map *em;
	int ret = 0;

	list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
		struct map_lookup *map;
		int i;

		map = (struct map_lookup *)em->bdev;
		for (i = 0; i < map->num_stripes; i++) {
			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= *start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    *start)
				continue;
			*start = map->stripes[i].physical +
				em->orig_block_len;
			ret = 1;
		}
	}

	return ret;
}


/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 * 		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(trans, device,
						    &search_start,
						    hole_size))
				hole_size = 0;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (contains_pending_extent(trans, device, &search_start, hole_size)) {
		btrfs_release_path(path);
		goto again;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
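
/*
 * A minimal caller sketch for find_free_dev_extent(), per the kerneldoc
 * above (hypothetical; the chunk allocator is the real user):
 *
 *	u64 start, len;
 *
 *	ret = find_free_dev_extent(trans, device, num_bytes, &start, &len);
 *	if (ret == -ENOSPC)
 *		...	(no hole big enough; len holds the max hole size)
 *	else if (!ret)
 *		...	(start points at a hole of at least num_bytes)
 */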

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
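
/*
 * For reference, a sketch of the on-disk key this creates (matching the
 * code above): every device item lives in the chunk tree under
 *
 *	(BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, devid)
 *
 * which is why find_next_devid() can walk to the last such key and hand
 * out found_key.offset + 1 as the next device id.
 */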

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
	return;
}
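
/*
 * A usage sketch: after the superblock on a removed device is wiped,
 * btrfs_rm_device() below calls
 *
 *	update_dev_time(device_path);
 *
 * so that ctime/mtime based probes (e.g. libblkid) notice the change.
 */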

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	unsigned seq;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	do {
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		all_avail = root->fs_info->avail_data_alloc_bits |
			    root->fs_info->avail_system_alloc_bits |
			    root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
	    root->fs_info->fs_devices->rw_devices <= 2) {
		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
		goto out;
	}
	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
	    root->fs_info->fs_devices->rw_devices <= 3) {
		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata &&
			    !tmp->is_tgtdev_for_dev_replace &&
			    !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
			goto out;
		}
	} else {
		ret = btrfs_get_bdev_and_sb(device_path,
					    FMODE_WRITE | FMODE_EXCL,
					    root->fs_info->bdev_holder, 0,
					    &bdev, &bh);
		if (ret)
			goto out;
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto error_brelse;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		unlock_chunks(root);
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		device->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		device->fs_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_kobj_rm_device(root->fs_info, device);
	}

	call_rcu(&device->rcu, free_device);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		__btrfs_close_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super && disk_super) {
		u64 bytenr;
		int i;

		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);

		/* clear the mirror copies of the super block on the disk
		 * being removed; the 0th copy has been taken care of above
		 * and the loop below takes care of the rest
		 */
		for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			bytenr = btrfs_sb_offset(i);
			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
					i_size_read(bdev->bd_inode))
				break;

			brelse(bh);
			bh = __bread(bdev, bytenr / 4096,
					BTRFS_SUPER_INFO_SIZE);
			if (!bh)
				continue;

			disk_super = (struct btrfs_super_block *)bh->b_data;

			if (btrfs_super_bytenr(disk_super) != bytenr ||
				btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
				continue;
			}
			memset(&disk_super->magic, 0,
						sizeof(disk_super->magic));
			set_buffer_dirty(bh);
			sync_dirty_buffer(bh);
		}
	}

	ret = 0;

	if (bdev) {
		/* Notify udev that device has changed */
		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

		/* Update ctime/mtime for device path for libblkid */
		update_dev_time(device_path);
	}

error_brelse:
	brelse(bh);
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		unlock_chunks(root);
	}
	goto error_brelse;
}
1803 
btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info * fs_info,struct btrfs_device * srcdev)1804 void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1805 				 struct btrfs_device *srcdev)
1806 {
1807 	struct btrfs_fs_devices *fs_devices;
1808 
1809 	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1810 
1811 	/*
1812 	 * In the case of an fs with no seed, srcdev->fs_devices points to
1813 	 * the fs_devices of fs_info. However, when the device being replaced
1814 	 * is a seed device, it points to the seed's local fs_devices.
1815 	 * In short, srcdev has the correct fs_devices in both cases.
1816 	 */
1817 	fs_devices = srcdev->fs_devices;
1818 
1819 	list_del_rcu(&srcdev->dev_list);
1820 	list_del_rcu(&srcdev->dev_alloc_list);
1821 	fs_devices->num_devices--;
1822 	if (srcdev->missing)
1823 		fs_devices->missing_devices--;
1824 
1825 	if (srcdev->writeable) {
1826 		fs_devices->rw_devices--;
1827 		/* zero out the old super if it is writable */
1828 		btrfs_scratch_superblock(srcdev);
1829 	}
1830 
1831 	if (srcdev->bdev)
1832 		fs_devices->open_devices--;
1833 
1834 	call_rcu(&srcdev->rcu, free_device);
1835 
1836 	/*
1837 	 * unless fs_devices is a seed fs, num_devices shouldn't go
1838 	 * to zero
1839 	 */
1840 	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
1841 
1842 	/* if there are no devices left we'd rather delete the fs_devices */
1843 	if (!fs_devices->num_devices) {
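		/*
		 * Example: with a chain fs_info->fs_devices -> seedA -> seedB,
		 * if fs_devices here is seedA the walk below relinks the
		 * chain to point straight at seedB before seedA is freed.
		 */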
1844 		struct btrfs_fs_devices *tmp_fs_devices;
1845 
1846 		tmp_fs_devices = fs_info->fs_devices;
1847 		while (tmp_fs_devices) {
1848 			if (tmp_fs_devices->seed == fs_devices) {
1849 				tmp_fs_devices->seed = fs_devices->seed;
1850 				break;
1851 			}
1852 			tmp_fs_devices = tmp_fs_devices->seed;
1853 		}
1854 		fs_devices->seed = NULL;
1855 		__btrfs_close_devices(fs_devices);
1856 		free_fs_devices(fs_devices);
1857 	}
1858 }
1859 
1860 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1861 				      struct btrfs_device *tgtdev)
1862 {
1863 	struct btrfs_device *next_device;
1864 
1865 	mutex_lock(&uuid_mutex);
1866 	WARN_ON(!tgtdev);
1867 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
1868 	if (tgtdev->bdev) {
1869 		btrfs_scratch_superblock(tgtdev);
1870 		fs_info->fs_devices->open_devices--;
1871 	}
1872 	fs_info->fs_devices->num_devices--;
1873 
1874 	next_device = list_entry(fs_info->fs_devices->devices.next,
1875 				 struct btrfs_device, dev_list);
1876 	if (tgtdev->bdev == fs_info->sb->s_bdev)
1877 		fs_info->sb->s_bdev = next_device->bdev;
1878 	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1879 		fs_info->fs_devices->latest_bdev = next_device->bdev;
1880 	list_del_rcu(&tgtdev->dev_list);
1881 
1882 	call_rcu(&tgtdev->rcu, free_device);
1883 
1884 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1885 	mutex_unlock(&uuid_mutex);
1886 }
1887 
1888 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1889 				     struct btrfs_device **device)
1890 {
1891 	int ret = 0;
1892 	struct btrfs_super_block *disk_super;
1893 	u64 devid;
1894 	u8 *dev_uuid;
1895 	struct block_device *bdev;
1896 	struct buffer_head *bh;
1897 
1898 	*device = NULL;
1899 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1900 				    root->fs_info->bdev_holder, 0, &bdev, &bh);
1901 	if (ret)
1902 		return ret;
1903 	disk_super = (struct btrfs_super_block *)bh->b_data;
1904 	devid = btrfs_stack_device_id(&disk_super->dev_item);
1905 	dev_uuid = disk_super->dev_item.uuid;
1906 	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1907 				    disk_super->fsid);
1908 	brelse(bh);
1909 	if (!*device)
1910 		ret = -ENOENT;
1911 	blkdev_put(bdev, FMODE_READ);
1912 	return ret;
1913 }
1914 
1915 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1916 					 char *device_path,
1917 					 struct btrfs_device **device)
1918 {
1919 	*device = NULL;
1920 	if (strcmp(device_path, "missing") == 0) {
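		/* Note: userspace passes the literal string "missing",
		 * e.g. "btrfs device delete missing <mnt>", to pick the
		 * first device recorded in the metadata that currently
		 * has no backing bdev.
		 */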
1921 		struct list_head *devices;
1922 		struct btrfs_device *tmp;
1923 
1924 		devices = &root->fs_info->fs_devices->devices;
1925 		/*
1926 		 * It is safe to read the devices since the volume_mutex
1927 		 * is held by the caller.
1928 		 */
1929 		list_for_each_entry(tmp, devices, dev_list) {
1930 			if (tmp->in_fs_metadata && !tmp->bdev) {
1931 				*device = tmp;
1932 				break;
1933 			}
1934 		}
1935 
1936 		if (!*device) {
1937 			btrfs_err(root->fs_info, "no missing device found");
1938 			return -ENOENT;
1939 		}
1940 
1941 		return 0;
1942 	} else {
1943 		return btrfs_find_device_by_path(root, device_path, device);
1944 	}
1945 }
1946 
1947 /*
1948  * Does all the dirty work required for changing the file system's UUID.
1949  */
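/*
 * Roughly: the existing devices are spliced onto a new seed_devices
 * struct that keeps the old fsid, while fs_devices itself is reset
 * and given a fresh fsid for the sprouted, writable filesystem.
 */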
1950 static int btrfs_prepare_sprout(struct btrfs_root *root)
1951 {
1952 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1953 	struct btrfs_fs_devices *old_devices;
1954 	struct btrfs_fs_devices *seed_devices;
1955 	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1956 	struct btrfs_device *device;
1957 	u64 super_flags;
1958 
1959 	BUG_ON(!mutex_is_locked(&uuid_mutex));
1960 	if (!fs_devices->seeding)
1961 		return -EINVAL;
1962 
1963 	seed_devices = __alloc_fs_devices();
1964 	if (IS_ERR(seed_devices))
1965 		return PTR_ERR(seed_devices);
1966 
1967 	old_devices = clone_fs_devices(fs_devices);
1968 	if (IS_ERR(old_devices)) {
1969 		kfree(seed_devices);
1970 		return PTR_ERR(old_devices);
1971 	}
1972 
1973 	list_add(&old_devices->list, &fs_uuids);
1974 
1975 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1976 	seed_devices->opened = 1;
1977 	INIT_LIST_HEAD(&seed_devices->devices);
1978 	INIT_LIST_HEAD(&seed_devices->alloc_list);
1979 	mutex_init(&seed_devices->device_list_mutex);
1980 
1981 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1982 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1983 			      synchronize_rcu);
1984 	list_for_each_entry(device, &seed_devices->devices, dev_list)
1985 		device->fs_devices = seed_devices;
1986 
1987 	lock_chunks(root);
1988 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1989 	unlock_chunks(root);
1990 
1991 	fs_devices->seeding = 0;
1992 	fs_devices->num_devices = 0;
1993 	fs_devices->open_devices = 0;
1994 	fs_devices->missing_devices = 0;
1995 	fs_devices->rotating = 0;
1996 	fs_devices->seed = seed_devices;
1997 
1998 	generate_random_uuid(fs_devices->fsid);
1999 	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2000 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2001 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2002 
2003 	super_flags = btrfs_super_flags(disk_super) &
2004 		      ~BTRFS_SUPER_FLAG_SEEDING;
2005 	btrfs_set_super_flags(disk_super, super_flags);
2006 
2007 	return 0;
2008 }
2009 
2010 /*
2011  * Store the expected generation for seed devices in the device items.
2012  */
2013 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2014 			       struct btrfs_root *root)
2015 {
2016 	struct btrfs_path *path;
2017 	struct extent_buffer *leaf;
2018 	struct btrfs_dev_item *dev_item;
2019 	struct btrfs_device *device;
2020 	struct btrfs_key key;
2021 	u8 fs_uuid[BTRFS_UUID_SIZE];
2022 	u8 dev_uuid[BTRFS_UUID_SIZE];
2023 	u64 devid;
2024 	int ret;
2025 
2026 	path = btrfs_alloc_path();
2027 	if (!path)
2028 		return -ENOMEM;
2029 
2030 	root = root->fs_info->chunk_root;
2031 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2032 	key.offset = 0;
2033 	key.type = BTRFS_DEV_ITEM_KEY;
2034 
2035 	while (1) {
2036 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2037 		if (ret < 0)
2038 			goto error;
2039 
2040 		leaf = path->nodes[0];
2041 next_slot:
2042 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2043 			ret = btrfs_next_leaf(root, path);
2044 			if (ret > 0)
2045 				break;
2046 			if (ret < 0)
2047 				goto error;
2048 			leaf = path->nodes[0];
2049 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2050 			btrfs_release_path(path);
2051 			continue;
2052 		}
2053 
2054 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2055 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2056 		    key.type != BTRFS_DEV_ITEM_KEY)
2057 			break;
2058 
2059 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2060 					  struct btrfs_dev_item);
2061 		devid = btrfs_device_id(leaf, dev_item);
2062 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2063 				   BTRFS_UUID_SIZE);
2064 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2065 				   BTRFS_UUID_SIZE);
2066 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2067 					   fs_uuid);
2068 		BUG_ON(!device); /* Logic error */
2069 
2070 		if (device->fs_devices->seeding) {
2071 			btrfs_set_device_generation(leaf, dev_item,
2072 						    device->generation);
2073 			btrfs_mark_buffer_dirty(leaf);
2074 		}
2075 
2076 		path->slots[0]++;
2077 		goto next_slot;
2078 	}
2079 	ret = 0;
2080 error:
2081 	btrfs_free_path(path);
2082 	return ret;
2083 }
2084 
2085 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2086 {
2087 	struct request_queue *q;
2088 	struct btrfs_trans_handle *trans;
2089 	struct btrfs_device *device;
2090 	struct block_device *bdev;
2091 	struct list_head *devices;
2092 	struct super_block *sb = root->fs_info->sb;
2093 	struct rcu_string *name;
2094 	u64 tmp;
2095 	int seeding_dev = 0;
2096 	int ret = 0;
2097 
2098 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2099 		return -EROFS;
2100 
2101 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2102 				  root->fs_info->bdev_holder);
2103 	if (IS_ERR(bdev))
2104 		return PTR_ERR(bdev);
2105 
2106 	if (root->fs_info->fs_devices->seeding) {
2107 		seeding_dev = 1;
2108 		down_write(&sb->s_umount);
2109 		mutex_lock(&uuid_mutex);
2110 	}
2111 
2112 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2113 
2114 	devices = &root->fs_info->fs_devices->devices;
2115 
2116 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2117 	list_for_each_entry(device, devices, dev_list) {
2118 		if (device->bdev == bdev) {
2119 			ret = -EEXIST;
2120 			mutex_unlock(
2121 				&root->fs_info->fs_devices->device_list_mutex);
2122 			goto error;
2123 		}
2124 	}
2125 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2126 
2127 	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2128 	if (IS_ERR(device)) {
2129 		/* we can safely leave the fs_devices entry around */
2130 		ret = PTR_ERR(device);
2131 		goto error;
2132 	}
2133 
2134 	name = rcu_string_strdup(device_path, GFP_NOFS);
2135 	if (!name) {
2136 		kfree(device);
2137 		ret = -ENOMEM;
2138 		goto error;
2139 	}
2140 	rcu_assign_pointer(device->name, name);
2141 
2142 	trans = btrfs_start_transaction(root, 0);
2143 	if (IS_ERR(trans)) {
2144 		rcu_string_free(device->name);
2145 		kfree(device);
2146 		ret = PTR_ERR(trans);
2147 		goto error;
2148 	}
2149 
2150 	q = bdev_get_queue(bdev);
2151 	if (blk_queue_discard(q))
2152 		device->can_discard = 1;
2153 	device->writeable = 1;
2154 	device->generation = trans->transid;
2155 	device->io_width = root->sectorsize;
2156 	device->io_align = root->sectorsize;
2157 	device->sector_size = root->sectorsize;
2158 	device->total_bytes = i_size_read(bdev->bd_inode);
2159 	device->disk_total_bytes = device->total_bytes;
2160 	device->commit_total_bytes = device->total_bytes;
2161 	device->dev_root = root->fs_info->dev_root;
2162 	device->bdev = bdev;
2163 	device->in_fs_metadata = 1;
2164 	device->is_tgtdev_for_dev_replace = 0;
2165 	device->mode = FMODE_EXCL;
2166 	device->dev_stats_valid = 1;
2167 	set_blocksize(device->bdev, 4096);
2168 
2169 	if (seeding_dev) {
2170 		sb->s_flags &= ~MS_RDONLY;
2171 		ret = btrfs_prepare_sprout(root);
2172 		BUG_ON(ret); /* -ENOMEM */
2173 	}
2174 
2175 	device->fs_devices = root->fs_info->fs_devices;
2176 
2177 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2178 	lock_chunks(root);
2179 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2180 	list_add(&device->dev_alloc_list,
2181 		 &root->fs_info->fs_devices->alloc_list);
2182 	root->fs_info->fs_devices->num_devices++;
2183 	root->fs_info->fs_devices->open_devices++;
2184 	root->fs_info->fs_devices->rw_devices++;
2185 	root->fs_info->fs_devices->total_devices++;
2186 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2187 
2188 	spin_lock(&root->fs_info->free_chunk_lock);
2189 	root->fs_info->free_chunk_space += device->total_bytes;
2190 	spin_unlock(&root->fs_info->free_chunk_lock);
2191 
2192 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2193 		root->fs_info->fs_devices->rotating = 1;
2194 
2195 	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2196 	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2197 				    tmp + device->total_bytes);
2198 
2199 	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2200 	btrfs_set_super_num_devices(root->fs_info->super_copy,
2201 				    tmp + 1);
2202 
2203 	/* add sysfs device entry */
2204 	btrfs_kobj_add_device(root->fs_info, device);
2205 
2206 	/*
2207 	 * we've got more storage, clear any full flags on the space
2208 	 * infos
2209 	 */
2210 	btrfs_clear_space_info_full(root->fs_info);
2211 
2212 	unlock_chunks(root);
2213 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2214 
2215 	if (seeding_dev) {
2216 		lock_chunks(root);
2217 		ret = init_first_rw_device(trans, root, device);
2218 		unlock_chunks(root);
2219 		if (ret) {
2220 			btrfs_abort_transaction(trans, root, ret);
2221 			goto error_trans;
2222 		}
2223 	}
2224 
2225 	ret = btrfs_add_device(trans, root, device);
2226 	if (ret) {
2227 		btrfs_abort_transaction(trans, root, ret);
2228 		goto error_trans;
2229 	}
2230 
2231 	if (seeding_dev) {
2232 		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2233 
2234 		ret = btrfs_finish_sprout(trans, root);
2235 		if (ret) {
2236 			btrfs_abort_transaction(trans, root, ret);
2237 			goto error_trans;
2238 		}
2239 
2240 		/* Sprouting changes the fsid of the mounted root,
2241 		 * so rename the fsid in sysfs
2242 		 */
2243 		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2244 						root->fs_info->fsid);
2245 		if (kobject_rename(&root->fs_info->super_kobj, fsid_buf))
2246 			goto error_trans;
2247 	}
2248 
2249 	root->fs_info->num_tolerated_disk_barrier_failures =
2250 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2251 	ret = btrfs_commit_transaction(trans, root);
2252 
2253 	if (seeding_dev) {
2254 		mutex_unlock(&uuid_mutex);
2255 		up_write(&sb->s_umount);
2256 
2257 		if (ret) /* transaction commit */
2258 			return ret;
2259 
2260 		ret = btrfs_relocate_sys_chunks(root);
2261 		if (ret < 0)
2262 			btrfs_error(root->fs_info, ret,
2263 				    "Failed to relocate sys chunks after "
2264 				    "device initialization. This can be fixed "
2265 				    "using the \"btrfs balance\" command.");
2266 		trans = btrfs_attach_transaction(root);
2267 		if (IS_ERR(trans)) {
2268 			if (PTR_ERR(trans) == -ENOENT)
2269 				return 0;
2270 			return PTR_ERR(trans);
2271 		}
2272 		ret = btrfs_commit_transaction(trans, root);
2273 	}
2274 
2275 	/* Update ctime/mtime for libblkid */
2276 	update_dev_time(device_path);
2277 	return ret;
2278 
2279 error_trans:
2280 	btrfs_end_transaction(trans, root);
2281 	rcu_string_free(device->name);
2282 	btrfs_kobj_rm_device(root->fs_info, device);
2283 	kfree(device);
2284 error:
2285 	blkdev_put(bdev, FMODE_EXCL);
2286 	if (seeding_dev) {
2287 		mutex_unlock(&uuid_mutex);
2288 		up_write(&sb->s_umount);
2289 	}
2290 	return ret;
2291 }
2292 
2293 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2294 				  struct btrfs_device *srcdev,
2295 				  struct btrfs_device **device_out)
2296 {
2297 	struct request_queue *q;
2298 	struct btrfs_device *device;
2299 	struct block_device *bdev;
2300 	struct btrfs_fs_info *fs_info = root->fs_info;
2301 	struct list_head *devices;
2302 	struct rcu_string *name;
2303 	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2304 	int ret = 0;
2305 
2306 	*device_out = NULL;
2307 	if (fs_info->fs_devices->seeding) {
2308 		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2309 		return -EINVAL;
2310 	}
2311 
2312 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2313 				  fs_info->bdev_holder);
2314 	if (IS_ERR(bdev)) {
2315 		btrfs_err(fs_info, "target device %s is invalid!", device_path);
2316 		return PTR_ERR(bdev);
2317 	}
2318 
2319 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2320 
2321 	devices = &fs_info->fs_devices->devices;
2322 	list_for_each_entry(device, devices, dev_list) {
2323 		if (device->bdev == bdev) {
2324 			btrfs_err(fs_info, "target device is in the filesystem!");
2325 			ret = -EEXIST;
2326 			goto error;
2327 		}
2328 	}
2329 
2330 
2331 	if (i_size_read(bdev->bd_inode) <
2332 	    btrfs_device_get_total_bytes(srcdev)) {
2333 		btrfs_err(fs_info, "target device is smaller than source device!");
2334 		ret = -EINVAL;
2335 		goto error;
2336 	}
2337 
2338 
2339 	device = btrfs_alloc_device(NULL, &devid, NULL);
2340 	if (IS_ERR(device)) {
2341 		ret = PTR_ERR(device);
2342 		goto error;
2343 	}
2344 
2345 	name = rcu_string_strdup(device_path, GFP_NOFS);
2346 	if (!name) {
2347 		kfree(device);
2348 		ret = -ENOMEM;
2349 		goto error;
2350 	}
2351 	rcu_assign_pointer(device->name, name);
2352 
2353 	q = bdev_get_queue(bdev);
2354 	if (blk_queue_discard(q))
2355 		device->can_discard = 1;
2356 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2357 	device->writeable = 1;
2358 	device->generation = 0;
2359 	device->io_width = root->sectorsize;
2360 	device->io_align = root->sectorsize;
2361 	device->sector_size = root->sectorsize;
2362 	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2363 	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2364 	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2365 	ASSERT(list_empty(&srcdev->resized_list));
2366 	device->commit_total_bytes = srcdev->commit_total_bytes;
2367 	device->commit_bytes_used = device->bytes_used;
2368 	device->dev_root = fs_info->dev_root;
2369 	device->bdev = bdev;
2370 	device->in_fs_metadata = 1;
2371 	device->is_tgtdev_for_dev_replace = 1;
2372 	device->mode = FMODE_EXCL;
2373 	device->dev_stats_valid = 1;
2374 	set_blocksize(device->bdev, 4096);
2375 	device->fs_devices = fs_info->fs_devices;
2376 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2377 	fs_info->fs_devices->num_devices++;
2378 	fs_info->fs_devices->open_devices++;
2379 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2380 
2381 	*device_out = device;
2382 	return ret;
2383 
2384 error:
2385 	blkdev_put(bdev, FMODE_EXCL);
2386 	return ret;
2387 }
2388 
2389 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2390 					      struct btrfs_device *tgtdev)
2391 {
2392 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2393 	tgtdev->io_width = fs_info->dev_root->sectorsize;
2394 	tgtdev->io_align = fs_info->dev_root->sectorsize;
2395 	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2396 	tgtdev->dev_root = fs_info->dev_root;
2397 	tgtdev->in_fs_metadata = 1;
2398 }
2399 
2400 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2401 					struct btrfs_device *device)
2402 {
2403 	int ret;
2404 	struct btrfs_path *path;
2405 	struct btrfs_root *root;
2406 	struct btrfs_dev_item *dev_item;
2407 	struct extent_buffer *leaf;
2408 	struct btrfs_key key;
2409 
2410 	root = device->dev_root->fs_info->chunk_root;
2411 
2412 	path = btrfs_alloc_path();
2413 	if (!path)
2414 		return -ENOMEM;
2415 
2416 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2417 	key.type = BTRFS_DEV_ITEM_KEY;
2418 	key.offset = device->devid;
2419 
2420 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2421 	if (ret < 0)
2422 		goto out;
2423 
2424 	if (ret > 0) {
2425 		ret = -ENOENT;
2426 		goto out;
2427 	}
2428 
2429 	leaf = path->nodes[0];
2430 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2431 
2432 	btrfs_set_device_id(leaf, dev_item, device->devid);
2433 	btrfs_set_device_type(leaf, dev_item, device->type);
2434 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2435 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2436 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2437 	btrfs_set_device_total_bytes(leaf, dev_item,
2438 				     btrfs_device_get_disk_total_bytes(device));
2439 	btrfs_set_device_bytes_used(leaf, dev_item,
2440 				    btrfs_device_get_bytes_used(device));
2441 	btrfs_mark_buffer_dirty(leaf);
2442 
2443 out:
2444 	btrfs_free_path(path);
2445 	return ret;
2446 }
2447 
2448 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2449 		      struct btrfs_device *device, u64 new_size)
2450 {
2451 	struct btrfs_super_block *super_copy =
2452 		device->dev_root->fs_info->super_copy;
2453 	struct btrfs_fs_devices *fs_devices;
2454 	u64 old_total;
2455 	u64 diff;
2456 
2457 	if (!device->writeable)
2458 		return -EACCES;
2459 
2460 	lock_chunks(device->dev_root);
2461 	old_total = btrfs_super_total_bytes(super_copy);
2462 	diff = new_size - device->total_bytes;
2463 
2464 	if (new_size <= device->total_bytes ||
2465 	    device->is_tgtdev_for_dev_replace) {
2466 		unlock_chunks(device->dev_root);
2467 		return -EINVAL;
2468 	}
2469 
2470 	fs_devices = device->dev_root->fs_info->fs_devices;
2471 
2472 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2473 	device->fs_devices->total_rw_bytes += diff;
2474 
2475 	btrfs_device_set_total_bytes(device, new_size);
2476 	btrfs_device_set_disk_total_bytes(device, new_size);
2477 	btrfs_clear_space_info_full(device->dev_root->fs_info);
2478 	if (list_empty(&device->resized_list))
2479 		list_add_tail(&device->resized_list,
2480 			      &fs_devices->resized_devices);
2481 	unlock_chunks(device->dev_root);
2482 
2483 	return btrfs_update_device(trans, device);
2484 }
2485 
2486 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2487 			    struct btrfs_root *root,
2488 			    u64 chunk_tree, u64 chunk_objectid,
2489 			    u64 chunk_offset)
2490 {
2491 	int ret;
2492 	struct btrfs_path *path;
2493 	struct btrfs_key key;
2494 
2495 	root = root->fs_info->chunk_root;
2496 	path = btrfs_alloc_path();
2497 	if (!path)
2498 		return -ENOMEM;
2499 
2500 	key.objectid = chunk_objectid;
2501 	key.offset = chunk_offset;
2502 	key.type = BTRFS_CHUNK_ITEM_KEY;
2503 
2504 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2505 	if (ret < 0)
2506 		goto out;
2507 	else if (ret > 0) { /* Logic error or corruption */
2508 		btrfs_error(root->fs_info, -ENOENT,
2509 			    "Failed lookup while freeing chunk.");
2510 		ret = -ENOENT;
2511 		goto out;
2512 	}
2513 
2514 	ret = btrfs_del_item(trans, root, path);
2515 	if (ret < 0)
2516 		btrfs_error(root->fs_info, ret,
2517 			    "Failed to delete chunk item.");
2518 out:
2519 	btrfs_free_path(path);
2520 	return ret;
2521 }
2522 
2523 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2524 			chunk_offset)
2525 {
2526 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2527 	struct btrfs_disk_key *disk_key;
2528 	struct btrfs_chunk *chunk;
2529 	u8 *ptr;
2530 	int ret = 0;
2531 	u32 num_stripes;
2532 	u32 array_size;
2533 	u32 len = 0;
2534 	u32 cur;
2535 	struct btrfs_key key;
2536 
2537 	lock_chunks(root);
2538 	array_size = btrfs_super_sys_array_size(super_copy);
2539 
2540 	ptr = super_copy->sys_chunk_array;
2541 	cur = 0;
2542 
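	/*
	 * The sys_chunk_array is a packed sequence of (btrfs_disk_key,
	 * btrfs_chunk + stripes) pairs; deleting an entry memmove()s the
	 * tail down and shrinks the recorded array size.
	 */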
2543 	while (cur < array_size) {
2544 		disk_key = (struct btrfs_disk_key *)ptr;
2545 		btrfs_disk_key_to_cpu(&key, disk_key);
2546 
2547 		len = sizeof(*disk_key);
2548 
2549 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2550 			chunk = (struct btrfs_chunk *)(ptr + len);
2551 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2552 			len += btrfs_chunk_item_size(num_stripes);
2553 		} else {
2554 			ret = -EIO;
2555 			break;
2556 		}
2557 		if (key.objectid == chunk_objectid &&
2558 		    key.offset == chunk_offset) {
2559 			memmove(ptr, ptr + len, array_size - (cur + len));
2560 			array_size -= len;
2561 			btrfs_set_super_sys_array_size(super_copy, array_size);
2562 		} else {
2563 			ptr += len;
2564 			cur += len;
2565 		}
2566 	}
2567 	unlock_chunks(root);
2568 	return ret;
2569 }
2570 
2571 int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2572 		       struct btrfs_root *root, u64 chunk_offset)
2573 {
2574 	struct extent_map_tree *em_tree;
2575 	struct extent_map *em;
2576 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2577 	struct map_lookup *map;
2578 	u64 dev_extent_len = 0;
2579 	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2580 	u64 chunk_tree = root->fs_info->chunk_root->objectid;
2581 	int i, ret = 0;
2582 
2583 	/* Just in case */
2584 	root = root->fs_info->chunk_root;
2585 	em_tree = &root->fs_info->mapping_tree.map_tree;
2586 
2587 	read_lock(&em_tree->lock);
2588 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2589 	read_unlock(&em_tree->lock);
2590 
2591 	if (!em || em->start > chunk_offset ||
2592 	    em->start + em->len < chunk_offset) {
2593 		/*
2594 		 * This is a logic error, but we don't want to just rely on the
2595 		 * user having built with ASSERT enabled, so if ASSERT doesn't
2596 		 * do anything we still error out.
2597 		 */
2598 		ASSERT(0);
2599 		if (em)
2600 			free_extent_map(em);
2601 		return -EINVAL;
2602 	}
2603 	map = (struct map_lookup *)em->bdev;
2604 
2605 	for (i = 0; i < map->num_stripes; i++) {
2606 		struct btrfs_device *device = map->stripes[i].dev;
2607 		ret = btrfs_free_dev_extent(trans, device,
2608 					    map->stripes[i].physical,
2609 					    &dev_extent_len);
2610 		if (ret) {
2611 			btrfs_abort_transaction(trans, root, ret);
2612 			goto out;
2613 		}
2614 
2615 		if (device->bytes_used > 0) {
2616 			lock_chunks(root);
2617 			btrfs_device_set_bytes_used(device,
2618 					device->bytes_used - dev_extent_len);
2619 			spin_lock(&root->fs_info->free_chunk_lock);
2620 			root->fs_info->free_chunk_space += dev_extent_len;
2621 			spin_unlock(&root->fs_info->free_chunk_lock);
2622 			btrfs_clear_space_info_full(root->fs_info);
2623 			unlock_chunks(root);
2624 		}
2625 
2626 		if (map->stripes[i].dev) {
2627 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2628 			if (ret) {
2629 				btrfs_abort_transaction(trans, root, ret);
2630 				goto out;
2631 			}
2632 		}
2633 	}
2634 	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2635 			       chunk_offset);
2636 	if (ret) {
2637 		btrfs_abort_transaction(trans, root, ret);
2638 		goto out;
2639 	}
2640 
2641 	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2642 
2643 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2644 		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2645 		if (ret) {
2646 			btrfs_abort_transaction(trans, root, ret);
2647 			goto out;
2648 		}
2649 	}
2650 
2651 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2652 	if (ret) {
2653 		btrfs_abort_transaction(trans, extent_root, ret);
2654 		goto out;
2655 	}
2656 
2657 	write_lock(&em_tree->lock);
2658 	remove_extent_mapping(em_tree, em);
2659 	write_unlock(&em_tree->lock);
2660 
2661 	/* once for the tree */
2662 	free_extent_map(em);
2663 out:
2664 	/* once for us */
2665 	free_extent_map(em);
2666 	return ret;
2667 }
2668 
2669 static int btrfs_relocate_chunk(struct btrfs_root *root,
2670 			 u64 chunk_tree, u64 chunk_objectid,
2671 			 u64 chunk_offset)
2672 {
2673 	struct btrfs_root *extent_root;
2674 	struct btrfs_trans_handle *trans;
2675 	int ret;
2676 
2677 	root = root->fs_info->chunk_root;
2678 	extent_root = root->fs_info->extent_root;
2679 
2680 	ret = btrfs_can_relocate(extent_root, chunk_offset);
2681 	if (ret)
2682 		return -ENOSPC;
2683 
2684 	/* step one, relocate all the extents inside this chunk */
2685 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2686 	if (ret)
2687 		return ret;
2688 
2689 	trans = btrfs_start_transaction(root, 0);
2690 	if (IS_ERR(trans)) {
2691 		ret = PTR_ERR(trans);
2692 		btrfs_std_error(root->fs_info, ret);
2693 		return ret;
2694 	}
2695 
2696 	/*
2697 	 * step two, delete the device extents and the
2698 	 * chunk tree entries
2699 	 */
2700 	ret = btrfs_remove_chunk(trans, root, chunk_offset);
2701 	btrfs_end_transaction(trans, root);
2702 	return ret;
2703 }
2704 
2705 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2706 {
2707 	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2708 	struct btrfs_path *path;
2709 	struct extent_buffer *leaf;
2710 	struct btrfs_chunk *chunk;
2711 	struct btrfs_key key;
2712 	struct btrfs_key found_key;
2713 	u64 chunk_tree = chunk_root->root_key.objectid;
2714 	u64 chunk_type;
2715 	bool retried = false;
2716 	int failed = 0;
2717 	int ret;
2718 
2719 	path = btrfs_alloc_path();
2720 	if (!path)
2721 		return -ENOMEM;
2722 
2723 again:
2724 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2725 	key.offset = (u64)-1;
2726 	key.type = BTRFS_CHUNK_ITEM_KEY;
2727 
2728 	while (1) {
2729 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2730 		if (ret < 0)
2731 			goto error;
2732 		BUG_ON(ret == 0); /* Corruption */
2733 
2734 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2735 					  key.type);
2736 		if (ret < 0)
2737 			goto error;
2738 		if (ret > 0)
2739 			break;
2740 
2741 		leaf = path->nodes[0];
2742 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2743 
2744 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2745 				       struct btrfs_chunk);
2746 		chunk_type = btrfs_chunk_type(leaf, chunk);
2747 		btrfs_release_path(path);
2748 
2749 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2750 			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2751 						   found_key.objectid,
2752 						   found_key.offset);
2753 			if (ret == -ENOSPC)
2754 				failed++;
2755 			else
2756 				BUG_ON(ret);
2757 		}
2758 
2759 		if (found_key.offset == 0)
2760 			break;
2761 		key.offset = found_key.offset - 1;
2762 	}
2763 	ret = 0;
2764 	if (failed && !retried) {
2765 		failed = 0;
2766 		retried = true;
2767 		goto again;
2768 	} else if (WARN_ON(failed && retried)) {
2769 		ret = -ENOSPC;
2770 	}
2771 error:
2772 	btrfs_free_path(path);
2773 	return ret;
2774 }
2775 
2776 static int insert_balance_item(struct btrfs_root *root,
2777 			       struct btrfs_balance_control *bctl)
2778 {
2779 	struct btrfs_trans_handle *trans;
2780 	struct btrfs_balance_item *item;
2781 	struct btrfs_disk_balance_args disk_bargs;
2782 	struct btrfs_path *path;
2783 	struct extent_buffer *leaf;
2784 	struct btrfs_key key;
2785 	int ret, err;
2786 
2787 	path = btrfs_alloc_path();
2788 	if (!path)
2789 		return -ENOMEM;
2790 
2791 	trans = btrfs_start_transaction(root, 0);
2792 	if (IS_ERR(trans)) {
2793 		btrfs_free_path(path);
2794 		return PTR_ERR(trans);
2795 	}
2796 
2797 	key.objectid = BTRFS_BALANCE_OBJECTID;
2798 	key.type = BTRFS_BALANCE_ITEM_KEY;
2799 	key.offset = 0;
2800 
2801 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2802 				      sizeof(*item));
2803 	if (ret)
2804 		goto out;
2805 
2806 	leaf = path->nodes[0];
2807 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2808 
2809 	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2810 
2811 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2812 	btrfs_set_balance_data(leaf, item, &disk_bargs);
2813 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2814 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2815 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2816 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2817 
2818 	btrfs_set_balance_flags(leaf, item, bctl->flags);
2819 
2820 	btrfs_mark_buffer_dirty(leaf);
2821 out:
2822 	btrfs_free_path(path);
2823 	err = btrfs_commit_transaction(trans, root);
2824 	if (err && !ret)
2825 		ret = err;
2826 	return ret;
2827 }
2828 
2829 static int del_balance_item(struct btrfs_root *root)
2830 {
2831 	struct btrfs_trans_handle *trans;
2832 	struct btrfs_path *path;
2833 	struct btrfs_key key;
2834 	int ret, err;
2835 
2836 	path = btrfs_alloc_path();
2837 	if (!path)
2838 		return -ENOMEM;
2839 
2840 	trans = btrfs_start_transaction(root, 0);
2841 	if (IS_ERR(trans)) {
2842 		btrfs_free_path(path);
2843 		return PTR_ERR(trans);
2844 	}
2845 
2846 	key.objectid = BTRFS_BALANCE_OBJECTID;
2847 	key.type = BTRFS_BALANCE_ITEM_KEY;
2848 	key.offset = 0;
2849 
2850 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2851 	if (ret < 0)
2852 		goto out;
2853 	if (ret > 0) {
2854 		ret = -ENOENT;
2855 		goto out;
2856 	}
2857 
2858 	ret = btrfs_del_item(trans, root, path);
2859 out:
2860 	btrfs_free_path(path);
2861 	err = btrfs_commit_transaction(trans, root);
2862 	if (err && !ret)
2863 		ret = err;
2864 	return ret;
2865 }
2866 
2867 /*
2868  * This is a heuristic used to reduce the number of chunks balanced on
2869  * resume after balance was interrupted.
2870  */
2871 static void update_balance_args(struct btrfs_balance_control *bctl)
2872 {
2873 	/*
2874 	 * Turn on soft mode for chunk types that were being converted.
2875 	 */
2876 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2877 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2878 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2879 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2880 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2881 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2882 
2883 	/*
2884 	 * Turn on the usage filter if it is not already used.  The idea is
2885 	 * that chunks that we have already balanced should be
2886 	 * reasonably full.  Don't do it for chunks that are being
2887 	 * converted - that will keep us from relocating unconverted
2888 	 * (albeit full) chunks.
2889 	 */
2890 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2891 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2892 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2893 		bctl->data.usage = 90;
2894 	}
2895 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2896 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2897 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2898 		bctl->sys.usage = 90;
2899 	}
2900 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2901 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2902 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2903 		bctl->meta.usage = 90;
2904 	}
2905 }
2906 
2907 /*
2908  * Should be called with both balance and volume mutexes held to
2909  * serialize other volume operations (add_dev/rm_dev/resize) with
2910  * restriper.  Same goes for unset_balance_control.
2911  */
2912 static void set_balance_control(struct btrfs_balance_control *bctl)
2913 {
2914 	struct btrfs_fs_info *fs_info = bctl->fs_info;
2915 
2916 	BUG_ON(fs_info->balance_ctl);
2917 
2918 	spin_lock(&fs_info->balance_lock);
2919 	fs_info->balance_ctl = bctl;
2920 	spin_unlock(&fs_info->balance_lock);
2921 }
2922 
2923 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2924 {
2925 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2926 
2927 	BUG_ON(!fs_info->balance_ctl);
2928 
2929 	spin_lock(&fs_info->balance_lock);
2930 	fs_info->balance_ctl = NULL;
2931 	spin_unlock(&fs_info->balance_lock);
2932 
2933 	kfree(bctl);
2934 }
2935 
2936 /*
2937  * Balance filters.  Return 1 if chunk should be filtered out
2938  * (should not be balanced).
2939  */
2940 static int chunk_profiles_filter(u64 chunk_type,
2941 				 struct btrfs_balance_args *bargs)
2942 {
2943 	chunk_type = chunk_to_extended(chunk_type) &
2944 				BTRFS_EXTENDED_PROFILE_MASK;
2945 
2946 	if (bargs->profiles & chunk_type)
2947 		return 0;
2948 
2949 	return 1;
2950 }
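/*
 * Example: a chunk with no replication bits set is mapped by
 * chunk_to_extended() to BTRFS_AVAIL_ALLOC_BIT_SINGLE, so it can be
 * matched by a "profiles=single" balance filter from userspace.
 */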
2951 
2952 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2953 			      struct btrfs_balance_args *bargs)
2954 {
2955 	struct btrfs_block_group_cache *cache;
2956 	u64 chunk_used, user_thresh;
2957 	int ret = 1;
2958 
2959 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2960 	chunk_used = btrfs_block_group_used(&cache->item);
2961 
2962 	if (bargs->usage == 0)
2963 		user_thresh = 1;
2964 	else if (bargs->usage > 100)
2965 		user_thresh = cache->key.offset;
2966 	else
2967 		user_thresh = div_factor_fine(cache->key.offset,
2968 					      bargs->usage);
2969 
2970 	if (chunk_used < user_thresh)
2971 		ret = 0;
2972 
2973 	btrfs_put_block_group(cache);
2974 	return ret;
2975 }
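/*
 * Worked example: with bargs->usage == 50 and a 1GiB chunk,
 * user_thresh comes out at 512MiB (div_factor_fine() computes
 * size * usage / 100), so any chunk less than half full is balanced.
 */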
2976 
2977 static int chunk_devid_filter(struct extent_buffer *leaf,
2978 			      struct btrfs_chunk *chunk,
2979 			      struct btrfs_balance_args *bargs)
2980 {
2981 	struct btrfs_stripe *stripe;
2982 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2983 	int i;
2984 
2985 	for (i = 0; i < num_stripes; i++) {
2986 		stripe = btrfs_stripe_nr(chunk, i);
2987 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2988 			return 0;
2989 	}
2990 
2991 	return 1;
2992 }
2993 
2994 /* [pstart, pend) */
2995 static int chunk_drange_filter(struct extent_buffer *leaf,
2996 			       struct btrfs_chunk *chunk,
2997 			       u64 chunk_offset,
2998 			       struct btrfs_balance_args *bargs)
2999 {
3000 	struct btrfs_stripe *stripe;
3001 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3002 	u64 stripe_offset;
3003 	u64 stripe_length;
3004 	int factor;
3005 	int i;
3006 
3007 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3008 		return 0;
3009 
3010 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3011 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3012 		factor = num_stripes / 2;
3013 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3014 		factor = num_stripes - 1;
3015 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3016 		factor = num_stripes - 2;
3017 	} else {
3018 		factor = num_stripes;
3019 	}
3020 
3021 	for (i = 0; i < num_stripes; i++) {
3022 		stripe = btrfs_stripe_nr(chunk, i);
3023 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3024 			continue;
3025 
3026 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3027 		stripe_length = btrfs_chunk_length(leaf, chunk);
3028 		do_div(stripe_length, factor);
3029 
3030 		if (stripe_offset < bargs->pend &&
3031 		    stripe_offset + stripe_length > bargs->pstart)
3032 			return 0;
3033 	}
3034 
3035 	return 1;
3036 }
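/*
 * Example: a RAID1 chunk has 2 stripes and factor 1, so each stripe
 * spans the full chunk length on its device; RAID0 with 4 stripes
 * (factor 4) puts length/4 on each device, and RAID5 with 4 stripes
 * (factor 3) stores length/3 per device, one device extent's worth
 * of that being parity in total.
 */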
3037 
3038 /* [vstart, vend) */
3039 static int chunk_vrange_filter(struct extent_buffer *leaf,
3040 			       struct btrfs_chunk *chunk,
3041 			       u64 chunk_offset,
3042 			       struct btrfs_balance_args *bargs)
3043 {
3044 	if (chunk_offset < bargs->vend &&
3045 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3046 		/* at least part of the chunk is inside this vrange */
3047 		return 0;
3048 
3049 	return 1;
3050 }
3051 
3052 static int chunk_soft_convert_filter(u64 chunk_type,
3053 				     struct btrfs_balance_args *bargs)
3054 {
3055 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3056 		return 0;
3057 
3058 	chunk_type = chunk_to_extended(chunk_type) &
3059 				BTRFS_EXTENDED_PROFILE_MASK;
3060 
3061 	if (bargs->target == chunk_type)
3062 		return 1;
3063 
3064 	return 0;
3065 }
3066 
3067 static int should_balance_chunk(struct btrfs_root *root,
3068 				struct extent_buffer *leaf,
3069 				struct btrfs_chunk *chunk, u64 chunk_offset)
3070 {
3071 	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3072 	struct btrfs_balance_args *bargs = NULL;
3073 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3074 
3075 	/* type filter */
3076 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3077 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3078 		return 0;
3079 	}
3080 
3081 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3082 		bargs = &bctl->data;
3083 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3084 		bargs = &bctl->sys;
3085 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3086 		bargs = &bctl->meta;
3087 
3088 	/* profiles filter */
3089 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3090 	    chunk_profiles_filter(chunk_type, bargs)) {
3091 		return 0;
3092 	}
3093 
3094 	/* usage filter */
3095 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3096 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
3097 		return 0;
3098 	}
3099 
3100 	/* devid filter */
3101 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3102 	    chunk_devid_filter(leaf, chunk, bargs)) {
3103 		return 0;
3104 	}
3105 
3106 	/* drange filter, makes sense only with devid filter */
3107 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3108 	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
3109 		return 0;
3110 	}
3111 
3112 	/* vrange filter */
3113 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3114 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3115 		return 0;
3116 	}
3117 
3118 	/* soft profile changing mode */
3119 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3120 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3121 		return 0;
3122 	}
3123 
3124 	/*
3125 	 * limited by count, must be the last filter
3126 	 */
3127 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3128 		if (bargs->limit == 0)
3129 			return 0;
3130 		else
3131 			bargs->limit--;
3132 	}
3133 
3134 	return 1;
3135 }
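/*
 * Note: the filters above are effectively ANDed together; a chunk is
 * balanced only if every configured filter declines to exclude it,
 * and the limit counter is checked last so chunks rejected by the
 * earlier filters never consume it.
 */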
3136 
3137 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3138 {
3139 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3140 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3141 	struct btrfs_root *dev_root = fs_info->dev_root;
3142 	struct list_head *devices;
3143 	struct btrfs_device *device;
3144 	u64 old_size;
3145 	u64 size_to_free;
3146 	struct btrfs_chunk *chunk;
3147 	struct btrfs_path *path;
3148 	struct btrfs_key key;
3149 	struct btrfs_key found_key;
3150 	struct btrfs_trans_handle *trans;
3151 	struct extent_buffer *leaf;
3152 	int slot;
3153 	int ret;
3154 	int enospc_errors = 0;
3155 	bool counting = true;
3156 	u64 limit_data = bctl->data.limit;
3157 	u64 limit_meta = bctl->meta.limit;
3158 	u64 limit_sys = bctl->sys.limit;
3159 
3160 	/* step one, make some room on all the devices */
3161 	devices = &fs_info->fs_devices->devices;
3162 	list_for_each_entry(device, devices, dev_list) {
3163 		old_size = btrfs_device_get_total_bytes(device);
3164 		size_to_free = div_factor(old_size, 1);
3165 		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
3166 		if (!device->writeable ||
3167 		    btrfs_device_get_total_bytes(device) -
3168 		    btrfs_device_get_bytes_used(device) > size_to_free ||
3169 		    device->is_tgtdev_for_dev_replace)
3170 			continue;
3171 
3172 		ret = btrfs_shrink_device(device, old_size - size_to_free);
3173 		if (ret == -ENOSPC)
3174 			break;
3175 		BUG_ON(ret);
3176 
3177 		trans = btrfs_start_transaction(dev_root, 0);
3178 		BUG_ON(IS_ERR(trans));
3179 
3180 		ret = btrfs_grow_device(trans, device, old_size);
3181 		BUG_ON(ret);
3182 
3183 		btrfs_end_transaction(trans, dev_root);
3184 	}
3185 
3186 	/* step two, relocate all the chunks */
3187 	path = btrfs_alloc_path();
3188 	if (!path) {
3189 		ret = -ENOMEM;
3190 		goto error;
3191 	}
3192 
3193 	/* zero out stat counters */
3194 	spin_lock(&fs_info->balance_lock);
3195 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3196 	spin_unlock(&fs_info->balance_lock);
3197 again:
3198 	if (!counting) {
3199 		bctl->data.limit = limit_data;
3200 		bctl->meta.limit = limit_meta;
3201 		bctl->sys.limit = limit_sys;
3202 	}
3203 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3204 	key.offset = (u64)-1;
3205 	key.type = BTRFS_CHUNK_ITEM_KEY;
3206 
3207 	while (1) {
3208 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3209 		    atomic_read(&fs_info->balance_cancel_req)) {
3210 			ret = -ECANCELED;
3211 			goto error;
3212 		}
3213 
3214 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3215 		if (ret < 0)
3216 			goto error;
3217 
3218 		/*
3219 		 * this shouldn't happen; it means the last relocation
3220 		 * failed
3221 		 */
3222 		if (ret == 0)
3223 			BUG(); /* FIXME break ? */
3224 
3225 		ret = btrfs_previous_item(chunk_root, path, 0,
3226 					  BTRFS_CHUNK_ITEM_KEY);
3227 		if (ret) {
3228 			ret = 0;
3229 			break;
3230 		}
3231 
3232 		leaf = path->nodes[0];
3233 		slot = path->slots[0];
3234 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3235 
3236 		if (found_key.objectid != key.objectid)
3237 			break;
3238 
3239 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3240 
3241 		if (!counting) {
3242 			spin_lock(&fs_info->balance_lock);
3243 			bctl->stat.considered++;
3244 			spin_unlock(&fs_info->balance_lock);
3245 		}
3246 
3247 		ret = should_balance_chunk(chunk_root, leaf, chunk,
3248 					   found_key.offset);
3249 		btrfs_release_path(path);
3250 		if (!ret)
3251 			goto loop;
3252 
3253 		if (counting) {
3254 			spin_lock(&fs_info->balance_lock);
3255 			bctl->stat.expected++;
3256 			spin_unlock(&fs_info->balance_lock);
3257 			goto loop;
3258 		}
3259 
3260 		ret = btrfs_relocate_chunk(chunk_root,
3261 					   chunk_root->root_key.objectid,
3262 					   found_key.objectid,
3263 					   found_key.offset);
3264 		if (ret && ret != -ENOSPC)
3265 			goto error;
3266 		if (ret == -ENOSPC) {
3267 			enospc_errors++;
3268 		} else {
3269 			spin_lock(&fs_info->balance_lock);
3270 			bctl->stat.completed++;
3271 			spin_unlock(&fs_info->balance_lock);
3272 		}
3273 loop:
3274 		if (found_key.offset == 0)
3275 			break;
3276 		key.offset = found_key.offset - 1;
3277 	}
3278 
3279 	if (counting) {
3280 		btrfs_release_path(path);
3281 		counting = false;
3282 		goto again;
3283 	}
3284 error:
3285 	btrfs_free_path(path);
3286 	if (enospc_errors) {
3287 		btrfs_info(fs_info, "%d enospc errors during balance",
3288 		       enospc_errors);
3289 		if (!ret)
3290 			ret = -ENOSPC;
3291 	}
3292 
3293 	return ret;
3294 }
3295 
3296 /**
3297  * alloc_profile_is_valid - see if a given profile is valid and reduced
3298  * @flags: profile to validate
3299  * @extended: if true @flags is treated as an extended profile
3300  */
3301 static int alloc_profile_is_valid(u64 flags, int extended)
3302 {
3303 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3304 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3305 
3306 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3307 
3308 	/* 1) check that all other bits are zeroed */
3309 	if (flags & ~mask)
3310 		return 0;
3311 
3312 	/* 2) see if profile is reduced */
3313 	if (flags == 0)
3314 		return !extended; /* "0" is valid for usual profiles */
3315 
3316 	/* true if exactly one bit set */
3317 	return (flags & (flags - 1)) == 0;
3318 }
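/*
 * Example: flags with exactly BTRFS_BLOCK_GROUP_RAID1 set pass both
 * checks, while RAID1 | RAID10 fails the last test: for any value
 * with two or more bits set, flags & (flags - 1) is non-zero.
 */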
3319 
3320 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3321 {
3322 	/* cancel requested || normal exit path */
3323 	return atomic_read(&fs_info->balance_cancel_req) ||
3324 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3325 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3326 }
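/*
 * Note: the second clause above is the normal exit path (no pause and
 * no cancel pending), so this returns 1 both on cancel and on a clean
 * finish, and 0 only while a pause request is outstanding.
 */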
3327 
3328 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3329 {
3330 	int ret;
3331 
3332 	unset_balance_control(fs_info);
3333 	ret = del_balance_item(fs_info->tree_root);
3334 	if (ret)
3335 		btrfs_std_error(fs_info, ret);
3336 
3337 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3338 }
3339 
3340 /*
3341  * Should be called with both balance and volume mutexes held
3342  */
3343 int btrfs_balance(struct btrfs_balance_control *bctl,
3344 		  struct btrfs_ioctl_balance_args *bargs)
3345 {
3346 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3347 	u64 allowed;
3348 	int mixed = 0;
3349 	int ret;
3350 	u64 num_devices;
3351 	unsigned seq;
3352 
3353 	if (btrfs_fs_closing(fs_info) ||
3354 	    atomic_read(&fs_info->balance_pause_req) ||
3355 	    atomic_read(&fs_info->balance_cancel_req)) {
3356 		ret = -EINVAL;
3357 		goto out;
3358 	}
3359 
3360 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3361 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3362 		mixed = 1;
3363 
3364 	/*
3365 	 * In case of mixed groups both data and meta should be picked,
3366 	 * and identical options should be given for both of them.
3367 	 */
3368 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3369 	if (mixed && (bctl->flags & allowed)) {
3370 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3371 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3372 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3373 			btrfs_err(fs_info, "with mixed groups data and "
3374 				   "metadata balance options must be the same");
3375 			ret = -EINVAL;
3376 			goto out;
3377 		}
3378 	}
3379 
3380 	num_devices = fs_info->fs_devices->num_devices;
3381 	btrfs_dev_replace_lock(&fs_info->dev_replace);
3382 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3383 		BUG_ON(num_devices < 1);
3384 		num_devices--;
3385 	}
3386 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
3387 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3388 	if (num_devices == 1)
3389 		allowed |= BTRFS_BLOCK_GROUP_DUP;
3390 	else if (num_devices > 1)
3391 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3392 	if (num_devices > 2)
3393 		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3394 	if (num_devices > 3)
3395 		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3396 			    BTRFS_BLOCK_GROUP_RAID6);
3397 	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3398 	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
3399 	     (bctl->data.target & ~allowed))) {
3400 		btrfs_err(fs_info, "unable to start balance with target "
3401 			   "data profile %llu",
3402 		       bctl->data.target);
3403 		ret = -EINVAL;
3404 		goto out;
3405 	}
3406 	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3407 	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3408 	     (bctl->meta.target & ~allowed))) {
3409 		btrfs_err(fs_info,
3410 			   "unable to start balance with target metadata profile %llu",
3411 		       bctl->meta.target);
3412 		ret = -EINVAL;
3413 		goto out;
3414 	}
3415 	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3416 	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3417 	     (bctl->sys.target & ~allowed))) {
3418 		btrfs_err(fs_info,
3419 			   "unable to start balance with target system profile %llu",
3420 		       bctl->sys.target);
3421 		ret = -EINVAL;
3422 		goto out;
3423 	}
3424 
3425 	/* allow dup'ed data chunks only in mixed mode */
3426 	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3427 	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3428 		btrfs_err(fs_info, "dup for data is not allowed");
3429 		ret = -EINVAL;
3430 		goto out;
3431 	}
3432 
3433 	/* allow to reduce meta or sys integrity only if force set */
3434 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3435 			BTRFS_BLOCK_GROUP_RAID10 |
3436 			BTRFS_BLOCK_GROUP_RAID5 |
3437 			BTRFS_BLOCK_GROUP_RAID6;
3438 	do {
3439 		seq = read_seqbegin(&fs_info->profiles_lock);
3440 
3441 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3442 		     (fs_info->avail_system_alloc_bits & allowed) &&
3443 		     !(bctl->sys.target & allowed)) ||
3444 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3445 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3446 		     !(bctl->meta.target & allowed))) {
3447 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3448 				btrfs_info(fs_info, "force reducing metadata integrity");
3449 			} else {
3450 				btrfs_err(fs_info, "balance will reduce metadata "
3451 					   "integrity, use force if you want this");
3452 				ret = -EINVAL;
3453 				goto out;
3454 			}
3455 		}
3456 	} while (read_seqretry(&fs_info->profiles_lock, seq));
3457 
3458 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3459 		int num_tolerated_disk_barrier_failures;
3460 		u64 target = bctl->sys.target;
3461 
3462 		num_tolerated_disk_barrier_failures =
3463 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3464 		if (num_tolerated_disk_barrier_failures > 0 &&
3465 		    (target &
3466 		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3467 		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3468 			num_tolerated_disk_barrier_failures = 0;
3469 		else if (num_tolerated_disk_barrier_failures > 1 &&
3470 			 (target &
3471 			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3472 			num_tolerated_disk_barrier_failures = 1;
3473 
3474 		fs_info->num_tolerated_disk_barrier_failures =
3475 			num_tolerated_disk_barrier_failures;
3476 	}
3477 
3478 	ret = insert_balance_item(fs_info->tree_root, bctl);
3479 	if (ret && ret != -EEXIST)
3480 		goto out;
3481 
3482 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3483 		BUG_ON(ret == -EEXIST);
3484 		set_balance_control(bctl);
3485 	} else {
3486 		BUG_ON(ret != -EEXIST);
3487 		spin_lock(&fs_info->balance_lock);
3488 		update_balance_args(bctl);
3489 		spin_unlock(&fs_info->balance_lock);
3490 	}
3491 
3492 	atomic_inc(&fs_info->balance_running);
3493 	mutex_unlock(&fs_info->balance_mutex);
3494 
3495 	ret = __btrfs_balance(fs_info);
3496 
3497 	mutex_lock(&fs_info->balance_mutex);
3498 	atomic_dec(&fs_info->balance_running);
3499 
3500 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3501 		fs_info->num_tolerated_disk_barrier_failures =
3502 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3503 	}
3504 
3505 	if (bargs) {
3506 		memset(bargs, 0, sizeof(*bargs));
3507 		update_ioctl_balance_args(fs_info, 0, bargs);
3508 	}
3509 
3510 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3511 	    balance_need_close(fs_info)) {
3512 		__cancel_balance(fs_info);
3513 	}
3514 
3515 	wake_up(&fs_info->balance_wait_q);
3516 
3517 	return ret;
3518 out:
3519 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3520 		__cancel_balance(fs_info);
3521 	else {
3522 		kfree(bctl);
3523 		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3524 	}
3525 	return ret;
3526 }
3527 
3528 static int balance_kthread(void *data)
3529 {
3530 	struct btrfs_fs_info *fs_info = data;
3531 	int ret = 0;
3532 
3533 	mutex_lock(&fs_info->volume_mutex);
3534 	mutex_lock(&fs_info->balance_mutex);
3535 
3536 	if (fs_info->balance_ctl) {
3537 		btrfs_info(fs_info, "continuing balance");
3538 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3539 	}
3540 
3541 	mutex_unlock(&fs_info->balance_mutex);
3542 	mutex_unlock(&fs_info->volume_mutex);
3543 
3544 	return ret;
3545 }
3546 
3547 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3548 {
3549 	struct task_struct *tsk;
3550 
3551 	spin_lock(&fs_info->balance_lock);
3552 	if (!fs_info->balance_ctl) {
3553 		spin_unlock(&fs_info->balance_lock);
3554 		return 0;
3555 	}
3556 	spin_unlock(&fs_info->balance_lock);
3557 
3558 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3559 		btrfs_info(fs_info, "force skipping balance");
3560 		return 0;
3561 	}
3562 
3563 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3564 	return PTR_ERR_OR_ZERO(tsk);
3565 }
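/*
 * PTR_ERR_OR_ZERO() above collapses the two kthread_run() outcomes into a
 * single return value: kthread_run() hands back an ERR_PTR() on failure,
 * which we propagate, and a valid task pointer on success, in which case
 * the balance continues in the new kthread and we return 0.
 */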
3566 
3567 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3568 {
3569 	struct btrfs_balance_control *bctl;
3570 	struct btrfs_balance_item *item;
3571 	struct btrfs_disk_balance_args disk_bargs;
3572 	struct btrfs_path *path;
3573 	struct extent_buffer *leaf;
3574 	struct btrfs_key key;
3575 	int ret;
3576 
3577 	path = btrfs_alloc_path();
3578 	if (!path)
3579 		return -ENOMEM;
3580 
3581 	key.objectid = BTRFS_BALANCE_OBJECTID;
3582 	key.type = BTRFS_BALANCE_ITEM_KEY;
3583 	key.offset = 0;
3584 
3585 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3586 	if (ret < 0)
3587 		goto out;
3588 	if (ret > 0) { /* ret = -ENOENT; */
3589 		ret = 0;
3590 		goto out;
3591 	}
3592 
3593 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3594 	if (!bctl) {
3595 		ret = -ENOMEM;
3596 		goto out;
3597 	}
3598 
3599 	leaf = path->nodes[0];
3600 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3601 
3602 	bctl->fs_info = fs_info;
3603 	bctl->flags = btrfs_balance_flags(leaf, item);
3604 	bctl->flags |= BTRFS_BALANCE_RESUME;
3605 
3606 	btrfs_balance_data(leaf, item, &disk_bargs);
3607 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3608 	btrfs_balance_meta(leaf, item, &disk_bargs);
3609 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3610 	btrfs_balance_sys(leaf, item, &disk_bargs);
3611 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3612 
3613 	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3614 
3615 	mutex_lock(&fs_info->volume_mutex);
3616 	mutex_lock(&fs_info->balance_mutex);
3617 
3618 	set_balance_control(bctl);
3619 
3620 	mutex_unlock(&fs_info->balance_mutex);
3621 	mutex_unlock(&fs_info->volume_mutex);
3622 out:
3623 	btrfs_free_path(path);
3624 	return ret;
3625 }
3626 
3627 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3628 {
3629 	int ret = 0;
3630 
3631 	mutex_lock(&fs_info->balance_mutex);
3632 	if (!fs_info->balance_ctl) {
3633 		mutex_unlock(&fs_info->balance_mutex);
3634 		return -ENOTCONN;
3635 	}
3636 
3637 	if (atomic_read(&fs_info->balance_running)) {
3638 		atomic_inc(&fs_info->balance_pause_req);
3639 		mutex_unlock(&fs_info->balance_mutex);
3640 
3641 		wait_event(fs_info->balance_wait_q,
3642 			   atomic_read(&fs_info->balance_running) == 0);
3643 
3644 		mutex_lock(&fs_info->balance_mutex);
3645 		/* we are good with balance_ctl ripped off from under us */
3646 		BUG_ON(atomic_read(&fs_info->balance_running));
3647 		atomic_dec(&fs_info->balance_pause_req);
3648 	} else {
3649 		ret = -ENOTCONN;
3650 	}
3651 
3652 	mutex_unlock(&fs_info->balance_mutex);
3653 	return ret;
3654 }
3655 
3656 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3657 {
3658 	if (fs_info->sb->s_flags & MS_RDONLY)
3659 		return -EROFS;
3660 
3661 	mutex_lock(&fs_info->balance_mutex);
3662 	if (!fs_info->balance_ctl) {
3663 		mutex_unlock(&fs_info->balance_mutex);
3664 		return -ENOTCONN;
3665 	}
3666 
3667 	atomic_inc(&fs_info->balance_cancel_req);
3668 	/*
3669 	 * if a balance is running, just wait and return; the balance item
3670 	 * is deleted in btrfs_balance() in this case
3671 	 */
3672 	if (atomic_read(&fs_info->balance_running)) {
3673 		mutex_unlock(&fs_info->balance_mutex);
3674 		wait_event(fs_info->balance_wait_q,
3675 			   atomic_read(&fs_info->balance_running) == 0);
3676 		mutex_lock(&fs_info->balance_mutex);
3677 	} else {
3678 		/* __cancel_balance needs volume_mutex */
3679 		mutex_unlock(&fs_info->balance_mutex);
3680 		mutex_lock(&fs_info->volume_mutex);
3681 		mutex_lock(&fs_info->balance_mutex);
3682 
3683 		if (fs_info->balance_ctl)
3684 			__cancel_balance(fs_info);
3685 
3686 		mutex_unlock(&fs_info->volume_mutex);
3687 	}
3688 
3689 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3690 	atomic_dec(&fs_info->balance_cancel_req);
3691 	mutex_unlock(&fs_info->balance_mutex);
3692 	return 0;
3693 }
3694 
3695 static int btrfs_uuid_scan_kthread(void *data)
3696 {
3697 	struct btrfs_fs_info *fs_info = data;
3698 	struct btrfs_root *root = fs_info->tree_root;
3699 	struct btrfs_key key;
3700 	struct btrfs_key max_key;
3701 	struct btrfs_path *path = NULL;
3702 	int ret = 0;
3703 	struct extent_buffer *eb;
3704 	int slot;
3705 	struct btrfs_root_item root_item;
3706 	u32 item_size;
3707 	struct btrfs_trans_handle *trans = NULL;
3708 
3709 	path = btrfs_alloc_path();
3710 	if (!path) {
3711 		ret = -ENOMEM;
3712 		goto out;
3713 	}
3714 
3715 	key.objectid = 0;
3716 	key.type = BTRFS_ROOT_ITEM_KEY;
3717 	key.offset = 0;
3718 
3719 	max_key.objectid = (u64)-1;
3720 	max_key.type = BTRFS_ROOT_ITEM_KEY;
3721 	max_key.offset = (u64)-1;
3722 
3723 	while (1) {
3724 		ret = btrfs_search_forward(root, &key, path, 0);
3725 		if (ret) {
3726 			if (ret > 0)
3727 				ret = 0;
3728 			break;
3729 		}
3730 
3731 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
3732 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
3733 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
3734 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
3735 			goto skip;
3736 
3737 		eb = path->nodes[0];
3738 		slot = path->slots[0];
3739 		item_size = btrfs_item_size_nr(eb, slot);
3740 		if (item_size < sizeof(root_item))
3741 			goto skip;
3742 
3743 		read_extent_buffer(eb, &root_item,
3744 				   btrfs_item_ptr_offset(eb, slot),
3745 				   (int)sizeof(root_item));
3746 		if (btrfs_root_refs(&root_item) == 0)
3747 			goto skip;
3748 
3749 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
3750 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
3751 			if (trans)
3752 				goto update_tree;
3753 
3754 			btrfs_release_path(path);
3755 			/*
3756 			 * 1 - subvol uuid item
3757 			 * 1 - received_subvol uuid item
3758 			 */
3759 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
3760 			if (IS_ERR(trans)) {
3761 				ret = PTR_ERR(trans);
3762 				break;
3763 			}
3764 			continue;
3765 		} else {
3766 			goto skip;
3767 		}
3768 update_tree:
3769 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
3770 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3771 						  root_item.uuid,
3772 						  BTRFS_UUID_KEY_SUBVOL,
3773 						  key.objectid);
3774 			if (ret < 0) {
3775 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
3776 					ret);
3777 				break;
3778 			}
3779 		}
3780 
3781 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
3782 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3783 						  root_item.received_uuid,
3784 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3785 						  key.objectid);
3786 			if (ret < 0) {
3787 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
3788 					ret);
3789 				break;
3790 			}
3791 		}
3792 
3793 skip:
3794 		if (trans) {
3795 			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
3796 			trans = NULL;
3797 			if (ret)
3798 				break;
3799 		}
3800 
3801 		btrfs_release_path(path);
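		/*
		 * Advance the search key to the next possible
		 * (objectid, type, offset) tuple in key order.  For
		 * example, (5, ROOT_ITEM, 10) steps to (5, ROOT_ITEM, 11),
		 * and (5, ROOT_ITEM, (u64)-1) steps to (6, ROOT_ITEM, 0).
		 */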
3802 		if (key.offset < (u64)-1) {
3803 			key.offset++;
3804 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
3805 			key.offset = 0;
3806 			key.type = BTRFS_ROOT_ITEM_KEY;
3807 		} else if (key.objectid < (u64)-1) {
3808 			key.offset = 0;
3809 			key.type = BTRFS_ROOT_ITEM_KEY;
3810 			key.objectid++;
3811 		} else {
3812 			break;
3813 		}
3814 		cond_resched();
3815 	}
3816 
3817 out:
3818 	btrfs_free_path(path);
3819 	if (trans && !IS_ERR(trans))
3820 		btrfs_end_transaction(trans, fs_info->uuid_root);
3821 	if (ret)
3822 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
3823 	else
3824 		fs_info->update_uuid_tree_gen = 1;
3825 	up(&fs_info->uuid_tree_rescan_sem);
3826 	return 0;
3827 }
3828 
3829 /*
3830  * Callback for btrfs_uuid_tree_iterate().
3831  * returns:
3832  * 0	check succeeded, the entry is not outdated.
3833  * < 0	if an error occurred.
3834  * > 0	if the check failed, which means the caller shall remove the entry.
3835  */
3836 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
3837 				       u8 *uuid, u8 type, u64 subid)
3838 {
3839 	struct btrfs_key key;
3840 	int ret = 0;
3841 	struct btrfs_root *subvol_root;
3842 
3843 	if (type != BTRFS_UUID_KEY_SUBVOL &&
3844 	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
3845 		goto out;
3846 
3847 	key.objectid = subid;
3848 	key.type = BTRFS_ROOT_ITEM_KEY;
3849 	key.offset = (u64)-1;
3850 	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
3851 	if (IS_ERR(subvol_root)) {
3852 		ret = PTR_ERR(subvol_root);
3853 		if (ret == -ENOENT)
3854 			ret = 1;
3855 		goto out;
3856 	}
3857 
3858 	switch (type) {
3859 	case BTRFS_UUID_KEY_SUBVOL:
3860 		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
3861 			ret = 1;
3862 		break;
3863 	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
3864 		if (memcmp(uuid, subvol_root->root_item.received_uuid,
3865 			   BTRFS_UUID_SIZE))
3866 			ret = 1;
3867 		break;
3868 	}
3869 
3870 out:
3871 	return ret;
3872 }
3873 
3874 static int btrfs_uuid_rescan_kthread(void *data)
3875 {
3876 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
3877 	int ret;
3878 
3879 	/*
3880 	 * 1st step is to iterate through the existing UUID tree and
3881 	 * to delete all entries that contain outdated data.
3882 	 * 2nd step is to add all missing entries to the UUID tree.
3883 	 */
3884 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
3885 	if (ret < 0) {
3886 		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
3887 		up(&fs_info->uuid_tree_rescan_sem);
3888 		return ret;
3889 	}
3890 	return btrfs_uuid_scan_kthread(data);
3891 }
3892 
3893 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
3894 {
3895 	struct btrfs_trans_handle *trans;
3896 	struct btrfs_root *tree_root = fs_info->tree_root;
3897 	struct btrfs_root *uuid_root;
3898 	struct task_struct *task;
3899 	int ret;
3900 
3901 	/*
3902 	 * 1 - root node
3903 	 * 1 - root item
3904 	 */
3905 	trans = btrfs_start_transaction(tree_root, 2);
3906 	if (IS_ERR(trans))
3907 		return PTR_ERR(trans);
3908 
3909 	uuid_root = btrfs_create_tree(trans, fs_info,
3910 				      BTRFS_UUID_TREE_OBJECTID);
3911 	if (IS_ERR(uuid_root)) {
3912 		btrfs_abort_transaction(trans, tree_root,
3913 					PTR_ERR(uuid_root));
3914 		return PTR_ERR(uuid_root);
3915 	}
3916 
3917 	fs_info->uuid_root = uuid_root;
3918 
3919 	ret = btrfs_commit_transaction(trans, tree_root);
3920 	if (ret)
3921 		return ret;
3922 
3923 	down(&fs_info->uuid_tree_rescan_sem);
3924 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
3925 	if (IS_ERR(task)) {
3926 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3927 		btrfs_warn(fs_info, "failed to start uuid_scan task");
3928 		up(&fs_info->uuid_tree_rescan_sem);
3929 		return PTR_ERR(task);
3930 	}
3931 
3932 	return 0;
3933 }
3934 
3935 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
3936 {
3937 	struct task_struct *task;
3938 
3939 	down(&fs_info->uuid_tree_rescan_sem);
3940 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
3941 	if (IS_ERR(task)) {
3942 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3943 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
3944 		up(&fs_info->uuid_tree_rescan_sem);
3945 		return PTR_ERR(task);
3946 	}
3947 
3948 	return 0;
3949 }
3950 
3951 /*
3952  * shrinking a device means finding all of the device extents past
3953  * the new size, and then following the back refs to the chunks.
3954  * The chunk relocation code actually frees the device extents.
3955  */
3956 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3957 {
3958 	struct btrfs_trans_handle *trans;
3959 	struct btrfs_root *root = device->dev_root;
3960 	struct btrfs_dev_extent *dev_extent = NULL;
3961 	struct btrfs_path *path;
3962 	u64 length;
3963 	u64 chunk_tree;
3964 	u64 chunk_objectid;
3965 	u64 chunk_offset;
3966 	int ret;
3967 	int slot;
3968 	int failed = 0;
3969 	bool retried = false;
3970 	struct extent_buffer *l;
3971 	struct btrfs_key key;
3972 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3973 	u64 old_total = btrfs_super_total_bytes(super_copy);
3974 	u64 old_size = btrfs_device_get_total_bytes(device);
3975 	u64 diff = old_size - new_size;
3976 
3977 	if (device->is_tgtdev_for_dev_replace)
3978 		return -EINVAL;
3979 
3980 	path = btrfs_alloc_path();
3981 	if (!path)
3982 		return -ENOMEM;
3983 
3984 	path->reada = 2;
3985 
3986 	lock_chunks(root);
3987 
3988 	btrfs_device_set_total_bytes(device, new_size);
3989 	if (device->writeable) {
3990 		device->fs_devices->total_rw_bytes -= diff;
3991 		spin_lock(&root->fs_info->free_chunk_lock);
3992 		root->fs_info->free_chunk_space -= diff;
3993 		spin_unlock(&root->fs_info->free_chunk_lock);
3994 	}
3995 	unlock_chunks(root);
3996 
3997 again:
3998 	key.objectid = device->devid;
3999 	key.offset = (u64)-1;
4000 	key.type = BTRFS_DEV_EXTENT_KEY;
4001 
4002 	do {
4003 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4004 		if (ret < 0)
4005 			goto done;
4006 
4007 		ret = btrfs_previous_item(root, path, 0, key.type);
4008 		if (ret < 0)
4009 			goto done;
4010 		if (ret) {
4011 			ret = 0;
4012 			btrfs_release_path(path);
4013 			break;
4014 		}
4015 
4016 		l = path->nodes[0];
4017 		slot = path->slots[0];
4018 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4019 
4020 		if (key.objectid != device->devid) {
4021 			btrfs_release_path(path);
4022 			break;
4023 		}
4024 
4025 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4026 		length = btrfs_dev_extent_length(l, dev_extent);
4027 
4028 		if (key.offset + length <= new_size) {
4029 			btrfs_release_path(path);
4030 			break;
4031 		}
4032 
4033 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
4034 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
4035 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4036 		btrfs_release_path(path);
4037 
4038 		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
4039 					   chunk_offset);
4040 		if (ret && ret != -ENOSPC)
4041 			goto done;
4042 		if (ret == -ENOSPC)
4043 			failed++;
4044 	} while (key.offset-- > 0);
4045 
4046 	if (failed && !retried) {
4047 		failed = 0;
4048 		retried = true;
4049 		goto again;
4050 	} else if (failed && retried) {
4051 		ret = -ENOSPC;
4052 		lock_chunks(root);
4053 
4054 		btrfs_device_set_total_bytes(device, old_size);
4055 		if (device->writeable)
4056 			device->fs_devices->total_rw_bytes += diff;
4057 		spin_lock(&root->fs_info->free_chunk_lock);
4058 		root->fs_info->free_chunk_space += diff;
4059 		spin_unlock(&root->fs_info->free_chunk_lock);
4060 		unlock_chunks(root);
4061 		goto done;
4062 	}
4063 
4064 	/* Shrinking succeeded, else we would be at "done". */
4065 	trans = btrfs_start_transaction(root, 0);
4066 	if (IS_ERR(trans)) {
4067 		ret = PTR_ERR(trans);
4068 		goto done;
4069 	}
4070 
4071 	lock_chunks(root);
4072 	btrfs_device_set_disk_total_bytes(device, new_size);
4073 	if (list_empty(&device->resized_list))
4074 		list_add_tail(&device->resized_list,
4075 			      &root->fs_info->fs_devices->resized_devices);
4076 
4077 	WARN_ON(diff > old_total);
4078 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
4079 	unlock_chunks(root);
4080 
4081 	/* Now btrfs_update_device() will change the on-disk size. */
4082 	ret = btrfs_update_device(trans, device);
4083 	btrfs_end_transaction(trans, root);
4084 done:
4085 	btrfs_free_path(path);
4086 	return ret;
4087 }
4088 
4089 static int btrfs_add_system_chunk(struct btrfs_root *root,
4090 			   struct btrfs_key *key,
4091 			   struct btrfs_chunk *chunk, int item_size)
4092 {
4093 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4094 	struct btrfs_disk_key disk_key;
4095 	u32 array_size;
4096 	u8 *ptr;
4097 
4098 	lock_chunks(root);
4099 	array_size = btrfs_super_sys_array_size(super_copy);
4100 	if (array_size + item_size + sizeof(disk_key)
4101 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4102 		unlock_chunks(root);
4103 		return -EFBIG;
4104 	}
4105 
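	/*
	 * sys_chunk_array holds back-to-back (struct btrfs_disk_key,
	 * struct btrfs_chunk) records; append the new pair at the current
	 * end and grow the recorded array size to match.
	 */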
4106 	ptr = super_copy->sys_chunk_array + array_size;
4107 	btrfs_cpu_key_to_disk(&disk_key, key);
4108 	memcpy(ptr, &disk_key, sizeof(disk_key));
4109 	ptr += sizeof(disk_key);
4110 	memcpy(ptr, chunk, item_size);
4111 	item_size += sizeof(disk_key);
4112 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4113 	unlock_chunks(root);
4114 
4115 	return 0;
4116 }
4117 
4118 /*
4119  * sort the devices in descending order by max_avail, total_avail
4120  */
4121 static int btrfs_cmp_device_info(const void *a, const void *b)
4122 {
4123 	const struct btrfs_device_info *di_a = a;
4124 	const struct btrfs_device_info *di_b = b;
4125 
4126 	if (di_a->max_avail > di_b->max_avail)
4127 		return -1;
4128 	if (di_a->max_avail < di_b->max_avail)
4129 		return 1;
4130 	if (di_a->total_avail > di_b->total_avail)
4131 		return -1;
4132 	if (di_a->total_avail < di_b->total_avail)
4133 		return 1;
4134 	return 0;
4135 }
4136 
4137 static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
4138 	[BTRFS_RAID_RAID10] = {
4139 		.sub_stripes	= 2,
4140 		.dev_stripes	= 1,
4141 		.devs_max	= 0,	/* 0 == as many as possible */
4142 		.devs_min	= 4,
4143 		.devs_increment	= 2,
4144 		.ncopies	= 2,
4145 	},
4146 	[BTRFS_RAID_RAID1] = {
4147 		.sub_stripes	= 1,
4148 		.dev_stripes	= 1,
4149 		.devs_max	= 2,
4150 		.devs_min	= 2,
4151 		.devs_increment	= 2,
4152 		.ncopies	= 2,
4153 	},
4154 	[BTRFS_RAID_DUP] = {
4155 		.sub_stripes	= 1,
4156 		.dev_stripes	= 2,
4157 		.devs_max	= 1,
4158 		.devs_min	= 1,
4159 		.devs_increment	= 1,
4160 		.ncopies	= 2,
4161 	},
4162 	[BTRFS_RAID_RAID0] = {
4163 		.sub_stripes	= 1,
4164 		.dev_stripes	= 1,
4165 		.devs_max	= 0,
4166 		.devs_min	= 2,
4167 		.devs_increment	= 1,
4168 		.ncopies	= 1,
4169 	},
4170 	[BTRFS_RAID_SINGLE] = {
4171 		.sub_stripes	= 1,
4172 		.dev_stripes	= 1,
4173 		.devs_max	= 1,
4174 		.devs_min	= 1,
4175 		.devs_increment	= 1,
4176 		.ncopies	= 1,
4177 	},
4178 	[BTRFS_RAID_RAID5] = {
4179 		.sub_stripes	= 1,
4180 		.dev_stripes	= 1,
4181 		.devs_max	= 0,
4182 		.devs_min	= 2,
4183 		.devs_increment	= 1,
4184 		.ncopies	= 2,
4185 	},
4186 	[BTRFS_RAID_RAID6] = {
4187 		.sub_stripes	= 1,
4188 		.dev_stripes	= 1,
4189 		.devs_max	= 0,
4190 		.devs_min	= 3,
4191 		.devs_increment	= 1,
4192 		.ncopies	= 3,
4193 	},
4194 };
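/*
 * How the fields above drive allocation, taking RAID10 as an example:
 * devs_min = 4 and devs_increment = 2 mean at least four devices are
 * required and the device count is rounded down to a multiple of two,
 * while sub_stripes = 2 and ncopies = 2 mean every stripe is mirrored
 * once, so only half of the stripes count toward the usable chunk size.
 */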
4195 
4196 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4197 {
4198 	/* TODO: allow the user to set a preferred stripe size */
4199 	return 64 * 1024;
4200 }
4201 
4202 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4203 {
4204 	if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
4205 		return;
4206 
4207 	btrfs_set_fs_incompat(info, RAID56);
4208 }
4209 
4210 #define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
4211 			- sizeof(struct btrfs_item)		\
4212 			- sizeof(struct btrfs_chunk))		\
4213 			/ sizeof(struct btrfs_stripe) + 1)
4214 
4215 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
4216 				- 2 * sizeof(struct btrfs_disk_key)	\
4217 				- 2 * sizeof(struct btrfs_chunk))	\
4218 				/ sizeof(struct btrfs_stripe) + 1)
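/*
 * Rough worked example for BTRFS_MAX_DEVS_SYS_CHUNK, assuming the usual
 * on-disk sizes (btrfs_disk_key is 17 bytes, btrfs_stripe 32 bytes,
 * btrfs_chunk 80 bytes including its first embedded stripe) and a
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE of 2048: (2048 - 2*17 - 2*80) / 32 + 1
 * gives at most 58 stripes, i.e. 58 devices for a system chunk.
 */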
4219 
4220 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4221 			       struct btrfs_root *extent_root, u64 start,
4222 			       u64 type)
4223 {
4224 	struct btrfs_fs_info *info = extent_root->fs_info;
4225 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
4226 	struct list_head *cur;
4227 	struct map_lookup *map = NULL;
4228 	struct extent_map_tree *em_tree;
4229 	struct extent_map *em;
4230 	struct btrfs_device_info *devices_info = NULL;
4231 	u64 total_avail;
4232 	int num_stripes;	/* total number of stripes to allocate */
4233 	int data_stripes;	/* number of stripes that count for
4234 				   block group size */
4235 	int sub_stripes;	/* sub_stripes info for map */
4236 	int dev_stripes;	/* stripes per dev */
4237 	int devs_max;		/* max devs to use */
4238 	int devs_min;		/* min devs needed */
4239 	int devs_increment;	/* ndevs has to be a multiple of this */
4240 	int ncopies;		/* how many copies of the data there are */
4241 	int ret;
4242 	u64 max_stripe_size;
4243 	u64 max_chunk_size;
4244 	u64 stripe_size;
4245 	u64 num_bytes;
4246 	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4247 	int ndevs;
4248 	int i;
4249 	int j;
4250 	int index;
4251 
4252 	BUG_ON(!alloc_profile_is_valid(type, 0));
4253 
4254 	if (list_empty(&fs_devices->alloc_list))
4255 		return -ENOSPC;
4256 
4257 	index = __get_raid_index(type);
4258 
4259 	sub_stripes = btrfs_raid_array[index].sub_stripes;
4260 	dev_stripes = btrfs_raid_array[index].dev_stripes;
4261 	devs_max = btrfs_raid_array[index].devs_max;
4262 	devs_min = btrfs_raid_array[index].devs_min;
4263 	devs_increment = btrfs_raid_array[index].devs_increment;
4264 	ncopies = btrfs_raid_array[index].ncopies;
4265 
4266 	if (type & BTRFS_BLOCK_GROUP_DATA) {
4267 		max_stripe_size = 1024 * 1024 * 1024;
4268 		max_chunk_size = 10 * max_stripe_size;
4269 		if (!devs_max)
4270 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4271 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4272 		/* for larger filesystems, use larger metadata chunks */
4273 		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
4274 			max_stripe_size = 1024 * 1024 * 1024;
4275 		else
4276 			max_stripe_size = 256 * 1024 * 1024;
4277 		max_chunk_size = max_stripe_size;
4278 		if (!devs_max)
4279 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4280 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4281 		max_stripe_size = 32 * 1024 * 1024;
4282 		max_chunk_size = 2 * max_stripe_size;
4283 		if (!devs_max)
4284 			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4285 	} else {
4286 		btrfs_err(info, "invalid chunk type 0x%llx requested",
4287 		       type);
4288 		BUG_ON(1);
4289 	}
4290 
4291 	/* we don't want a chunk larger than 10% of writeable space */
4292 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4293 			     max_chunk_size);
4294 
4295 	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
4296 			       GFP_NOFS);
4297 	if (!devices_info)
4298 		return -ENOMEM;
4299 
4300 	cur = fs_devices->alloc_list.next;
4301 
4302 	/*
4303 	 * in the first pass through the devices list, we gather information
4304 	 * about the available holes on each device.
4305 	 */
4306 	ndevs = 0;
4307 	while (cur != &fs_devices->alloc_list) {
4308 		struct btrfs_device *device;
4309 		u64 max_avail;
4310 		u64 dev_offset;
4311 
4312 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4313 
4314 		cur = cur->next;
4315 
4316 		if (!device->writeable) {
4317 			WARN(1, KERN_ERR
4318 			       "BTRFS: read-only device in alloc_list\n");
4319 			continue;
4320 		}
4321 
4322 		if (!device->in_fs_metadata ||
4323 		    device->is_tgtdev_for_dev_replace)
4324 			continue;
4325 
4326 		if (device->total_bytes > device->bytes_used)
4327 			total_avail = device->total_bytes - device->bytes_used;
4328 		else
4329 			total_avail = 0;
4330 
4331 		/* If there is no space on this device, skip it. */
4332 		if (total_avail == 0)
4333 			continue;
4334 
4335 		ret = find_free_dev_extent(trans, device,
4336 					   max_stripe_size * dev_stripes,
4337 					   &dev_offset, &max_avail);
4338 		if (ret && ret != -ENOSPC)
4339 			goto error;
4340 
4341 		if (ret == 0)
4342 			max_avail = max_stripe_size * dev_stripes;
4343 
4344 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4345 			continue;
4346 
4347 		if (ndevs == fs_devices->rw_devices) {
4348 			WARN(1, "%s: found more than %llu devices\n",
4349 			     __func__, fs_devices->rw_devices);
4350 			break;
4351 		}
4352 		devices_info[ndevs].dev_offset = dev_offset;
4353 		devices_info[ndevs].max_avail = max_avail;
4354 		devices_info[ndevs].total_avail = total_avail;
4355 		devices_info[ndevs].dev = device;
4356 		++ndevs;
4357 	}
4358 
4359 	/*
4360 	 * now sort the devices by hole size / available space
4361 	 */
4362 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4363 	     btrfs_cmp_device_info, NULL);
4364 
4365 	/* round down to number of usable stripes */
4366 	ndevs -= ndevs % devs_increment;
4367 
4368 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4369 		ret = -ENOSPC;
4370 		goto error;
4371 	}
4372 
4373 	if (devs_max && ndevs > devs_max)
4374 		ndevs = devs_max;
4375 	/*
4376 	 * the primary goal is to maximize the number of stripes, so use as many
4377 	 * devices as possible, even if the stripes are not maximum sized.
4378 	 */
4379 	stripe_size = devices_info[ndevs-1].max_avail;
4380 	num_stripes = ndevs * dev_stripes;
4381 
4382 	/*
4383 	 * this will have to be fixed for RAID1 and RAID10 over
4384 	 * more drives
4385 	 */
4386 	data_stripes = num_stripes / ncopies;
4387 
4388 	if (type & BTRFS_BLOCK_GROUP_RAID5) {
4389 		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4390 				 btrfs_super_stripesize(info->super_copy));
4391 		data_stripes = num_stripes - 1;
4392 	}
4393 	if (type & BTRFS_BLOCK_GROUP_RAID6) {
4394 		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4395 				 btrfs_super_stripesize(info->super_copy));
4396 		data_stripes = num_stripes - 2;
4397 	}
4398 
4399 	/*
4400 	 * Use the number of data stripes to figure out how big this chunk
4401 	 * is really going to be in terms of logical address space,
4402 	 * and compare that answer with the max chunk size
4403 	 */
4404 	if (stripe_size * data_stripes > max_chunk_size) {
4405 		u64 mask = (1ULL << 24) - 1;
4406 		stripe_size = max_chunk_size;
4407 		do_div(stripe_size, data_stripes);
4408 
4409 		/* bump the answer up to a 16MB boundary */
4410 		stripe_size = (stripe_size + mask) & ~mask;
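		/*
		 * (x + mask) & ~mask rounds x up to the next 16MB
		 * multiple, e.g. a 20MB stripe_size becomes 32MB here.
		 */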
4411 
4412 		/* but don't go higher than the limits we found
4413 		 * while searching for free extents
4414 		 */
4415 		if (stripe_size > devices_info[ndevs-1].max_avail)
4416 			stripe_size = devices_info[ndevs-1].max_avail;
4417 	}
4418 
4419 	do_div(stripe_size, dev_stripes);
4420 
4421 	/* align to BTRFS_STRIPE_LEN */
4422 	do_div(stripe_size, raid_stripe_len);
4423 	stripe_size *= raid_stripe_len;
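	/*
	 * The do_div()/multiply pair above rounds stripe_size down to a
	 * multiple of raid_stripe_len.  E.g. with stripe_size = 1000000
	 * and a 64KB stripe length: 1000000 / 65536 = 15, so stripe_size
	 * ends up as 15 * 65536 = 983040 bytes.
	 */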
4424 
4425 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4426 	if (!map) {
4427 		ret = -ENOMEM;
4428 		goto error;
4429 	}
4430 	map->num_stripes = num_stripes;
4431 
4432 	for (i = 0; i < ndevs; ++i) {
4433 		for (j = 0; j < dev_stripes; ++j) {
4434 			int s = i * dev_stripes + j;
4435 			map->stripes[s].dev = devices_info[i].dev;
4436 			map->stripes[s].physical = devices_info[i].dev_offset +
4437 						   j * stripe_size;
4438 		}
4439 	}
4440 	map->sector_size = extent_root->sectorsize;
4441 	map->stripe_len = raid_stripe_len;
4442 	map->io_align = raid_stripe_len;
4443 	map->io_width = raid_stripe_len;
4444 	map->type = type;
4445 	map->sub_stripes = sub_stripes;
4446 
4447 	num_bytes = stripe_size * data_stripes;
4448 
4449 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4450 
4451 	em = alloc_extent_map();
4452 	if (!em) {
4453 		kfree(map);
4454 		ret = -ENOMEM;
4455 		goto error;
4456 	}
4457 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4458 	em->bdev = (struct block_device *)map;
4459 	em->start = start;
4460 	em->len = num_bytes;
4461 	em->block_start = 0;
4462 	em->block_len = em->len;
4463 	em->orig_block_len = stripe_size;
4464 
4465 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4466 	write_lock(&em_tree->lock);
4467 	ret = add_extent_mapping(em_tree, em, 0);
4468 	if (!ret) {
4469 		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4470 		atomic_inc(&em->refs);
4471 	}
4472 	write_unlock(&em_tree->lock);
4473 	if (ret) {
4474 		free_extent_map(em);
4475 		goto error;
4476 	}
4477 
4478 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
4479 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4480 				     start, num_bytes);
4481 	if (ret)
4482 		goto error_del_extent;
4483 
4484 	for (i = 0; i < map->num_stripes; i++) {
4485 		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4486 		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4487 	}
4488 
4489 	spin_lock(&extent_root->fs_info->free_chunk_lock);
4490 	extent_root->fs_info->free_chunk_space -= (stripe_size *
4491 						   map->num_stripes);
4492 	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4493 
4494 	free_extent_map(em);
4495 	check_raid56_incompat_flag(extent_root->fs_info, type);
4496 
4497 	kfree(devices_info);
4498 	return 0;
4499 
4500 error_del_extent:
4501 	write_lock(&em_tree->lock);
4502 	remove_extent_mapping(em_tree, em);
4503 	write_unlock(&em_tree->lock);
4504 
4505 	/* One for our allocation */
4506 	free_extent_map(em);
4507 	/* One for the tree reference */
4508 	free_extent_map(em);
4509 error:
4510 	kfree(devices_info);
4511 	return ret;
4512 }
4513 
4514 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4515 				struct btrfs_root *extent_root,
4516 				u64 chunk_offset, u64 chunk_size)
4517 {
4518 	struct btrfs_key key;
4519 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4520 	struct btrfs_device *device;
4521 	struct btrfs_chunk *chunk;
4522 	struct btrfs_stripe *stripe;
4523 	struct extent_map_tree *em_tree;
4524 	struct extent_map *em;
4525 	struct map_lookup *map;
4526 	size_t item_size;
4527 	u64 dev_offset;
4528 	u64 stripe_size;
4529 	int i = 0;
4530 	int ret;
4531 
4532 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4533 	read_lock(&em_tree->lock);
4534 	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4535 	read_unlock(&em_tree->lock);
4536 
4537 	if (!em) {
4538 		btrfs_crit(extent_root->fs_info, "unable to find logical "
4539 			   "%Lu len %Lu", chunk_offset, chunk_size);
4540 		return -EINVAL;
4541 	}
4542 
4543 	if (em->start != chunk_offset || em->len != chunk_size) {
4544 		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4545 			  " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
4546 			  chunk_size, em->start, em->len);
4547 		free_extent_map(em);
4548 		return -EINVAL;
4549 	}
4550 
4551 	map = (struct map_lookup *)em->bdev;
4552 	item_size = btrfs_chunk_item_size(map->num_stripes);
4553 	stripe_size = em->orig_block_len;
4554 
4555 	chunk = kzalloc(item_size, GFP_NOFS);
4556 	if (!chunk) {
4557 		ret = -ENOMEM;
4558 		goto out;
4559 	}
4560 
4561 	for (i = 0; i < map->num_stripes; i++) {
4562 		device = map->stripes[i].dev;
4563 		dev_offset = map->stripes[i].physical;
4564 
4565 		ret = btrfs_update_device(trans, device);
4566 		if (ret)
4567 			goto out;
4568 		ret = btrfs_alloc_dev_extent(trans, device,
4569 					     chunk_root->root_key.objectid,
4570 					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4571 					     chunk_offset, dev_offset,
4572 					     stripe_size);
4573 		if (ret)
4574 			goto out;
4575 	}
4576 
4577 	stripe = &chunk->stripe;
4578 	for (i = 0; i < map->num_stripes; i++) {
4579 		device = map->stripes[i].dev;
4580 		dev_offset = map->stripes[i].physical;
4581 
4582 		btrfs_set_stack_stripe_devid(stripe, device->devid);
4583 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4584 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4585 		stripe++;
4586 	}
4587 
4588 	btrfs_set_stack_chunk_length(chunk, chunk_size);
4589 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4590 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4591 	btrfs_set_stack_chunk_type(chunk, map->type);
4592 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4593 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4594 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4595 	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4596 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4597 
4598 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4599 	key.type = BTRFS_CHUNK_ITEM_KEY;
4600 	key.offset = chunk_offset;
4601 
4602 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4603 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4604 		/*
4605 		 * TODO: Cleanup of inserted chunk root in case of
4606 		 * failure.
4607 		 */
4608 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4609 					     item_size);
4610 	}
4611 
4612 out:
4613 	kfree(chunk);
4614 	free_extent_map(em);
4615 	return ret;
4616 }
4617 
4618 /*
4619  * Chunk allocation falls into two parts. The first part does the work
4620  * that makes the newly allocated chunk usable, but does not do any
4621  * operation that modifies the chunk tree. The second part does the
4622  * work that requires modifying the chunk tree. This division is
4623  * important for the bootstrap process of adding storage to a seed btrfs.
4624  */
4625 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4626 		      struct btrfs_root *extent_root, u64 type)
4627 {
4628 	u64 chunk_offset;
4629 
4630 	chunk_offset = find_next_chunk(extent_root->fs_info);
4631 	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4632 }
4633 
4634 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4635 					 struct btrfs_root *root,
4636 					 struct btrfs_device *device)
4637 {
4638 	u64 chunk_offset;
4639 	u64 sys_chunk_offset;
4640 	u64 alloc_profile;
4641 	struct btrfs_fs_info *fs_info = root->fs_info;
4642 	struct btrfs_root *extent_root = fs_info->extent_root;
4643 	int ret;
4644 
4645 	chunk_offset = find_next_chunk(fs_info);
4646 	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4647 	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4648 				  alloc_profile);
4649 	if (ret)
4650 		return ret;
4651 
4652 	sys_chunk_offset = find_next_chunk(root->fs_info);
4653 	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4654 	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4655 				  alloc_profile);
4656 	return ret;
4657 }
4658 
4659 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
4660 {
4661 	int max_errors;
4662 
4663 	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4664 			 BTRFS_BLOCK_GROUP_RAID10 |
4665 			 BTRFS_BLOCK_GROUP_RAID5 |
4666 			 BTRFS_BLOCK_GROUP_DUP)) {
4667 		max_errors = 1;
4668 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4669 		max_errors = 2;
4670 	} else {
4671 		max_errors = 0;
4672 	}
4673 
4674 	return max_errors;
4675 }
4676 
4677 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4678 {
4679 	struct extent_map *em;
4680 	struct map_lookup *map;
4681 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4682 	int readonly = 0;
4683 	int miss_ndevs = 0;
4684 	int i;
4685 
4686 	read_lock(&map_tree->map_tree.lock);
4687 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4688 	read_unlock(&map_tree->map_tree.lock);
4689 	if (!em)
4690 		return 1;
4691 
4692 	map = (struct map_lookup *)em->bdev;
4693 	for (i = 0; i < map->num_stripes; i++) {
4694 		if (map->stripes[i].dev->missing) {
4695 			miss_ndevs++;
4696 			continue;
4697 		}
4698 
4699 		if (!map->stripes[i].dev->writeable) {
4700 			readonly = 1;
4701 			goto end;
4702 		}
4703 	}
4704 
4705 	/*
4706 	 * If the number of missing devices is larger than max errors,
4707 	 * we can not write the data into that chunk successfully, so
4708 	 * set it readonly.
4709 	 */
4710 	if (miss_ndevs > btrfs_chunk_max_errors(map))
4711 		readonly = 1;
4712 end:
4713 	free_extent_map(em);
4714 	return readonly;
4715 }
4716 
4717 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4718 {
4719 	extent_map_tree_init(&tree->map_tree);
4720 }
4721 
4722 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4723 {
4724 	struct extent_map *em;
4725 
4726 	while (1) {
4727 		write_lock(&tree->map_tree.lock);
4728 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4729 		if (em)
4730 			remove_extent_mapping(&tree->map_tree, em);
4731 		write_unlock(&tree->map_tree.lock);
4732 		if (!em)
4733 			break;
4734 		/* once for us */
4735 		free_extent_map(em);
4736 		/* once for the tree */
4737 		free_extent_map(em);
4738 	}
4739 }
4740 
4741 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4742 {
4743 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4744 	struct extent_map *em;
4745 	struct map_lookup *map;
4746 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4747 	int ret;
4748 
4749 	read_lock(&em_tree->lock);
4750 	em = lookup_extent_mapping(em_tree, logical, len);
4751 	read_unlock(&em_tree->lock);
4752 
4753 	/*
4754 	 * We could return errors for these cases, but that could get ugly
4755 	 * and we'd probably end up doing the same thing anyway: nothing
4756 	 * else, just exit. So return 1 so the callers don't try other copies.
4757 	 */
4758 	if (!em) {
4759 		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
4760 			    logical+len);
4761 		return 1;
4762 	}
4763 
4764 	if (em->start > logical || em->start + em->len < logical) {
4765 		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4766 			    "%Lu-%Lu", logical, logical+len, em->start,
4767 			    em->start + em->len);
4768 		free_extent_map(em);
4769 		return 1;
4770 	}
4771 
4772 	map = (struct map_lookup *)em->bdev;
4773 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4774 		ret = map->num_stripes;
4775 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4776 		ret = map->sub_stripes;
4777 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4778 		ret = 2;
4779 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4780 		ret = 3;
4781 	else
4782 		ret = 1;
4783 	free_extent_map(em);
4784 
4785 	btrfs_dev_replace_lock(&fs_info->dev_replace);
4786 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4787 		ret++;
4788 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
4789 
4790 	return ret;
4791 }
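/*
 * Example: for a RAID1 chunk the code above reports map->num_stripes = 2
 * copies (each stripe is a full mirror), and while a device replace is
 * ongoing one extra copy is reported, because the replace target can also
 * serve reads for the already-copied region.
 */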
4792 
4793 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4794 				    struct btrfs_mapping_tree *map_tree,
4795 				    u64 logical)
4796 {
4797 	struct extent_map *em;
4798 	struct map_lookup *map;
4799 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4800 	unsigned long len = root->sectorsize;
4801 
4802 	read_lock(&em_tree->lock);
4803 	em = lookup_extent_mapping(em_tree, logical, len);
4804 	read_unlock(&em_tree->lock);
4805 	BUG_ON(!em);
4806 
4807 	BUG_ON(em->start > logical || em->start + em->len < logical);
4808 	map = (struct map_lookup *)em->bdev;
4809 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4810 			 BTRFS_BLOCK_GROUP_RAID6)) {
4811 		len = map->stripe_len * nr_data_stripes(map);
4812 	}
4813 	free_extent_map(em);
4814 	return len;
4815 }
4816 
4817 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4818 			   u64 logical, u64 len, int mirror_num)
4819 {
4820 	struct extent_map *em;
4821 	struct map_lookup *map;
4822 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4823 	int ret = 0;
4824 
4825 	read_lock(&em_tree->lock);
4826 	em = lookup_extent_mapping(em_tree, logical, len);
4827 	read_unlock(&em_tree->lock);
4828 	BUG_ON(!em);
4829 
4830 	BUG_ON(em->start > logical || em->start + em->len < logical);
4831 	map = (struct map_lookup *)em->bdev;
4832 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4833 			 BTRFS_BLOCK_GROUP_RAID6))
4834 		ret = 1;
4835 	free_extent_map(em);
4836 	return ret;
4837 }
4838 
4839 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4840 			    struct map_lookup *map, int first, int num,
4841 			    int optimal, int dev_replace_is_ongoing)
4842 {
4843 	int i;
4844 	int tolerance;
4845 	struct btrfs_device *srcdev;
4846 
4847 	if (dev_replace_is_ongoing &&
4848 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4849 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4850 		srcdev = fs_info->dev_replace.srcdev;
4851 	else
4852 		srcdev = NULL;
4853 
4854 	/*
4855 	 * try to avoid the drive that is the source drive for a
4856 	 * dev-replace procedure, only choose it if no other non-missing
4857 	 * mirror is available
4858 	 */
4859 	for (tolerance = 0; tolerance < 2; tolerance++) {
4860 		if (map->stripes[optimal].dev->bdev &&
4861 		    (tolerance || map->stripes[optimal].dev != srcdev))
4862 			return optimal;
4863 		for (i = first; i < first + num; i++) {
4864 			if (map->stripes[i].dev->bdev &&
4865 			    (tolerance || map->stripes[i].dev != srcdev))
4866 				return i;
4867 		}
4868 	}
4869 
4870 	/* we couldn't find one that doesn't fail.  Just return something
4871 	 * and the I/O error handling code will clean up eventually
4872 	 */
4873 	return optimal;
4874 }
4875 
4876 static inline int parity_smaller(u64 a, u64 b)
4877 {
4878 	return a > b;
4879 }
4880 
4881 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4882 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4883 {
4884 	struct btrfs_bio_stripe s;
4885 	int i;
4886 	u64 l;
4887 	int again = 1;
4888 
4889 	while (again) {
4890 		again = 0;
4891 		for (i = 0; i < bbio->num_stripes - 1; i++) {
4892 			if (parity_smaller(raid_map[i], raid_map[i+1])) {
4893 				s = bbio->stripes[i];
4894 				l = raid_map[i];
4895 				bbio->stripes[i] = bbio->stripes[i+1];
4896 				raid_map[i] = raid_map[i+1];
4897 				bbio->stripes[i+1] = s;
4898 				raid_map[i+1] = l;
4899 				again = 1;
4900 			}
4901 		}
4902 	}
4903 }
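/*
 * This works because the parity slots in raid_map hold RAID5_P_STRIPE and
 * RAID6_Q_STRIPE, which are defined as the two largest u64 values
 * ((u64)-2 and (u64)-1), so they compare greater than any real logical
 * address and bubble to the end of the stripe array.
 */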
4904 
4905 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4906 			     u64 logical, u64 *length,
4907 			     struct btrfs_bio **bbio_ret,
4908 			     int mirror_num, u64 **raid_map_ret)
4909 {
4910 	struct extent_map *em;
4911 	struct map_lookup *map;
4912 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4913 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4914 	u64 offset;
4915 	u64 stripe_offset;
4916 	u64 stripe_end_offset;
4917 	u64 stripe_nr;
4918 	u64 stripe_nr_orig;
4919 	u64 stripe_nr_end;
4920 	u64 stripe_len;
4921 	u64 *raid_map = NULL;
4922 	int stripe_index;
4923 	int i;
4924 	int ret = 0;
4925 	int num_stripes;
4926 	int max_errors = 0;
4927 	struct btrfs_bio *bbio = NULL;
4928 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4929 	int dev_replace_is_ongoing = 0;
4930 	int num_alloc_stripes;
4931 	int patch_the_first_stripe_for_dev_replace = 0;
4932 	u64 physical_to_patch_in_first_stripe = 0;
4933 	u64 raid56_full_stripe_start = (u64)-1;
4934 
4935 	read_lock(&em_tree->lock);
4936 	em = lookup_extent_mapping(em_tree, logical, *length);
4937 	read_unlock(&em_tree->lock);
4938 
4939 	if (!em) {
4940 		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4941 			logical, *length);
4942 		return -EINVAL;
4943 	}
4944 
4945 	if (em->start > logical || em->start + em->len < logical) {
4946 		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4947 			   "found %Lu-%Lu", logical, em->start,
4948 			   em->start + em->len);
4949 		free_extent_map(em);
4950 		return -EINVAL;
4951 	}
4952 
4953 	map = (struct map_lookup *)em->bdev;
4954 	offset = logical - em->start;
4955 
4956 	stripe_len = map->stripe_len;
4957 	stripe_nr = offset;
4958 	/*
4959 	 * stripe_nr counts the total number of stripes we have to stride
4960 	 * to get to this block
4961 	 */
4962 	do_div(stripe_nr, stripe_len);
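	/*
	 * do_div(n, base) divides n in place and returns the remainder;
	 * the remainder is discarded here, leaving stripe_nr equal to
	 * offset / stripe_len.
	 */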
4963 
4964 	stripe_offset = stripe_nr * stripe_len;
4965 	BUG_ON(offset < stripe_offset);
4966 
4967 	/* stripe_offset is the offset of this block in its stripe*/
4968 	stripe_offset = offset - stripe_offset;
4969 
4970 	/* if we're here for raid56, we need to know the stripe aligned start */
4971 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4972 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4973 		raid56_full_stripe_start = offset;
4974 
4975 		/* allow a write of a full stripe, but make sure we don't
4976 		 * allow straddling of stripes
4977 		 */
4978 		do_div(raid56_full_stripe_start, full_stripe_len);
4979 		raid56_full_stripe_start *= full_stripe_len;
4980 	}
4981 
4982 	if (rw & REQ_DISCARD) {
4983 		/* we don't discard raid56 yet */
4984 		if (map->type &
4985 		    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4986 			ret = -EOPNOTSUPP;
4987 			goto out;
4988 		}
4989 		*length = min_t(u64, em->len - offset, *length);
4990 	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4991 		u64 max_len;
4992 		/* For writes to RAID[56], allow a full stripeset across all disks.
4993 		   For other RAID types and for RAID[56] reads, just allow a single
4994 		   stripe (on a single disk). */
4995 		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4996 		    (rw & REQ_WRITE)) {
4997 			max_len = stripe_len * nr_data_stripes(map) -
4998 				(offset - raid56_full_stripe_start);
4999 		} else {
5000 			/* we limit the length of each bio to what fits in a stripe */
5001 			max_len = stripe_len - stripe_offset;
5002 		}
5003 		*length = min_t(u64, em->len - offset, max_len);
5004 	} else {
5005 		*length = em->len - offset;
5006 	}
5007 
5008 	/* This is for when we're called from btrfs_merge_bio_hook() and all
5009 	   it cares about is the length */
5010 	if (!bbio_ret)
5011 		goto out;
5012 
5013 	btrfs_dev_replace_lock(dev_replace);
5014 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5015 	if (!dev_replace_is_ongoing)
5016 		btrfs_dev_replace_unlock(dev_replace);
5017 
5018 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5019 	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
5020 	    dev_replace->tgtdev != NULL) {
5021 		/*
5022 		 * in dev-replace case, for repair case (that's the only
5023 		 * case where the mirror is selected explicitly when
5024 		 * calling btrfs_map_block), blocks left of the left cursor
5025 		 * can also be read from the target drive.
5026 		 * For REQ_GET_READ_MIRRORS, the target drive is added as
5027 		 * the last one to the array of stripes. For READ, it also
5028 		 * needs to be supported using the same mirror number.
5029 		 * If the requested block is not left of the left cursor,
5030 		 * EIO is returned. This can happen because btrfs_num_copies()
5031 		 * returns one more in the dev-replace case.
5032 		 */
5033 		u64 tmp_length = *length;
5034 		struct btrfs_bio *tmp_bbio = NULL;
5035 		int tmp_num_stripes;
5036 		u64 srcdev_devid = dev_replace->srcdev->devid;
5037 		int index_srcdev = 0;
5038 		int found = 0;
5039 		u64 physical_of_found = 0;
5040 
5041 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5042 			     logical, &tmp_length, &tmp_bbio, 0, NULL);
5043 		if (ret) {
5044 			WARN_ON(tmp_bbio != NULL);
5045 			goto out;
5046 		}
5047 
5048 		tmp_num_stripes = tmp_bbio->num_stripes;
5049 		if (mirror_num > tmp_num_stripes) {
5050 			/*
5051 			 * REQ_GET_READ_MIRRORS does not contain this
5052 			 * mirror, that means that the requested area
5053 			 * is not left of the left cursor
5054 			 */
5055 			ret = -EIO;
5056 			kfree(tmp_bbio);
5057 			goto out;
5058 		}
5059 
5060 		/*
5061 		 * process the rest of the function using the mirror_num
5062 		 * of the source drive. Therefore look it up first.
5063 		 * At the end, patch the device pointer to the one of the
5064 		 * target drive.
5065 		 */
5066 		for (i = 0; i < tmp_num_stripes; i++) {
5067 			if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
5068 				/*
5069 				 * In case of DUP, in order to keep it
5070 				 * simple, only add the mirror with the
5071 				 * lowest physical address
5072 				 */
5073 				if (found &&
5074 				    physical_of_found <=
5075 				     tmp_bbio->stripes[i].physical)
5076 					continue;
5077 				index_srcdev = i;
5078 				found = 1;
5079 				physical_of_found =
5080 					tmp_bbio->stripes[i].physical;
5081 			}
5082 		}
5083 
5084 		if (found) {
5085 			mirror_num = index_srcdev + 1;
5086 			patch_the_first_stripe_for_dev_replace = 1;
5087 			physical_to_patch_in_first_stripe = physical_of_found;
5088 		} else {
5089 			WARN_ON(1);
5090 			ret = -EIO;
5091 			kfree(tmp_bbio);
5092 			goto out;
5093 		}
5094 
5095 		kfree(tmp_bbio);
5096 	} else if (mirror_num > map->num_stripes) {
5097 		mirror_num = 0;
5098 	}
5099 
5100 	num_stripes = 1;
5101 	stripe_index = 0;
5102 	stripe_nr_orig = stripe_nr;
5103 	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5104 	do_div(stripe_nr_end, map->stripe_len);
5105 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5106 			    (offset + *length);
5107 
5108 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5109 		if (rw & REQ_DISCARD)
5110 			num_stripes = min_t(u64, map->num_stripes,
5111 					    stripe_nr_end - stripe_nr_orig);
5112 		stripe_index = do_div(stripe_nr, map->num_stripes);
5113 		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
5114 			mirror_num = 1;
5115 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5116 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
5117 			num_stripes = map->num_stripes;
5118 		else if (mirror_num)
5119 			stripe_index = mirror_num - 1;
5120 		else {
5121 			stripe_index = find_live_mirror(fs_info, map, 0,
5122 					    map->num_stripes,
5123 					    current->pid % map->num_stripes,
5124 					    dev_replace_is_ongoing);
5125 			mirror_num = stripe_index + 1;
5126 		}
5127 
5128 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5129 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
5130 			num_stripes = map->num_stripes;
5131 		} else if (mirror_num) {
5132 			stripe_index = mirror_num - 1;
5133 		} else {
5134 			mirror_num = 1;
5135 		}
5136 
5137 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5138 		int factor = map->num_stripes / map->sub_stripes;
5139 
5140 		stripe_index = do_div(stripe_nr, factor);
5141 		stripe_index *= map->sub_stripes;
5142 
5143 		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5144 			num_stripes = map->sub_stripes;
5145 		else if (rw & REQ_DISCARD)
5146 			num_stripes = min_t(u64, map->sub_stripes *
5147 					    (stripe_nr_end - stripe_nr_orig),
5148 					    map->num_stripes);
5149 		else if (mirror_num)
5150 			stripe_index += mirror_num - 1;
5151 		else {
5152 			int old_stripe_index = stripe_index;
5153 			stripe_index = find_live_mirror(fs_info, map,
5154 					      stripe_index,
5155 					      map->sub_stripes, stripe_index +
5156 					      current->pid % map->sub_stripes,
5157 					      dev_replace_is_ongoing);
5158 			mirror_num = stripe_index - old_stripe_index + 1;
5159 		}
5160 
5161 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
5162 				BTRFS_BLOCK_GROUP_RAID6)) {
5163 		u64 tmp;
5164 
5165 		if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
5166 		    && raid_map_ret) {
5167 			int i, rot;
5168 
5169 			/* push stripe_nr back to the start of the full stripe */
5170 			stripe_nr = raid56_full_stripe_start;
5171 			do_div(stripe_nr, stripe_len);
5172 
5173 			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
5174 
5175 			/* RAID[56] write or recovery. Return all stripes */
5176 			num_stripes = map->num_stripes;
5177 			max_errors = nr_parity_stripes(map);
5178 
5179 			raid_map = kmalloc_array(num_stripes, sizeof(u64),
5180 					   GFP_NOFS);
5181 			if (!raid_map) {
5182 				ret = -ENOMEM;
5183 				goto out;
5184 			}
5185 
5186 			/* Work out the disk rotation on this stripe-set */
5187 			tmp = stripe_nr;
5188 			rot = do_div(tmp, num_stripes);
5189 
5190 			/* Fill in the logical address of each stripe */
5191 			tmp = stripe_nr * nr_data_stripes(map);
5192 			for (i = 0; i < nr_data_stripes(map); i++)
5193 				raid_map[(i+rot) % num_stripes] =
5194 					em->start + (tmp + i) * map->stripe_len;
5195 
5196 			raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
5197 			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5198 				raid_map[(i+rot+1) % num_stripes] =
5199 					RAID6_Q_STRIPE;
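			/*
			 * Rotation example for RAID5 over four devices
			 * (three data stripes): with full stripe number 5,
			 * rot = 5 % 4 = 1, so the data stripes land in
			 * raid_map slots 1, 2 and 3 and the P stripe in
			 * slot (3 + 1) % 4 = 0.
			 */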
5200 
5201 			*length = map->stripe_len;
5202 			stripe_index = 0;
5203 			stripe_offset = 0;
5204 		} else {
5205 			/*
5206 			 * Mirror #0 or #1 means the original data block.
5207 			 * Mirror #2 is RAID5 parity block.
5208 			 * Mirror #3 is RAID6 Q block.
5209 			 */
5210 			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
5211 			if (mirror_num > 1)
5212 				stripe_index = nr_data_stripes(map) +
5213 						mirror_num - 2;
5214 
5215 			/* We distribute the parity blocks across stripes */
5216 			tmp = stripe_nr + stripe_index;
5217 			stripe_index = do_div(tmp, map->num_stripes);
5218 			if (!(rw & (REQ_WRITE | REQ_DISCARD |
5219 				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
5220 				mirror_num = 1;
5221 		}
5222 	} else {
5223 		/*
5224 		 * after this do_div call, stripe_nr is the number of stripes
5225 		 * on this device we have to walk to find the data, and
5226 		 * stripe_index is the number of our device in the stripe array
5227 		 */
5228 		stripe_index = do_div(stripe_nr, map->num_stripes);
5229 		mirror_num = stripe_index + 1;
5230 	}
5231 	BUG_ON(stripe_index >= map->num_stripes);
5232 
5233 	num_alloc_stripes = num_stripes;
5234 	if (dev_replace_is_ongoing) {
5235 		if (rw & (REQ_WRITE | REQ_DISCARD))
5236 			num_alloc_stripes <<= 1;
5237 		if (rw & REQ_GET_READ_MIRRORS)
5238 			num_alloc_stripes++;
5239 	}
5240 	bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
5241 	if (!bbio) {
5242 		kfree(raid_map);
5243 		ret = -ENOMEM;
5244 		goto out;
5245 	}
5246 	atomic_set(&bbio->error, 0);
5247 
5248 	if (rw & REQ_DISCARD) {
5249 		int factor = 0;
5250 		int sub_stripes = 0;
5251 		u64 stripes_per_dev = 0;
5252 		u32 remaining_stripes = 0;
5253 		u32 last_stripe = 0;
5254 
5255 		if (map->type &
5256 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
5257 			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5258 				sub_stripes = 1;
5259 			else
5260 				sub_stripes = map->sub_stripes;
5261 
5262 			factor = map->num_stripes / sub_stripes;
5263 			stripes_per_dev = div_u64_rem(stripe_nr_end -
5264 						      stripe_nr_orig,
5265 						      factor,
5266 						      &remaining_stripes);
5267 			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5268 			last_stripe *= sub_stripes;
5269 		}

		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			bbio->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
					 BTRFS_BLOCK_GROUP_RAID10)) {
				bbio->stripes[i].length = stripes_per_dev *
							  map->stripe_len;

				if (i / sub_stripes < remaining_stripes)
					bbio->stripes[i].length +=
						map->stripe_len;

				/*
				 * Special for the first stripe and
				 * the last stripe:
				 *
				 * |-------|...|-------|
				 *     |----------|
				 *    off     end_off
				 */
				if (i < sub_stripes)
					bbio->stripes[i].length -=
						stripe_offset;

				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     sub_stripes - 1))
					bbio->stripes[i].length -=
						stripe_end_offset;

				if (i == sub_stripes - 1)
					stripe_offset = 0;
			} else
				bbio->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			bbio->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}

	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
		max_errors = btrfs_chunk_max_errors(map);

	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    dev_replace->tgtdev != NULL) {
		int index_where_to_add;
		u64 srcdev_devid = dev_replace->srcdev->devid;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk
		 * to the new disk takes place at run time while the
		 * filesystem is mounted writable, the regular write
		 * operations to the old disk have to be duplicated to go
		 * to the new disk as well.
		 * Note that device->missing is handled by the caller, and
		 * that the write to the old disk is already set up in the
		 * stripes array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				index_where_to_add++;
				max_errors++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
		   dev_replace->tgtdev != NULL) {
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can
		 * also be used to read data in case it is needed to repair
		 * a corrupt block elsewhere. This is possible if the
		 * requested area is left of the left cursor. In this area,
		 * the target drive is a full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it
				 * simple, only add the mirror with the
				 * lowest physical address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			u64 length = map->stripe_len;

			if (physical_of_found + length <=
			    dev_replace->cursor_left) {
				struct btrfs_bio_stripe *tgtdev_stripe =
					bbio->stripes + num_stripes;

				tgtdev_stripe->physical = physical_of_found;
				tgtdev_stripe->length =
					bbio->stripes[index_srcdev].length;
				tgtdev_stripe->dev = dev_replace->tgtdev;

				num_stripes++;
			}
		}
	}

	*bbio_ret = bbio;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
	if (raid_map) {
		sort_parity_stripes(bbio, raid_map);
		*raid_map_ret = raid_map;
	}
out:
	if (dev_replace_is_ongoing)
		btrfs_dev_replace_unlock(dev_replace);
	free_extent_map(em);
	return ret;
}

int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		      u64 logical, u64 *length,
		      struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
				 mirror_num, NULL);
}

int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
		       chunk_start);
		return -EIO;
	}

	if (em->start != chunk_start) {
		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
		       em->start, chunk_start);
		free_extent_map(em);
		return -EIO;
	}
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	rmap_len = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			      BTRFS_BLOCK_GROUP_RAID6)) {
		do_div(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}
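
	/*
	 * Illustrative example (numbers made up): a RAID0 chunk over two
	 * devices with a 64K stripe_len.  If @physical is 128K past the
	 * start of stripe slot 0, stripe_nr is 2 on that device and the
	 * logical address computed below is
	 * chunk_start + (2 * 2 + 0) * 64K = chunk_start + 256K.
	 */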

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf); /* -ENOMEM */

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}

static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err)
{
	if (likely(bbio->flags & BTRFS_BIO_ORIG_BIO_SUBMITTED))
		bio_endio_nodec(bio, err);
	else
		bio_endio(bio, err);
	kfree(bbio);
}

static void btrfs_end_bio(struct bio *bio, int err)
{
	struct btrfs_bio *bbio = bio->bi_private;
	struct btrfs_device *dev = bbio->stripes[0].dev;
	int is_orig_bio = 0;

	if (err) {
		atomic_inc(&bbio->error);
		if (err == -EIO || err == -EREMOTEIO) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio->bi_rw & WRITE)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
				btrfs_dev_stat_print_on_error(dev);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		bio->bi_private = bbio->private;
		bio->bi_end_io = bbio->end_io;
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
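		/*
		 * For illustration (profile dependent): a RAID1 write has
		 * max_errors == 1, so a single failed mirror still counts
		 * as success here; a second failure turns the whole bbio
		 * into -EIO for the upper layers.
		 */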

		btrfs_end_bbio(bbio, bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
					struct btrfs_device *device,
					int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	if (device->missing || !device->bdev) {
		bio_endio(bio, -EIO);
		return;
	}

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_work(root->fs_info->submit_workers,
				 &device->work);
}

static int bio_size_ok(struct block_device *bdev, struct bio *bio,
		       sector_t sector)
{
	struct bio_vec *prev;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_sectors = queue_max_sectors(q);
	struct bvec_merge_data bvm = {
		.bi_bdev = bdev,
		.bi_sector = sector,
		.bi_rw = bio->bi_rw,
	};

	if (WARN_ON(bio->bi_vcnt == 0))
		return 1;

	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
	if (bio_sectors(bio) > max_sectors)
		return 0;

	if (!q->merge_bvec_fn)
		return 1;
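
	/*
	 * Probe the queue's merge_bvec_fn: would it still accept the last
	 * bio_vec if the bio started at the new target sector?  If not,
	 * the caller has to split the bio (see breakup_stripe_bio()).
	 */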
	bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
		return 0;
	return 1;
}

static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *bio, u64 physical, int dev_nr,
			      int rw, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
#ifdef DEBUG
	{
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
			 "(%s id %llu), size=%u\n", rw,
			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
			 name->str, dev->devid, bio->bi_iter.bi_size);
		rcu_read_unlock();
	}
#endif
	bio->bi_bdev = dev->bdev;

	btrfs_bio_counter_inc_noblocked(root->fs_info);

	if (async)
		btrfs_schedule_bio(root, dev, rw, bio);
	else
		btrfsic_submit_bio(rw, bio);
}

static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *first_bio, struct btrfs_device *dev,
			      int dev_nr, int rw, int async)
{
	struct bio_vec *bvec = first_bio->bi_io_vec;
	struct bio *bio;
	int nr_vecs = bio_get_nr_vecs(dev->bdev);
	u64 physical = bbio->stripes[dev_nr].physical;

again:
	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
	if (!bio)
		return -ENOMEM;

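	/*
	 * Copy the pages of first_bio into device-sized bios: whenever
	 * bio_add_page() takes less than a full bio_vec (the device limit
	 * was hit), submit what has been collected so far, advance the
	 * physical offset by the submitted size and start a fresh bio.
	 */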
	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len) {
			u64 len = bio->bi_iter.bi_size;

			atomic_inc(&bbio->stripes_pending);
			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
					  rw, async);
			physical += len;
			goto again;
		}
		bvec++;
	}

	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
	return 0;
}

static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		bio->bi_private = bbio->private;
		bio->bi_end_io = bbio->end_io;
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;

		btrfs_end_bbio(bbio, bio, -EIO);
	}
}

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	u64 *raid_map = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(root->fs_info);
	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
			      mirror_num, &raid_map);
	if (ret) {
		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = root->fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if (raid_map) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (rw & WRITE) {
			ret = raid56_parity_write(root, bio, bbio,
						  raid_map, map_length);
		} else {
			ret = raid56_parity_recover(root, bio, bbio,
						    raid_map, map_length,
						    mirror_num);
		}
		/*
		 * FIXME: dev-replace doesn't support raid56 yet; please
		 * fix it in the future.
		 */
		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	if (map_length < length) {
		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
			logical, length, map_length);
		BUG();
	}

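	/*
	 * Submit one bio per stripe.  All but the last stripe get a clone
	 * of first_bio; the original bio itself is sent for the last
	 * stripe, so completing the whole bbio can end it.
	 */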
	while (dev_nr < total_devs) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
			bbio_error(bbio, first_bio, logical);
			dev_nr++;
			continue;
		}

		/*
		 * Check and see if we're ok with this bio based on its size
		 * and offset with the given device.
		 */
		if (!bio_size_ok(dev->bdev, first_bio,
				 bbio->stripes[dev_nr].physical >> 9)) {
			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
						 dev_nr, rw, async_submit);
			BUG_ON(ret);
			dev_nr++;
			continue;
		}

		if (dev_nr < total_devs - 1) {
			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio); /* -ENOMEM */
		} else {
			bio = first_bio;
			bbio->flags |= BTRFS_BIO_ORIG_BIO_SUBMITTED;
		}

		submit_stripe_bio(root, bbio, bio,
				  bbio->stripes[dev_nr].physical, dev_nr, rw,
				  async_submit);
		dev_nr++;
	}
	btrfs_bio_counter_dec(root->fs_info);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return NULL;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	device->missing = 1;
	fs_devices->missing_devices++;

	return device;
}

/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and can be
 * destroyed with kfree() right away.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);

	return dev;
}

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
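	/*
	 * Stash the chunk's map_lookup in em->bdev: extent maps flagged
	 * EXTENT_FLAG_FS_MAPPING never point at a real block device, so
	 * the field is reused to carry the stripe layout.
	 */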
	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, root->fs_info->fs_devices,
						devid, uuid);
			if (!map->stripes[i].dev) {
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	device->is_tgtdev_for_dev_replace = 0;

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	BUG_ON(!mutex_is_locked(&uuid_mutex));

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		if (!btrfs_test_opt(root, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		fs_devices = open_seed_devices(root, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
	if (!device) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		btrfs_warn(root->fs_info, "devid %llu missing", devid);
		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
		if (!device)
			return -ENOMEM;
	} else {
		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device->bdev && !device->missing) {
			/*
			 * this happens when a device that was properly set up
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			device->missing = 1;
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(device->missing);

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->in_fs_metadata = 1;
	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = 0;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the
	 * system array.  btrfs_set_buffer_uptodate() does not properly
	 * mark all of its pages up-to-date when the page is larger: the
	 * extent does not cover the whole page and consequently
	 * check_page_uptodate does not find all the page's extents
	 * up-to-date (the hole beyond sb), and write_extent_buffer then
	 * triggers a WARN_ON.
	 *
	 * Regular short extents go through the
	 * mark_extent_buffer_dirty/writeback cycle, but sb spans only this
	 * function.  Add an explicit SetPageUptodate call to silence the
	 * warning e.g. on PowerPC 64.
	 */
	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;
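	/*
	 * The array is a packed sequence of (disk_key, chunk item) pairs:
	 *
	 *   [disk_key][chunk w/ N stripes][disk_key][chunk w/ M stripes]...
	 *
	 * Walk it key by key; any key type other than BTRFS_CHUNK_ITEM_KEY
	 * means the array is corrupted.
	 */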

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
			if (ret)
				goto error;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	ret = 0;
error:
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->dev_root = fs_info->dev_root;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = 0;
		key.type = BTRFS_DEV_STATS_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = 0;
	key.type = BTRFS_DEV_STATS_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		printk_in_rcu(KERN_WARNING "BTRFS: "
			"error %d while searching for dev_stats item for device %s!\n",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			printk_in_rcu(KERN_WARNING "BTRFS: "
				"delete too small dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			printk_in_rcu(KERN_WARNING "BTRFS: "
					  "insert dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
			continue;

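		/*
		 * Snapshot the dirty-change count before writing the item
		 * and subtract it (rather than resetting to zero) afterward,
		 * so stat increments that race with the update are not lost.
		 */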
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
			   "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	printk_in_rcu(KERN_INFO "BTRFS: "
		   "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

int btrfs_scratch_superblock(struct btrfs_device *device)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;

	bh = btrfs_read_dev_super(device->bdev);
	if (!bh)
		return -EINVAL;
	disk_super = (struct btrfs_super_block *)bh->b_data;

	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
	set_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	return 0;
}

/*
 * Update the size of all devices, which is used for writing out the
 * super blocks.
 */
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *curr, *next;

	if (list_empty(&fs_devices->resized_devices))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	lock_chunks(fs_info->dev_root);
	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
				 resized_list) {
		list_del_init(&curr->resized_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
	}
	unlock_chunks(fs_info->dev_root);
	mutex_unlock(&fs_devices->device_list_mutex);
}

/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
					struct btrfs_transaction *transaction)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	int i;

	if (list_empty(&transaction->pending_chunks))
		return;

	/* In order to kick the device replace finish process */
	lock_chunks(root);
	list_for_each_entry(em, &transaction->pending_chunks, list) {
		map = (struct map_lookup *)em->bdev;

		for (i = 0; i < map->num_stripes; i++) {
			dev = map->stripes[i].dev;
			dev->commit_bytes_used = dev->bytes_used;
		}
	}
	unlock_chunks(root);
}