/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 3,
	},
};

const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
};

/*
 * Table to convert BTRFS_RAID_* to the error code returned when the minimum
 * number of devices condition is not met. Zero means there's no corresponding
 * BTRFS_ERROR_DEV_*_NOT_MET value.
 */
const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	[BTRFS_RAID_RAID1]  = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	[BTRFS_RAID_DUP]    = 0,
	[BTRFS_RAID_RAID0]  = 0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	[BTRFS_RAID_RAID6]  = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
};
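
/*
 * Illustrative sketch (not part of this file): the three tables above are
 * meant to be indexed by the same BTRFS_RAID_* value, so a profile's
 * attributes, its block group flag and its mindev error code can all be
 * looked up with one index.  A hedged example of checking whether a device
 * count satisfies a profile's minimum:
 *
 *	static int check_min_devs(int raid_type, u64 num_devices)
 *	{
 *		if (num_devices < btrfs_raid_array[raid_type].devs_min)
 *			return btrfs_raid_mindev_error[raid_type];
 *		return 0;
 *	}
 *
 * btrfs_check_raid_min_devices() further below performs exactly this check
 * for every profile present in the filesystem.
 */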

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	if not NULL, copy the uuid to fs_devices::fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}
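
/*
 * Usage sketch (illustrative only): alloc_fs_devices() reports failure via
 * ERR_PTR(), so callers must test with IS_ERR() rather than for NULL:
 *
 *	struct btrfs_fs_devices *fs_devs = alloc_fs_devices(fsid);
 *
 *	if (IS_ERR(fs_devs))
 *		return PTR_ERR(fs_devs);
 *	(use fs_devs; while it is still unlinked it may simply be freed
 *	 with kfree(), as the comment above notes)
 */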

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}

/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 */
static struct btrfs_device *find_device(struct btrfs_fs_devices *fs_devices,
		u64 devid, const u8 *uuid)
{
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
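
/*
 * Illustrative example of the two lookup modes described above: passing a
 * NULL uuid matches on devid alone, while passing both requires an exact
 * pair match:
 *
 *	dev = find_device(fs_devices, devid, NULL);
 *	dev = find_device(fs_devices, devid, disk_super->dev_item.uuid);
 *
 * device_list_add() below relies on the second form, so that two devices
 * that happen to share a devid but differ in uuid are kept apart.
 */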

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
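
/*
 * Usage sketch (illustrative): on success the caller owns both the block
 * device and the superblock buffer head, and must release them in this
 * order; on failure both out-parameters are NULL, so no cleanup is needed:
 *
 *	if (btrfs_get_bdev_and_sb(path, flags | FMODE_EXCL, holder, 1,
 *				  &bdev, &bh))
 *		return;
 *	(use bh->b_data as the superblock)
 *	brelse(bh);
 *	blkdev_put(bdev, flags);
 *
 * __btrfs_open_devices() below follows this pattern for every registered
 * device.
 */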

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, set up a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = device->bdev->bd_bdi;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		/*
		 * atomic_dec_return implies a barrier for waitqueue_active
		 */
		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {
			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * TODO: This won't be enough. What if the same device
			 * comes back (with a new uuid) under its mapper path?
			 * But for now this does help, as mostly an admin will
			 * use either the mapper or the non-mapper path
			 * throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
						rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
				break;
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = find_device(fs_devices, devid,
				disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When the FS is already mounted:
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         a different name, or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away, and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after the FS has been mounted.  We're
		 * still tracking a problem where systems fail mount by
		 * subvolume id when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you are
			 * here, that means there is more than one disk with
			 * the same uuid and devid. We keep the one with the
			 * larger generation number or the last-in if the
			 * generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	/*
	 * if there is new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
	if (ret > 0)
		btrfs_free_stale_device(device);

	*fs_devices_ret = fs_devices;

	return ret;
}
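
/*
 * Caller-side sketch (illustrative): the tri-state return value documented
 * above maps naturally onto the scan path:
 *
 *	ret = device_list_add(path, disk_super, devid, &fs_devices);
 *	if (ret < 0)
 *		goto out;	(error, e.g. -EBUSY, -EEXIST or -ENOMEM)
 *	if (ret > 0)
 *		pr_info(...);	(first time this device was seen)
 *	ret = 0;		(0 means the device was already known)
 *
 * btrfs_scan_one_device() below uses exactly this convention to decide
 * whether to log a newly registered device.
 */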

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock; it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path; it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);
	rcu_string_free(device->name);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (device->bdev && device->writeable) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);
}

static void btrfs_prepare_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (device->writeable &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->missing)
		fs_devices->missing_devices--;

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;
	struct list_head pending_put;

	INIT_LIST_HEAD(&pending_put);

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_prepare_close_one_device(device);
		list_add(&device->dev_list, &pending_put);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * btrfs_show_devname() uses the device_list_mutex, and a call to
	 * blkdev_put() can sometimes lead the VFS back into this function.
	 * So, for now, do the put outside of the device_list_mutex.
	 */
	while (!list_empty(&pending_put)) {
		device = list_first_entry(&pending_put,
				struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_close_bdev(device);
		call_rcu(&device->rcu, free_device);
	}

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for the rcu kworkers under __btrfs_close_devices to finish
	 * all the blkdev_puts so the device is really free when umount is
	 * done.
	 */
	rcu_barrier();
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					    &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;
		if (!blk_queue_nonrot(q))
			fs_devices->rotating = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
		struct page **page, struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}
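
/*
 * Usage sketch (illustrative): a zero return hands back a kmap'ed page
 * whose mapping must be released with btrfs_release_disk_super() once the
 * caller is done with the superblock pointer:
 *
 *	if (btrfs_read_disk_super(bdev, btrfs_sb_offset(0), &page,
 *				  &disk_super))
 *		return -EINVAL;	(no valid btrfs super at this offset)
 *	devid = btrfs_stack_device_id(&disk_super->dev_item);
 *	btrfs_release_disk_super(page);
 *
 * btrfs_scan_one_device() below is the in-tree caller of this pattern.
 */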

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path, and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
		goto error_bdev_put;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			pr_info("BTRFS: device label %s ", disk_super->label);
		} else {
			pr_info("BTRFS: device fsid %pU ", disk_super->fsid);
		}

		pr_cont("devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
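
/*
 * Illustrative call (hedged; the device path, flags and holder token are
 * assumptions and depend on the caller, e.g. the mount or device-scan
 * ioctl paths):
 *
 *	struct btrfs_fs_devices *fs_devices;
 *	int ret = btrfs_scan_one_device("/dev/sdb", FMODE_READ, holder,
 *					&fs_devices);
 *
 * Note that FMODE_EXCL is OR'ed in internally and the whole scan is
 * serialized by the uuid_mutex, so callers need not take it themselves.
 */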

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->fs_info->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
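
/*
 * Worked example for the overlap cases above (illustrative): querying the
 * range [start=100, end=199] against two dev extents:
 *
 *	extent A: offset  50, length 100  ->  covers [ 50, 149]
 *	extent B: offset 180, length  50  ->  covers [180, 229]
 *
 * A matches "key.offset <= start && extent_end > start", contributing
 * 150 - 100 = 50 bytes.  B matches "key.offset > start && key.offset <=
 * end", contributing 199 - 180 + 1 = 20 bytes, so *length ends up 70.
 */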

static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}
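
/*
 * Illustrative use: a return of 1 means the proposed range collided with a
 * pending or pinned chunk and *start was advanced past every overlapping
 * stripe:
 *
 *	u64 start = hole_start;
 *
 *	if (contains_pending_extent(transaction, device, &start, len)) {
 *		(the hole is unusable as-is; retry the search from the
 *		 new, larger start offset)
 *	}
 *
 * This is exactly how find_free_dev_extent_start() below shrinks or
 * discards candidate holes.
 */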

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This uses a pretty simple search; the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	search_start = max_t(u64, search_start, SZ_1M);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
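
/*
 * Usage sketch (illustrative): finding a 1GiB hole on a device, starting
 * the search at the beginning of the disk:
 *
 *	u64 start, len;
 *	int ret = find_free_dev_extent_start(trans->transaction, device,
 *					     SZ_1G, 0, &start, &len);
 *
 * On success, [start, start + SZ_1G) is free; on -ENOSPC, start and len
 * instead describe the largest hole that was found, per the comment above.
 */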

int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info,
			    struct btrfs_device *device)
{
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime-based probes like libblkid.
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding e.g.
 * device replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_group[i]))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_mindev_error[i];

			if (ret)
				return ret;
		}
	}

	return 0;
}
1823 
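/*
 * The retry loop in btrfs_check_raid_min_devices() above is the classic
 * seqlock reader pattern. Below is a sketch of the same idea written with
 * C11 atomics instead of the kernel's seqlock API (names are illustrative):
 * the writer makes the sequence odd while updating, and readers retry until
 * they observe a stable, even value.
 */
#include <stdatomic.h>

struct seq_u64 {
	atomic_uint seq;
	_Atomic unsigned long long value;	/* e.g. the avail_*_alloc_bits */
};

static unsigned long long seq_read_sketch(struct seq_u64 *sv)
{
	unsigned int s1, s2;
	unsigned long long v;

	do {
		s1 = atomic_load(&sv->seq);
		v = atomic_load(&sv->value);
		s2 = atomic_load(&sv->seq);
	} while (s1 != s2 || (s1 & 1));	/* retry while a writer is active */

	return v;
}
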
1824 struct btrfs_device *btrfs_find_next_active_device(struct btrfs_fs_devices *fs_devs,
1825 					struct btrfs_device *device)
1826 {
1827 	struct btrfs_device *next_device;
1828 
1829 	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1830 		if (next_device != device &&
1831 			!next_device->missing && next_device->bdev)
1832 			return next_device;
1833 	}
1834 
1835 	return NULL;
1836 }
1837 
1838 /*
1839  * Check if the given device is part of s_bdev / latest_bdev and replace it
1840  * with the provided device, or else with the next active device. In the
1841  * context where this function is called there should always be another
1842  * active device (or this_dev) available.
1843  */
1844 void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
1845 		struct btrfs_device *device, struct btrfs_device *this_dev)
1846 {
1847 	struct btrfs_device *next_device;
1848 
1849 	if (this_dev)
1850 		next_device = this_dev;
1851 	else
1852 		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1853 								device);
1854 	ASSERT(next_device);
1855 
1856 	if (fs_info->sb->s_bdev &&
1857 			(fs_info->sb->s_bdev == device->bdev))
1858 		fs_info->sb->s_bdev = next_device->bdev;
1859 
1860 	if (fs_info->fs_devices->latest_bdev == device->bdev)
1861 		fs_info->fs_devices->latest_bdev = next_device->bdev;
1862 }
1863 
1864 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
1865 		u64 devid)
1866 {
1867 	struct btrfs_device *device;
1868 	struct btrfs_fs_devices *cur_devices;
1869 	u64 num_devices;
1870 	int ret = 0;
1871 
1872 	mutex_lock(&uuid_mutex);
1873 
1874 	num_devices = fs_info->fs_devices->num_devices;
1875 	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
1876 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
1877 		WARN_ON(num_devices < 1);
1878 		num_devices--;
1879 	}
1880 	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
1881 
1882 	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
1883 	if (ret)
1884 		goto out;
1885 
1886 	ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
1887 					   &device);
1888 	if (ret)
1889 		goto out;
1890 
1891 	if (device->is_tgtdev_for_dev_replace) {
1892 		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1893 		goto out;
1894 	}
1895 
1896 	if (device->writeable && fs_info->fs_devices->rw_devices == 1) {
1897 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1898 		goto out;
1899 	}
1900 
1901 	if (device->writeable) {
1902 		mutex_lock(&fs_info->chunk_mutex);
1903 		list_del_init(&device->dev_alloc_list);
1904 		device->fs_devices->rw_devices--;
1905 		mutex_unlock(&fs_info->chunk_mutex);
1906 	}
1907 
1908 	mutex_unlock(&uuid_mutex);
1909 	ret = btrfs_shrink_device(device, 0);
1910 	mutex_lock(&uuid_mutex);
1911 	if (ret)
1912 		goto error_undo;
1913 
1914 	/*
1915 	 * TODO: the superblock still includes this device in its num_devices
1916 	 * counter although write_all_supers() is not locked out. This
1917 	 * could give a filesystem state which requires a degraded mount.
1918 	 */
1919 	ret = btrfs_rm_dev_item(fs_info, device);
1920 	if (ret)
1921 		goto error_undo;
1922 
1923 	device->in_fs_metadata = 0;
1924 	btrfs_scrub_cancel_dev(fs_info, device);
1925 
1926 	/*
1927 	 * The device list mutex makes sure that we don't change the
1928 	 * device list while someone else is writing out all the
1929 	 * device supers. Whoever is writing all supers should lock
1930 	 * the device list mutex before getting the number of devices
1931 	 * in the super block (super_copy). Conversely, whoever
1932 	 * updates the number of devices in the super block
1933 	 * (super_copy) should hold the device list mutex.
1934 	 */
1935 
1936 	cur_devices = device->fs_devices;
1937 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
1938 	list_del_rcu(&device->dev_list);
1939 
1940 	device->fs_devices->num_devices--;
1941 	device->fs_devices->total_devices--;
1942 
1943 	if (device->missing)
1944 		device->fs_devices->missing_devices--;
1945 
1946 	btrfs_assign_next_active_device(fs_info, device, NULL);
1947 
1948 	if (device->bdev) {
1949 		device->fs_devices->open_devices--;
1950 		/* remove sysfs entry */
1951 		btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
1952 	}
1953 
1954 	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
1955 	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
1956 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1957 
1958 	/*
1959 	 * at this point, the device is zero sized and detached from
1960 	 * the devices list.  All that's left is to zero out the old
1961 	 * supers and free the device.
1962 	 */
1963 	if (device->writeable)
1964 		btrfs_scratch_superblocks(device->bdev, device->name->str);
1965 
1966 	btrfs_close_bdev(device);
1967 	call_rcu(&device->rcu, free_device);
1968 
1969 	if (cur_devices->open_devices == 0) {
1970 		struct btrfs_fs_devices *fs_devices;
1971 		fs_devices = fs_info->fs_devices;
1972 		while (fs_devices) {
1973 			if (fs_devices->seed == cur_devices) {
1974 				fs_devices->seed = cur_devices->seed;
1975 				break;
1976 			}
1977 			fs_devices = fs_devices->seed;
1978 		}
1979 		cur_devices->seed = NULL;
1980 		__btrfs_close_devices(cur_devices);
1981 		free_fs_devices(cur_devices);
1982 	}
1983 
1984 out:
1985 	mutex_unlock(&uuid_mutex);
1986 	return ret;
1987 
1988 error_undo:
1989 	if (device->writeable) {
1990 		mutex_lock(&fs_info->chunk_mutex);
1991 		list_add(&device->dev_alloc_list,
1992 			 &fs_info->fs_devices->alloc_list);
1993 		device->fs_devices->rw_devices++;
1994 		mutex_unlock(&fs_info->chunk_mutex);
1995 	}
1996 	goto out;
1997 }
1998 
1999 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
2000 					struct btrfs_device *srcdev)
2001 {
2002 	struct btrfs_fs_devices *fs_devices;
2003 
2004 	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
2005 
2006 	/*
2007 	 * For a filesystem with no seed, srcdev->fs_devices points to the
2008 	 * fs_devices of fs_info. However, when the device being replaced is
2009 	 * a seed device, it points to the seed's local fs_devices. In short,
2010 	 * srcdev has the correct fs_devices in both cases.
2011 	 */
2012 	fs_devices = srcdev->fs_devices;
2013 
2014 	list_del_rcu(&srcdev->dev_list);
2015 	list_del_rcu(&srcdev->dev_alloc_list);
2016 	fs_devices->num_devices--;
2017 	if (srcdev->missing)
2018 		fs_devices->missing_devices--;
2019 
2020 	if (srcdev->writeable)
2021 		fs_devices->rw_devices--;
2022 
2023 	if (srcdev->bdev)
2024 		fs_devices->open_devices--;
2025 }
2026 
2027 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
2028 				      struct btrfs_device *srcdev)
2029 {
2030 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2031 
2032 	if (srcdev->writeable) {
2033 		/* zero out the old super if it is writable */
2034 		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
2035 	}
2036 
2037 	btrfs_close_bdev(srcdev);
2038 
2039 	call_rcu(&srcdev->rcu, free_device);
2040 
2041 	/*
2042 	 * Unless fs_devices is a seed filesystem, num_devices shouldn't
2043 	 * drop to zero.
2044 	 */
2045 	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
2046 
2047 	/* If there are no devices left, delete the fs_devices. */
2048 	if (!fs_devices->num_devices) {
2049 		struct btrfs_fs_devices *tmp_fs_devices;
2050 
2051 		tmp_fs_devices = fs_info->fs_devices;
2052 		while (tmp_fs_devices) {
2053 			if (tmp_fs_devices->seed == fs_devices) {
2054 				tmp_fs_devices->seed = fs_devices->seed;
2055 				break;
2056 			}
2057 			tmp_fs_devices = tmp_fs_devices->seed;
2058 		}
2059 		fs_devices->seed = NULL;
2060 		__btrfs_close_devices(fs_devices);
2061 		free_fs_devices(fs_devices);
2062 	}
2063 }
2064 
2065 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2066 				      struct btrfs_device *tgtdev)
2067 {
2068 	mutex_lock(&uuid_mutex);
2069 	WARN_ON(!tgtdev);
2070 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2071 
2072 	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
2073 
2074 	if (tgtdev->bdev)
2075 		fs_info->fs_devices->open_devices--;
2076 
2077 	fs_info->fs_devices->num_devices--;
2078 
2079 	btrfs_assign_next_active_device(fs_info, tgtdev, NULL);
2080 
2081 	list_del_rcu(&tgtdev->dev_list);
2082 
2083 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2084 	mutex_unlock(&uuid_mutex);
2085 
2086 	/*
2087 	 * The update_dev_time() within btrfs_scratch_superblocks() may
2088 	 * lead to a call to btrfs_show_devname(), which will try to hold
2089 	 * device_list_mutex. Since this device is already off the device
2090 	 * list at this point, we don't have to hold the device_list_mutex
2091 	 * lock here.
2092 	 */
2093 	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2094 
2095 	btrfs_close_bdev(tgtdev);
2096 	call_rcu(&tgtdev->rcu, free_device);
2097 }
2098 
2099 static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
2100 				     const char *device_path,
2101 				     struct btrfs_device **device)
2102 {
2103 	int ret = 0;
2104 	struct btrfs_super_block *disk_super;
2105 	u64 devid;
2106 	u8 *dev_uuid;
2107 	struct block_device *bdev;
2108 	struct buffer_head *bh;
2109 
2110 	*device = NULL;
2111 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2112 				    fs_info->bdev_holder, 0, &bdev, &bh);
2113 	if (ret)
2114 		return ret;
2115 	disk_super = (struct btrfs_super_block *)bh->b_data;
2116 	devid = btrfs_stack_device_id(&disk_super->dev_item);
2117 	dev_uuid = disk_super->dev_item.uuid;
2118 	*device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid);
2119 	brelse(bh);
2120 	if (!*device)
2121 		ret = -ENOENT;
2122 	blkdev_put(bdev, FMODE_READ);
2123 	return ret;
2124 }
2125 
2126 int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
2127 					 const char *device_path,
2128 					 struct btrfs_device **device)
2129 {
2130 	*device = NULL;
2131 	if (strcmp(device_path, "missing") == 0) {
2132 		struct list_head *devices;
2133 		struct btrfs_device *tmp;
2134 
2135 		devices = &fs_info->fs_devices->devices;
2136 		/*
2137 		 * It is safe to read the devices since the volume_mutex
2138 		 * is held by the caller.
2139 		 */
2140 		list_for_each_entry(tmp, devices, dev_list) {
2141 			if (tmp->in_fs_metadata && !tmp->bdev) {
2142 				*device = tmp;
2143 				break;
2144 			}
2145 		}
2146 
2147 		if (!*device)
2148 			return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2149 
2150 		return 0;
2151 	} else {
2152 		return btrfs_find_device_by_path(fs_info, device_path, device);
2153 	}
2154 }
2155 
2156 /*
2157  * Lookup a device given by device id, or the path if the id is 0.
2158  */
2159 int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
2160 				 const char *devpath,
2161 				 struct btrfs_device **device)
2162 {
2163 	int ret;
2164 
2165 	if (devid) {
2166 		ret = 0;
2167 		*device = btrfs_find_device(fs_info, devid, NULL, NULL);
2168 		if (!*device)
2169 			ret = -ENOENT;
2170 	} else {
2171 		if (!devpath || !devpath[0])
2172 			return -EINVAL;
2173 
2174 		ret = btrfs_find_device_missing_or_by_path(fs_info, devpath,
2175 							   device);
2176 	}
2177 	return ret;
2178 }
2179 
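/*
 * A plain-C sketch of the dispatch rule implemented by
 * btrfs_find_device_by_devspec() above. The enum and function names are
 * illustrative only, not kernel APIs.
 */
#include <string.h>

enum devspec_lookup {
	LOOKUP_BY_DEVID,	/* nonzero id takes precedence */
	LOOKUP_MISSING,		/* scan for a device with no bdev */
	LOOKUP_BY_PATH,		/* read the superblock at the path */
	LOOKUP_INVALID		/* -EINVAL in the kernel */
};

static enum devspec_lookup classify_devspec_sketch(unsigned long long devid,
						   const char *devpath)
{
	if (devid)
		return LOOKUP_BY_DEVID;
	if (!devpath || !devpath[0])
		return LOOKUP_INVALID;
	if (strcmp(devpath, "missing") == 0)
		return LOOKUP_MISSING;
	return LOOKUP_BY_PATH;
}
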
2180 /*
2181  * Does all the dirty work required to change the filesystem's UUID.
2182  */
2183 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2184 {
2185 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2186 	struct btrfs_fs_devices *old_devices;
2187 	struct btrfs_fs_devices *seed_devices;
2188 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2189 	struct btrfs_device *device;
2190 	u64 super_flags;
2191 
2192 	BUG_ON(!mutex_is_locked(&uuid_mutex));
2193 	if (!fs_devices->seeding)
2194 		return -EINVAL;
2195 
2196 	seed_devices = alloc_fs_devices(NULL);
2197 	if (IS_ERR(seed_devices))
2198 		return PTR_ERR(seed_devices);
2199 
2200 	old_devices = clone_fs_devices(fs_devices);
2201 	if (IS_ERR(old_devices)) {
2202 		kfree(seed_devices);
2203 		return PTR_ERR(old_devices);
2204 	}
2205 
2206 	list_add(&old_devices->list, &fs_uuids);
2207 
2208 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2209 	seed_devices->opened = 1;
2210 	INIT_LIST_HEAD(&seed_devices->devices);
2211 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2212 	mutex_init(&seed_devices->device_list_mutex);
2213 
2214 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2215 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2216 			      synchronize_rcu);
2217 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2218 		device->fs_devices = seed_devices;
2219 
2220 	mutex_lock(&fs_info->chunk_mutex);
2221 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2222 	mutex_unlock(&fs_info->chunk_mutex);
2223 
2224 	fs_devices->seeding = 0;
2225 	fs_devices->num_devices = 0;
2226 	fs_devices->open_devices = 0;
2227 	fs_devices->missing_devices = 0;
2228 	fs_devices->rotating = 0;
2229 	fs_devices->seed = seed_devices;
2230 
2231 	generate_random_uuid(fs_devices->fsid);
2232 	memcpy(fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2233 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2234 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2235 
2236 	super_flags = btrfs_super_flags(disk_super) &
2237 		      ~BTRFS_SUPER_FLAG_SEEDING;
2238 	btrfs_set_super_flags(disk_super, super_flags);
2239 
2240 	return 0;
2241 }
2242 
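/*
 * A sketch of what the generate_random_uuid() call in btrfs_prepare_sprout()
 * above amounts to: 16 random bytes with the RFC 4122 version-4 and variant
 * bits forced. getrandom() here stands in for the kernel's RNG; the function
 * name is illustrative.
 */
#include <sys/random.h>

static void random_uuid_sketch(unsigned char uuid[16])
{
	(void)getrandom(uuid, 16, 0);		/* fill with random bytes */
	uuid[6] = (uuid[6] & 0x0f) | 0x40;	/* version 4 */
	uuid[8] = (uuid[8] & 0x3f) | 0x80;	/* RFC 4122 variant */
}
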
2243 /*
2244  * Store the expected generation for seed devices in device items.
2245  */
2246 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2247 			       struct btrfs_fs_info *fs_info)
2248 {
2249 	struct btrfs_root *root = fs_info->chunk_root;
2250 	struct btrfs_path *path;
2251 	struct extent_buffer *leaf;
2252 	struct btrfs_dev_item *dev_item;
2253 	struct btrfs_device *device;
2254 	struct btrfs_key key;
2255 	u8 fs_uuid[BTRFS_FSID_SIZE];
2256 	u8 dev_uuid[BTRFS_UUID_SIZE];
2257 	u64 devid;
2258 	int ret;
2259 
2260 	path = btrfs_alloc_path();
2261 	if (!path)
2262 		return -ENOMEM;
2263 
2264 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2265 	key.offset = 0;
2266 	key.type = BTRFS_DEV_ITEM_KEY;
2267 
2268 	while (1) {
2269 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2270 		if (ret < 0)
2271 			goto error;
2272 
2273 		leaf = path->nodes[0];
2274 next_slot:
2275 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2276 			ret = btrfs_next_leaf(root, path);
2277 			if (ret > 0)
2278 				break;
2279 			if (ret < 0)
2280 				goto error;
2281 			leaf = path->nodes[0];
2282 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2283 			btrfs_release_path(path);
2284 			continue;
2285 		}
2286 
2287 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2288 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2289 		    key.type != BTRFS_DEV_ITEM_KEY)
2290 			break;
2291 
2292 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2293 					  struct btrfs_dev_item);
2294 		devid = btrfs_device_id(leaf, dev_item);
2295 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2296 				   BTRFS_UUID_SIZE);
2297 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2298 				   BTRFS_FSID_SIZE);
2299 		device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
2300 		BUG_ON(!device); /* Logic error */
2301 
2302 		if (device->fs_devices->seeding) {
2303 			btrfs_set_device_generation(leaf, dev_item,
2304 						    device->generation);
2305 			btrfs_mark_buffer_dirty(leaf);
2306 		}
2307 
2308 		path->slots[0]++;
2309 		goto next_slot;
2310 	}
2311 	ret = 0;
2312 error:
2313 	btrfs_free_path(path);
2314 	return ret;
2315 }
2316 
2317 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2318 {
2319 	struct btrfs_root *root = fs_info->dev_root;
2320 	struct request_queue *q;
2321 	struct btrfs_trans_handle *trans;
2322 	struct btrfs_device *device;
2323 	struct block_device *bdev;
2324 	struct list_head *devices;
2325 	struct super_block *sb = fs_info->sb;
2326 	struct rcu_string *name;
2327 	u64 tmp;
2328 	int seeding_dev = 0;
2329 	int ret = 0;
2330 
2331 	if (sb_rdonly(sb) && !fs_info->fs_devices->seeding)
2332 		return -EROFS;
2333 
2334 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2335 				  fs_info->bdev_holder);
2336 	if (IS_ERR(bdev))
2337 		return PTR_ERR(bdev);
2338 
2339 	if (fs_info->fs_devices->seeding) {
2340 		seeding_dev = 1;
2341 		down_write(&sb->s_umount);
2342 		mutex_lock(&uuid_mutex);
2343 	}
2344 
2345 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2346 
2347 	devices = &fs_info->fs_devices->devices;
2348 
2349 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2350 	list_for_each_entry(device, devices, dev_list) {
2351 		if (device->bdev == bdev) {
2352 			ret = -EEXIST;
2353 			mutex_unlock(
2354 				&fs_info->fs_devices->device_list_mutex);
2355 			goto error;
2356 		}
2357 	}
2358 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2359 
2360 	device = btrfs_alloc_device(fs_info, NULL, NULL);
2361 	if (IS_ERR(device)) {
2362 		/* we can safely leave the fs_devices entry around */
2363 		ret = PTR_ERR(device);
2364 		goto error;
2365 	}
2366 
2367 	name = rcu_string_strdup(device_path, GFP_KERNEL);
2368 	if (!name) {
2369 		kfree(device);
2370 		ret = -ENOMEM;
2371 		goto error;
2372 	}
2373 	rcu_assign_pointer(device->name, name);
2374 
2375 	trans = btrfs_start_transaction(root, 0);
2376 	if (IS_ERR(trans)) {
2377 		rcu_string_free(device->name);
2378 		kfree(device);
2379 		ret = PTR_ERR(trans);
2380 		goto error;
2381 	}
2382 
2383 	q = bdev_get_queue(bdev);
2384 	if (blk_queue_discard(q))
2385 		device->can_discard = 1;
2386 	device->writeable = 1;
2387 	device->generation = trans->transid;
2388 	device->io_width = fs_info->sectorsize;
2389 	device->io_align = fs_info->sectorsize;
2390 	device->sector_size = fs_info->sectorsize;
2391 	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2392 					 fs_info->sectorsize);
2393 	device->disk_total_bytes = device->total_bytes;
2394 	device->commit_total_bytes = device->total_bytes;
2395 	device->fs_info = fs_info;
2396 	device->bdev = bdev;
2397 	device->in_fs_metadata = 1;
2398 	device->is_tgtdev_for_dev_replace = 0;
2399 	device->mode = FMODE_EXCL;
2400 	device->dev_stats_valid = 1;
2401 	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2402 
2403 	if (seeding_dev) {
2404 		sb->s_flags &= ~MS_RDONLY;
2405 		ret = btrfs_prepare_sprout(fs_info);
2406 		BUG_ON(ret); /* -ENOMEM */
2407 	}
2408 
2409 	device->fs_devices = fs_info->fs_devices;
2410 
2411 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2412 	mutex_lock(&fs_info->chunk_mutex);
2413 	list_add_rcu(&device->dev_list, &fs_info->fs_devices->devices);
2414 	list_add(&device->dev_alloc_list,
2415 		 &fs_info->fs_devices->alloc_list);
2416 	fs_info->fs_devices->num_devices++;
2417 	fs_info->fs_devices->open_devices++;
2418 	fs_info->fs_devices->rw_devices++;
2419 	fs_info->fs_devices->total_devices++;
2420 	fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2421 
2422 	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2423 
2424 	if (!blk_queue_nonrot(q))
2425 		fs_info->fs_devices->rotating = 1;
2426 
2427 	tmp = btrfs_super_total_bytes(fs_info->super_copy);
2428 	btrfs_set_super_total_bytes(fs_info->super_copy,
2429 		round_down(tmp + device->total_bytes, fs_info->sectorsize));
2430 
2431 	tmp = btrfs_super_num_devices(fs_info->super_copy);
2432 	btrfs_set_super_num_devices(fs_info->super_copy, tmp + 1);
2433 
2434 	/* add sysfs device entry */
2435 	btrfs_sysfs_add_device_link(fs_info->fs_devices, device);
2436 
2437 	/*
2438 	 * We've got more storage, so clear any full flags on the
2439 	 * space infos.
2440 	 */
2441 	btrfs_clear_space_info_full(fs_info);
2442 
2443 	mutex_unlock(&fs_info->chunk_mutex);
2444 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2445 
2446 	if (seeding_dev) {
2447 		mutex_lock(&fs_info->chunk_mutex);
2448 		ret = init_first_rw_device(trans, fs_info);
2449 		mutex_unlock(&fs_info->chunk_mutex);
2450 		if (ret) {
2451 			btrfs_abort_transaction(trans, ret);
2452 			goto error_trans;
2453 		}
2454 	}
2455 
2456 	ret = btrfs_add_device(trans, fs_info, device);
2457 	if (ret) {
2458 		btrfs_abort_transaction(trans, ret);
2459 		goto error_trans;
2460 	}
2461 
2462 	if (seeding_dev) {
2463 		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2464 
2465 		ret = btrfs_finish_sprout(trans, fs_info);
2466 		if (ret) {
2467 			btrfs_abort_transaction(trans, ret);
2468 			goto error_trans;
2469 		}
2470 
2471 		/* Sprouting changes the fsid of the mounted filesystem,
2472 		 * so rename the fsid directory in sysfs accordingly.
2473 		 */
2474 		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2475 						fs_info->fsid);
2476 		if (kobject_rename(&fs_info->fs_devices->fsid_kobj, fsid_buf))
2477 			btrfs_warn(fs_info,
2478 				   "sysfs: failed to create fsid for sprout");
2479 	}
2480 
2481 	ret = btrfs_commit_transaction(trans);
2482 
2483 	if (seeding_dev) {
2484 		mutex_unlock(&uuid_mutex);
2485 		up_write(&sb->s_umount);
2486 
2487 		if (ret) /* transaction commit */
2488 			return ret;
2489 
2490 		ret = btrfs_relocate_sys_chunks(fs_info);
2491 		if (ret < 0)
2492 			btrfs_handle_fs_error(fs_info, ret,
2493 				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2494 		trans = btrfs_attach_transaction(root);
2495 		if (IS_ERR(trans)) {
2496 			if (PTR_ERR(trans) == -ENOENT)
2497 				return 0;
2498 			return PTR_ERR(trans);
2499 		}
2500 		ret = btrfs_commit_transaction(trans);
2501 	}
2502 
2503 	/* Update ctime/mtime for libblkid */
2504 	update_dev_time(device_path);
2505 	return ret;
2506 
2507 error_trans:
2508 	if (seeding_dev)
2509 		sb->s_flags |= MS_RDONLY;
2510 	btrfs_end_transaction(trans);
2511 	rcu_string_free(device->name);
2512 	btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
2513 	kfree(device);
2514 error:
2515 	blkdev_put(bdev, FMODE_EXCL);
2516 	if (seeding_dev) {
2517 		mutex_unlock(&uuid_mutex);
2518 		up_write(&sb->s_umount);
2519 	}
2520 	return ret;
2521 }
2522 
2523 int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2524 				  const char *device_path,
2525 				  struct btrfs_device *srcdev,
2526 				  struct btrfs_device **device_out)
2527 {
2528 	struct request_queue *q;
2529 	struct btrfs_device *device;
2530 	struct block_device *bdev;
2531 	struct list_head *devices;
2532 	struct rcu_string *name;
2533 	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2534 	int ret = 0;
2535 
2536 	*device_out = NULL;
2537 	if (fs_info->fs_devices->seeding) {
2538 		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2539 		return -EINVAL;
2540 	}
2541 
2542 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2543 				  fs_info->bdev_holder);
2544 	if (IS_ERR(bdev)) {
2545 		btrfs_err(fs_info, "target device %s is invalid!", device_path);
2546 		return PTR_ERR(bdev);
2547 	}
2548 
2549 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2550 
2551 	devices = &fs_info->fs_devices->devices;
2552 	list_for_each_entry(device, devices, dev_list) {
2553 		if (device->bdev == bdev) {
2554 			btrfs_err(fs_info,
2555 				  "target device is in the filesystem!");
2556 			ret = -EEXIST;
2557 			goto error;
2558 		}
2559 	}
2560 
2561 
2562 	if (i_size_read(bdev->bd_inode) <
2563 	    btrfs_device_get_total_bytes(srcdev)) {
2564 		btrfs_err(fs_info,
2565 			  "target device is smaller than source device!");
2566 		ret = -EINVAL;
2567 		goto error;
2568 	}
2569 
2570 
2571 	device = btrfs_alloc_device(NULL, &devid, NULL);
2572 	if (IS_ERR(device)) {
2573 		ret = PTR_ERR(device);
2574 		goto error;
2575 	}
2576 
2577 	name = rcu_string_strdup(device_path, GFP_KERNEL);
2578 	if (!name) {
2579 		kfree(device);
2580 		ret = -ENOMEM;
2581 		goto error;
2582 	}
2583 	rcu_assign_pointer(device->name, name);
2584 
2585 	q = bdev_get_queue(bdev);
2586 	if (blk_queue_discard(q))
2587 		device->can_discard = 1;
2588 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2589 	device->writeable = 1;
2590 	device->generation = 0;
2591 	device->io_width = fs_info->sectorsize;
2592 	device->io_align = fs_info->sectorsize;
2593 	device->sector_size = fs_info->sectorsize;
2594 	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2595 	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2596 	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2597 	ASSERT(list_empty(&srcdev->resized_list));
2598 	device->commit_total_bytes = srcdev->commit_total_bytes;
2599 	device->commit_bytes_used = device->bytes_used;
2600 	device->fs_info = fs_info;
2601 	device->bdev = bdev;
2602 	device->in_fs_metadata = 1;
2603 	device->is_tgtdev_for_dev_replace = 1;
2604 	device->mode = FMODE_EXCL;
2605 	device->dev_stats_valid = 1;
2606 	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2607 	device->fs_devices = fs_info->fs_devices;
2608 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2609 	fs_info->fs_devices->num_devices++;
2610 	fs_info->fs_devices->open_devices++;
2611 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2612 
2613 	*device_out = device;
2614 	return ret;
2615 
2616 error:
2617 	blkdev_put(bdev, FMODE_EXCL);
2618 	return ret;
2619 }
2620 
2621 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2622 					      struct btrfs_device *tgtdev)
2623 {
2624 	u32 sectorsize = fs_info->sectorsize;
2625 
2626 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2627 	tgtdev->io_width = sectorsize;
2628 	tgtdev->io_align = sectorsize;
2629 	tgtdev->sector_size = sectorsize;
2630 	tgtdev->fs_info = fs_info;
2631 	tgtdev->in_fs_metadata = 1;
2632 }
2633 
2634 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2635 					struct btrfs_device *device)
2636 {
2637 	int ret;
2638 	struct btrfs_path *path;
2639 	struct btrfs_root *root = device->fs_info->chunk_root;
2640 	struct btrfs_dev_item *dev_item;
2641 	struct extent_buffer *leaf;
2642 	struct btrfs_key key;
2643 
2644 	path = btrfs_alloc_path();
2645 	if (!path)
2646 		return -ENOMEM;
2647 
2648 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2649 	key.type = BTRFS_DEV_ITEM_KEY;
2650 	key.offset = device->devid;
2651 
2652 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2653 	if (ret < 0)
2654 		goto out;
2655 
2656 	if (ret > 0) {
2657 		ret = -ENOENT;
2658 		goto out;
2659 	}
2660 
2661 	leaf = path->nodes[0];
2662 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2663 
2664 	btrfs_set_device_id(leaf, dev_item, device->devid);
2665 	btrfs_set_device_type(leaf, dev_item, device->type);
2666 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2667 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2668 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2669 	btrfs_set_device_total_bytes(leaf, dev_item,
2670 				     btrfs_device_get_disk_total_bytes(device));
2671 	btrfs_set_device_bytes_used(leaf, dev_item,
2672 				    btrfs_device_get_bytes_used(device));
2673 	btrfs_mark_buffer_dirty(leaf);
2674 
2675 out:
2676 	btrfs_free_path(path);
2677 	return ret;
2678 }
2679 
2680 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2681 		      struct btrfs_device *device, u64 new_size)
2682 {
2683 	struct btrfs_fs_info *fs_info = device->fs_info;
2684 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2685 	struct btrfs_fs_devices *fs_devices;
2686 	u64 old_total;
2687 	u64 diff;
2688 
2689 	if (!device->writeable)
2690 		return -EACCES;
2691 
2692 	new_size = round_down(new_size, fs_info->sectorsize);
2693 
2694 	mutex_lock(&fs_info->chunk_mutex);
2695 	old_total = btrfs_super_total_bytes(super_copy);
2696 	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2697 
2698 	if (new_size <= device->total_bytes ||
2699 	    device->is_tgtdev_for_dev_replace) {
2700 		mutex_unlock(&fs_info->chunk_mutex);
2701 		return -EINVAL;
2702 	}
2703 
2704 	fs_devices = fs_info->fs_devices;
2705 
2706 	btrfs_set_super_total_bytes(super_copy,
2707 			round_down(old_total + diff, fs_info->sectorsize));
2708 	device->fs_devices->total_rw_bytes += diff;
2709 
2710 	btrfs_device_set_total_bytes(device, new_size);
2711 	btrfs_device_set_disk_total_bytes(device, new_size);
2712 	btrfs_clear_space_info_full(device->fs_info);
2713 	if (list_empty(&device->resized_list))
2714 		list_add_tail(&device->resized_list,
2715 			      &fs_devices->resized_devices);
2716 	mutex_unlock(&fs_info->chunk_mutex);
2717 
2718 	return btrfs_update_device(trans, device);
2719 }
2720 
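/*
 * A sketch of the round_down() used by btrfs_grow_device() above, valid
 * when the alignment is a power of two (the sector size always is). The
 * *_sketch name is illustrative.
 */
static unsigned long long round_down_sketch(unsigned long long x,
					    unsigned long long align)
{
	return x & ~(align - 1);	/* clear the low bits */
}
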
2721 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2722 			    struct btrfs_fs_info *fs_info, u64 chunk_offset)
2723 {
2724 	struct btrfs_root *root = fs_info->chunk_root;
2725 	int ret;
2726 	struct btrfs_path *path;
2727 	struct btrfs_key key;
2728 
2729 	path = btrfs_alloc_path();
2730 	if (!path)
2731 		return -ENOMEM;
2732 
2733 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2734 	key.offset = chunk_offset;
2735 	key.type = BTRFS_CHUNK_ITEM_KEY;
2736 
2737 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2738 	if (ret < 0)
2739 		goto out;
2740 	else if (ret > 0) { /* Logic error or corruption */
2741 		btrfs_handle_fs_error(fs_info, -ENOENT,
2742 				      "Failed lookup while freeing chunk.");
2743 		ret = -ENOENT;
2744 		goto out;
2745 	}
2746 
2747 	ret = btrfs_del_item(trans, root, path);
2748 	if (ret < 0)
2749 		btrfs_handle_fs_error(fs_info, ret,
2750 				      "Failed to delete chunk item.");
2751 out:
2752 	btrfs_free_path(path);
2753 	return ret;
2754 }
2755 
2756 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2757 {
2758 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2759 	struct btrfs_disk_key *disk_key;
2760 	struct btrfs_chunk *chunk;
2761 	u8 *ptr;
2762 	int ret = 0;
2763 	u32 num_stripes;
2764 	u32 array_size;
2765 	u32 len = 0;
2766 	u32 cur;
2767 	struct btrfs_key key;
2768 
2769 	mutex_lock(&fs_info->chunk_mutex);
2770 	array_size = btrfs_super_sys_array_size(super_copy);
2771 
2772 	ptr = super_copy->sys_chunk_array;
2773 	cur = 0;
2774 
2775 	while (cur < array_size) {
2776 		disk_key = (struct btrfs_disk_key *)ptr;
2777 		btrfs_disk_key_to_cpu(&key, disk_key);
2778 
2779 		len = sizeof(*disk_key);
2780 
2781 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2782 			chunk = (struct btrfs_chunk *)(ptr + len);
2783 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2784 			len += btrfs_chunk_item_size(num_stripes);
2785 		} else {
2786 			ret = -EIO;
2787 			break;
2788 		}
2789 		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2790 		    key.offset == chunk_offset) {
2791 			memmove(ptr, ptr + len, array_size - (cur + len));
2792 			array_size -= len;
2793 			btrfs_set_super_sys_array_size(super_copy, array_size);
2794 		} else {
2795 			ptr += len;
2796 			cur += len;
2797 		}
2798 	}
2799 	mutex_unlock(&fs_info->chunk_mutex);
2800 	return ret;
2801 }
2802 
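/*
 * The core of btrfs_del_sys_chunk() above is deleting a variable-length
 * record from a packed byte array; memmove() shifts the tail left over the
 * victim. A minimal sketch (illustrative names):
 */
#include <string.h>

static unsigned int remove_record_sketch(unsigned char *buf, unsigned int size,
					 unsigned int off, unsigned int reclen)
{
	memmove(buf + off, buf + off + reclen, size - (off + reclen));
	return size - reclen;		/* the new array size */
}
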
2803 static struct extent_map *get_chunk_map(struct btrfs_fs_info *fs_info,
2804 					u64 logical, u64 length)
2805 {
2806 	struct extent_map_tree *em_tree;
2807 	struct extent_map *em;
2808 
2809 	em_tree = &fs_info->mapping_tree.map_tree;
2810 	read_lock(&em_tree->lock);
2811 	em = lookup_extent_mapping(em_tree, logical, length);
2812 	read_unlock(&em_tree->lock);
2813 
2814 	if (!em) {
2815 		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2816 			   logical, length);
2817 		return ERR_PTR(-EINVAL);
2818 	}
2819 
2820 	if (em->start > logical || em->start + em->len < logical) {
2821 		btrfs_crit(fs_info,
2822 			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2823 			   logical, length, em->start, em->start + em->len);
2824 		free_extent_map(em);
2825 		return ERR_PTR(-EINVAL);
2826 	}
2827 
2828 	/* callers are responsible for dropping em's ref. */
2829 	return em;
2830 }
2831 
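/*
 * A sketch of the intent behind the sanity check in get_chunk_map() above:
 * the returned mapping [start, start + len) must cover the requested
 * logical byte. The helper name is illustrative.
 */
static int covers_logical_sketch(unsigned long long start,
				 unsigned long long len,
				 unsigned long long logical)
{
	return start <= logical && logical < start + len;
}
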
2832 int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2833 		       struct btrfs_fs_info *fs_info, u64 chunk_offset)
2834 {
2835 	struct extent_map *em;
2836 	struct map_lookup *map;
2837 	u64 dev_extent_len = 0;
2838 	int i, ret = 0;
2839 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2840 
2841 	em = get_chunk_map(fs_info, chunk_offset, 1);
2842 	if (IS_ERR(em)) {
2843 		/*
2844 		 * This is a logic error, but we don't want to just rely on the
2845 		 * user having built with ASSERT enabled, so if ASSERT doesn't
2846 		 * do anything we still error out.
2847 		 */
2848 		ASSERT(0);
2849 		return PTR_ERR(em);
2850 	}
2851 	map = em->map_lookup;
2852 	mutex_lock(&fs_info->chunk_mutex);
2853 	check_system_chunk(trans, fs_info, map->type);
2854 	mutex_unlock(&fs_info->chunk_mutex);
2855 
2856 	/*
2857 	 * Take the device list mutex to prevent races with the final phase of
2858 	 * a device replace operation that replaces the device object associated
2859 	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2860 	 */
2861 	mutex_lock(&fs_devices->device_list_mutex);
2862 	for (i = 0; i < map->num_stripes; i++) {
2863 		struct btrfs_device *device = map->stripes[i].dev;
2864 		ret = btrfs_free_dev_extent(trans, device,
2865 					    map->stripes[i].physical,
2866 					    &dev_extent_len);
2867 		if (ret) {
2868 			mutex_unlock(&fs_devices->device_list_mutex);
2869 			btrfs_abort_transaction(trans, ret);
2870 			goto out;
2871 		}
2872 
2873 		if (device->bytes_used > 0) {
2874 			mutex_lock(&fs_info->chunk_mutex);
2875 			btrfs_device_set_bytes_used(device,
2876 					device->bytes_used - dev_extent_len);
2877 			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
2878 			btrfs_clear_space_info_full(fs_info);
2879 			mutex_unlock(&fs_info->chunk_mutex);
2880 		}
2881 
2882 		if (map->stripes[i].dev) {
2883 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2884 			if (ret) {
2885 				mutex_unlock(&fs_devices->device_list_mutex);
2886 				btrfs_abort_transaction(trans, ret);
2887 				goto out;
2888 			}
2889 		}
2890 	}
2891 	mutex_unlock(&fs_devices->device_list_mutex);
2892 
2893 	ret = btrfs_free_chunk(trans, fs_info, chunk_offset);
2894 	if (ret) {
2895 		btrfs_abort_transaction(trans, ret);
2896 		goto out;
2897 	}
2898 
2899 	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
2900 
2901 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2902 		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
2903 		if (ret) {
2904 			btrfs_abort_transaction(trans, ret);
2905 			goto out;
2906 		}
2907 	}
2908 
2909 	ret = btrfs_remove_block_group(trans, fs_info, chunk_offset, em);
2910 	if (ret) {
2911 		btrfs_abort_transaction(trans, ret);
2912 		goto out;
2913 	}
2914 
2915 out:
2916 	/* once for us */
2917 	free_extent_map(em);
2918 	return ret;
2919 }
2920 
2921 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2922 {
2923 	struct btrfs_root *root = fs_info->chunk_root;
2924 	struct btrfs_trans_handle *trans;
2925 	int ret;
2926 
2927 	/*
2928 	 * Prevent races with automatic removal of unused block groups.
2929 	 * After we relocate and before we remove the chunk with offset
2930 	 * chunk_offset, automatic removal of the block group can kick in,
2931 	 * resulting in a failure when calling btrfs_remove_chunk() below.
2932 	 *
2933 	 * Make sure to acquire this mutex before doing a tree search (dev
2934 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2935 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2936 	 * we release the path used to search the chunk/dev tree and before
2937 	 * the current task acquires this mutex and calls us.
2938 	 */
2939 	ASSERT(mutex_is_locked(&fs_info->delete_unused_bgs_mutex));
2940 
2941 	ret = btrfs_can_relocate(fs_info, chunk_offset);
2942 	if (ret)
2943 		return -ENOSPC;
2944 
2945 	/* step one, relocate all the extents inside this chunk */
2946 	btrfs_scrub_pause(fs_info);
2947 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
2948 	btrfs_scrub_continue(fs_info);
2949 	if (ret)
2950 		return ret;
2951 
2952 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
2953 						     chunk_offset);
2954 	if (IS_ERR(trans)) {
2955 		ret = PTR_ERR(trans);
2956 		btrfs_handle_fs_error(root->fs_info, ret, NULL);
2957 		return ret;
2958 	}
2959 
2960 	/*
2961 	 * step two, delete the device extents and the
2962 	 * chunk tree entries
2963 	 */
2964 	ret = btrfs_remove_chunk(trans, fs_info, chunk_offset);
2965 	btrfs_end_transaction(trans);
2966 	return ret;
2967 }
2968 
2969 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
2970 {
2971 	struct btrfs_root *chunk_root = fs_info->chunk_root;
2972 	struct btrfs_path *path;
2973 	struct extent_buffer *leaf;
2974 	struct btrfs_chunk *chunk;
2975 	struct btrfs_key key;
2976 	struct btrfs_key found_key;
2977 	u64 chunk_type;
2978 	bool retried = false;
2979 	int failed = 0;
2980 	int ret;
2981 
2982 	path = btrfs_alloc_path();
2983 	if (!path)
2984 		return -ENOMEM;
2985 
2986 again:
2987 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2988 	key.offset = (u64)-1;
2989 	key.type = BTRFS_CHUNK_ITEM_KEY;
2990 
2991 	while (1) {
2992 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
2993 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2994 		if (ret < 0) {
2995 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
2996 			goto error;
2997 		}
2998 		BUG_ON(ret == 0); /* Corruption */
2999 
3000 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
3001 					  key.type);
3002 		if (ret)
3003 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3004 		if (ret < 0)
3005 			goto error;
3006 		if (ret > 0)
3007 			break;
3008 
3009 		leaf = path->nodes[0];
3010 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3011 
3012 		chunk = btrfs_item_ptr(leaf, path->slots[0],
3013 				       struct btrfs_chunk);
3014 		chunk_type = btrfs_chunk_type(leaf, chunk);
3015 		btrfs_release_path(path);
3016 
3017 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3018 			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3019 			if (ret == -ENOSPC)
3020 				failed++;
3021 			else
3022 				BUG_ON(ret);
3023 		}
3024 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3025 
3026 		if (found_key.offset == 0)
3027 			break;
3028 		key.offset = found_key.offset - 1;
3029 	}
3030 	ret = 0;
3031 	if (failed && !retried) {
3032 		failed = 0;
3033 		retried = true;
3034 		goto again;
3035 	} else if (WARN_ON(failed && retried)) {
3036 		ret = -ENOSPC;
3037 	}
3038 error:
3039 	btrfs_free_path(path);
3040 	return ret;
3041 }
3042 
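/*
 * A sketch of the retry-once shape of btrfs_relocate_sys_chunks() above:
 * ENOSPC failures from the first pass are retried exactly once, since
 * earlier relocations may have freed the space a later chunk needs.
 * The callback and function names are illustrative.
 */
#include <errno.h>

static int relocate_all_sketch(int (*relocate_one)(void *ctx, int idx),
			       void *ctx, int nr)
{
	int failed = 0, retried = 0, i;

again:
	for (i = 0; i < nr; i++)
		if (relocate_one(ctx, i) == -ENOSPC)
			failed++;
	if (failed && !retried) {
		failed = 0;
		retried = 1;
		goto again;
	}
	return failed ? -ENOSPC : 0;
}
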
3043 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3044 			       struct btrfs_balance_control *bctl)
3045 {
3046 	struct btrfs_root *root = fs_info->tree_root;
3047 	struct btrfs_trans_handle *trans;
3048 	struct btrfs_balance_item *item;
3049 	struct btrfs_disk_balance_args disk_bargs;
3050 	struct btrfs_path *path;
3051 	struct extent_buffer *leaf;
3052 	struct btrfs_key key;
3053 	int ret, err;
3054 
3055 	path = btrfs_alloc_path();
3056 	if (!path)
3057 		return -ENOMEM;
3058 
3059 	trans = btrfs_start_transaction(root, 0);
3060 	if (IS_ERR(trans)) {
3061 		btrfs_free_path(path);
3062 		return PTR_ERR(trans);
3063 	}
3064 
3065 	key.objectid = BTRFS_BALANCE_OBJECTID;
3066 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3067 	key.offset = 0;
3068 
3069 	ret = btrfs_insert_empty_item(trans, root, path, &key,
3070 				      sizeof(*item));
3071 	if (ret)
3072 		goto out;
3073 
3074 	leaf = path->nodes[0];
3075 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3076 
3077 	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3078 
3079 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3080 	btrfs_set_balance_data(leaf, item, &disk_bargs);
3081 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3082 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3083 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3084 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3085 
3086 	btrfs_set_balance_flags(leaf, item, bctl->flags);
3087 
3088 	btrfs_mark_buffer_dirty(leaf);
3089 out:
3090 	btrfs_free_path(path);
3091 	err = btrfs_commit_transaction(trans);
3092 	if (err && !ret)
3093 		ret = err;
3094 	return ret;
3095 }
3096 
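/*
 * A one-line sketch of the "err && !ret" tail of insert_balance_item()
 * above: the transaction commit is still attempted after a failure, but
 * its result must not mask the earlier error. Illustrative name.
 */
static int first_error_sketch(int ret, int err)
{
	return ret ? ret : err;		/* the earlier failure wins */
}
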
3097 static int del_balance_item(struct btrfs_fs_info *fs_info)
3098 {
3099 	struct btrfs_root *root = fs_info->tree_root;
3100 	struct btrfs_trans_handle *trans;
3101 	struct btrfs_path *path;
3102 	struct btrfs_key key;
3103 	int ret, err;
3104 
3105 	path = btrfs_alloc_path();
3106 	if (!path)
3107 		return -ENOMEM;
3108 
3109 	trans = btrfs_start_transaction(root, 0);
3110 	if (IS_ERR(trans)) {
3111 		btrfs_free_path(path);
3112 		return PTR_ERR(trans);
3113 	}
3114 
3115 	key.objectid = BTRFS_BALANCE_OBJECTID;
3116 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3117 	key.offset = 0;
3118 
3119 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3120 	if (ret < 0)
3121 		goto out;
3122 	if (ret > 0) {
3123 		ret = -ENOENT;
3124 		goto out;
3125 	}
3126 
3127 	ret = btrfs_del_item(trans, root, path);
3128 out:
3129 	btrfs_free_path(path);
3130 	err = btrfs_commit_transaction(trans);
3131 	if (err && !ret)
3132 		ret = err;
3133 	return ret;
3134 }
3135 
3136 /*
3137  * This is a heuristic used to reduce the number of chunks balanced on
3138  * resume after balance was interrupted.
3139  */
3140 static void update_balance_args(struct btrfs_balance_control *bctl)
3141 {
3142 	/*
3143 	 * Turn on soft mode for chunk types that were being converted.
3144 	 */
3145 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3146 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3147 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3148 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3149 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3150 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3151 
3152 	/*
3153 	 * Turn on the usage filter if it is not already in use.  The idea is
3154 	 * that chunks that we have already balanced should be
3155 	 * reasonably full.  Don't do it for chunks that are being
3156 	 * converted - that will keep us from relocating unconverted
3157 	 * (albeit full) chunks.
3158 	 */
3159 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3160 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3161 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3162 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3163 		bctl->data.usage = 90;
3164 	}
3165 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3166 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3167 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3168 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3169 		bctl->sys.usage = 90;
3170 	}
3171 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3172 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3173 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3174 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3175 		bctl->meta.usage = 90;
3176 	}
3177 }
3178 
3179 /*
3180  * Should be called with both balance and volume mutexes held to
3181  * serialize other volume operations (add_dev/rm_dev/resize) with
3182  * the restriper.  The same goes for unset_balance_control.
3183  */
3184 static void set_balance_control(struct btrfs_balance_control *bctl)
3185 {
3186 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3187 
3188 	BUG_ON(fs_info->balance_ctl);
3189 
3190 	spin_lock(&fs_info->balance_lock);
3191 	fs_info->balance_ctl = bctl;
3192 	spin_unlock(&fs_info->balance_lock);
3193 }
3194 
3195 static void unset_balance_control(struct btrfs_fs_info *fs_info)
3196 {
3197 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3198 
3199 	BUG_ON(!fs_info->balance_ctl);
3200 
3201 	spin_lock(&fs_info->balance_lock);
3202 	fs_info->balance_ctl = NULL;
3203 	spin_unlock(&fs_info->balance_lock);
3204 
3205 	kfree(bctl);
3206 }
3207 
3208 /*
3209  * Balance filters.  Return 1 if chunk should be filtered out
3210  * (should not be balanced).
3211  */
3212 static int chunk_profiles_filter(u64 chunk_type,
3213 				 struct btrfs_balance_args *bargs)
3214 {
3215 	chunk_type = chunk_to_extended(chunk_type) &
3216 				BTRFS_EXTENDED_PROFILE_MASK;
3217 
3218 	if (bargs->profiles & chunk_type)
3219 		return 0;
3220 
3221 	return 1;
3222 }
3223 
3224 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3225 			      struct btrfs_balance_args *bargs)
3226 {
3227 	struct btrfs_block_group_cache *cache;
3228 	u64 chunk_used;
3229 	u64 user_thresh_min;
3230 	u64 user_thresh_max;
3231 	int ret = 1;
3232 
3233 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3234 	chunk_used = btrfs_block_group_used(&cache->item);
3235 
3236 	if (bargs->usage_min == 0)
3237 		user_thresh_min = 0;
3238 	else
3239 		user_thresh_min = div_factor_fine(cache->key.offset,
3240 					bargs->usage_min);
3241 
3242 	if (bargs->usage_max == 0)
3243 		user_thresh_max = 1;
3244 	else if (bargs->usage_max > 100)
3245 		user_thresh_max = cache->key.offset;
3246 	else
3247 		user_thresh_max = div_factor_fine(cache->key.offset,
3248 					bargs->usage_max);
3249 
3250 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3251 		ret = 0;
3252 
3253 	btrfs_put_block_group(cache);
3254 	return ret;
3255 }
3256 
3257 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3258 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3259 {
3260 	struct btrfs_block_group_cache *cache;
3261 	u64 chunk_used, user_thresh;
3262 	int ret = 1;
3263 
3264 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3265 	chunk_used = btrfs_block_group_used(&cache->item);
3266 
3267 	if (bargs->usage_min == 0)
3268 		user_thresh = 1;
3269 	else if (bargs->usage > 100)
3270 		user_thresh = cache->key.offset;
3271 	else
3272 		user_thresh = div_factor_fine(cache->key.offset,
3273 					      bargs->usage);
3274 
3275 	if (chunk_used < user_thresh)
3276 		ret = 0;
3277 
3278 	btrfs_put_block_group(cache);
3279 	return ret;
3280 }
3281 
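/*
 * A sketch of the threshold logic in chunk_usage_filter() above, collapsed
 * to a single percentage parameter and assuming div_factor_fine(n, f)
 * computes n * f / 100 (a percentage of the block group size). Names are
 * illustrative; the sketch ignores the 64-bit overflow care the kernel
 * helper takes.
 */
static int usage_filter_sketch(unsigned long long chunk_used,
			       unsigned long long bg_size,
			       unsigned int usage_percent)
{
	unsigned long long thresh;

	if (usage_percent == 0)
		thresh = 1;			/* only empty chunks pass */
	else if (usage_percent > 100)
		thresh = bg_size;		/* everything passes */
	else
		thresh = bg_size * usage_percent / 100;

	return chunk_used < thresh ? 0 : 1;	/* 0 == balance this chunk */
}
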
3282 static int chunk_devid_filter(struct extent_buffer *leaf,
3283 			      struct btrfs_chunk *chunk,
3284 			      struct btrfs_balance_args *bargs)
3285 {
3286 	struct btrfs_stripe *stripe;
3287 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3288 	int i;
3289 
3290 	for (i = 0; i < num_stripes; i++) {
3291 		stripe = btrfs_stripe_nr(chunk, i);
3292 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3293 			return 0;
3294 	}
3295 
3296 	return 1;
3297 }
3298 
3299 /* [pstart, pend) */
3300 static int chunk_drange_filter(struct extent_buffer *leaf,
3301 			       struct btrfs_chunk *chunk,
3302 			       struct btrfs_balance_args *bargs)
3303 {
3304 	struct btrfs_stripe *stripe;
3305 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3306 	u64 stripe_offset;
3307 	u64 stripe_length;
3308 	int factor;
3309 	int i;
3310 
3311 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3312 		return 0;
3313 
3314 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3315 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3316 		factor = num_stripes / 2;
3317 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3318 		factor = num_stripes - 1;
3319 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3320 		factor = num_stripes - 2;
3321 	} else {
3322 		factor = num_stripes;
3323 	}
3324 
3325 	for (i = 0; i < num_stripes; i++) {
3326 		stripe = btrfs_stripe_nr(chunk, i);
3327 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3328 			continue;
3329 
3330 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3331 		stripe_length = btrfs_chunk_length(leaf, chunk);
3332 		stripe_length = div_u64(stripe_length, factor);
3333 
3334 		if (stripe_offset < bargs->pend &&
3335 		    stripe_offset + stripe_length > bargs->pstart)
3336 			return 0;
3337 	}
3338 
3339 	return 1;
3340 }
3341 
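/*
 * A sketch of how chunk_drange_filter() above derives the per-device
 * stripe length: the chunk length is divided by the effective number of
 * data stripes. Mirrored profiles (DUP/RAID1/RAID10) halve the stripe
 * count, parity profiles subtract one (RAID5) or two (RAID6) stripes.
 * Parameter names are illustrative.
 */
static unsigned long long stripe_len_sketch(unsigned long long chunk_length,
					    int num_stripes, int mirrored,
					    int nparity)
{
	int factor = mirrored ? num_stripes / 2 : num_stripes - nparity;

	return chunk_length / factor;
}
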
3342 /* [vstart, vend) */
3343 static int chunk_vrange_filter(struct extent_buffer *leaf,
3344 			       struct btrfs_chunk *chunk,
3345 			       u64 chunk_offset,
3346 			       struct btrfs_balance_args *bargs)
3347 {
3348 	if (chunk_offset < bargs->vend &&
3349 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3350 		/* at least part of the chunk is inside this vrange */
3351 		return 0;
3352 
3353 	return 1;
3354 }
3355 
3356 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3357 			       struct btrfs_chunk *chunk,
3358 			       struct btrfs_balance_args *bargs)
3359 {
3360 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3361 
3362 	if (bargs->stripes_min <= num_stripes
3363 			&& num_stripes <= bargs->stripes_max)
3364 		return 0;
3365 
3366 	return 1;
3367 }
3368 
3369 static int chunk_soft_convert_filter(u64 chunk_type,
3370 				     struct btrfs_balance_args *bargs)
3371 {
3372 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3373 		return 0;
3374 
3375 	chunk_type = chunk_to_extended(chunk_type) &
3376 				BTRFS_EXTENDED_PROFILE_MASK;
3377 
3378 	if (bargs->target == chunk_type)
3379 		return 1;
3380 
3381 	return 0;
3382 }
3383 
3384 static int should_balance_chunk(struct btrfs_fs_info *fs_info,
3385 				struct extent_buffer *leaf,
3386 				struct btrfs_chunk *chunk, u64 chunk_offset)
3387 {
3388 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3389 	struct btrfs_balance_args *bargs = NULL;
3390 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3391 
3392 	/* type filter */
3393 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3394 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3395 		return 0;
3396 	}
3397 
3398 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3399 		bargs = &bctl->data;
3400 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3401 		bargs = &bctl->sys;
3402 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3403 		bargs = &bctl->meta;
3404 
3405 	/* profiles filter */
3406 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3407 	    chunk_profiles_filter(chunk_type, bargs)) {
3408 		return 0;
3409 	}
3410 
3411 	/* usage filter */
3412 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3413 	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3414 		return 0;
3415 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3416 	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3417 		return 0;
3418 	}
3419 
3420 	/* devid filter */
3421 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3422 	    chunk_devid_filter(leaf, chunk, bargs)) {
3423 		return 0;
3424 	}
3425 
3426 	/* drange filter, makes sense only with devid filter */
3427 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3428 	    chunk_drange_filter(leaf, chunk, bargs)) {
3429 		return 0;
3430 	}
3431 
3432 	/* vrange filter */
3433 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3434 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3435 		return 0;
3436 	}
3437 
3438 	/* stripes filter */
3439 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3440 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3441 		return 0;
3442 	}
3443 
3444 	/* soft profile changing mode */
3445 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3446 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3447 		return 0;
3448 	}
3449 
3450 	/*
3451 	 * limited by count, must be the last filter
3452 	 */
3453 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3454 		if (bargs->limit == 0)
3455 			return 0;
3456 		else
3457 			bargs->limit--;
3458 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3459 		/*
3460 		 * Same logic as the 'limit' filter; the minimum cannot be
3461 		 * determined here because we do not have the global information
3462 		 * about the count of all chunks that satisfy the filters.
3463 		 */
3464 		if (bargs->limit_max == 0)
3465 			return 0;
3466 		else
3467 			bargs->limit_max--;
3468 	}
3469 
3470 	return 1;
3471 }
3472 
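/*
 * A sketch of the limit filter at the end of should_balance_chunk() above:
 * a shared countdown that every chunk surviving the other filters
 * decrements; once it reaches zero, all further chunks are filtered out.
 * The helper name is illustrative.
 */
static int limit_filter_sketch(unsigned long long *limit)
{
	if (*limit == 0)
		return 1;	/* filtered out: the quota is exhausted */
	(*limit)--;
	return 0;		/* within the limit, balance this chunk */
}
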
3473 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3474 {
3475 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3476 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3477 	struct btrfs_root *dev_root = fs_info->dev_root;
3478 	struct list_head *devices;
3479 	struct btrfs_device *device;
3480 	u64 old_size;
3481 	u64 size_to_free;
3482 	u64 chunk_type;
3483 	struct btrfs_chunk *chunk;
3484 	struct btrfs_path *path = NULL;
3485 	struct btrfs_key key;
3486 	struct btrfs_key found_key;
3487 	struct btrfs_trans_handle *trans;
3488 	struct extent_buffer *leaf;
3489 	int slot;
3490 	int ret;
3491 	int enospc_errors = 0;
3492 	bool counting = true;
3493 	/* The single value limit and min/max limits use the same bytes in the args union */
3494 	u64 limit_data = bctl->data.limit;
3495 	u64 limit_meta = bctl->meta.limit;
3496 	u64 limit_sys = bctl->sys.limit;
3497 	u32 count_data = 0;
3498 	u32 count_meta = 0;
3499 	u32 count_sys = 0;
3500 	int chunk_reserved = 0;
3501 	u64 bytes_used = 0;
3502 
3503 	/* step one, make some room on all the devices */
3504 	devices = &fs_info->fs_devices->devices;
3505 	list_for_each_entry(device, devices, dev_list) {
3506 		old_size = btrfs_device_get_total_bytes(device);
3507 		size_to_free = div_factor(old_size, 1);
3508 		size_to_free = min_t(u64, size_to_free, SZ_1M);
3509 		if (!device->writeable ||
3510 		    btrfs_device_get_total_bytes(device) -
3511 		    btrfs_device_get_bytes_used(device) > size_to_free ||
3512 		    device->is_tgtdev_for_dev_replace)
3513 			continue;
3514 
3515 		ret = btrfs_shrink_device(device, old_size - size_to_free);
3516 		if (ret == -ENOSPC)
3517 			break;
3518 		if (ret) {
3519 			/* btrfs_shrink_device never returns ret > 0 */
3520 			WARN_ON(ret > 0);
3521 			goto error;
3522 		}
3523 
3524 		trans = btrfs_start_transaction(dev_root, 0);
3525 		if (IS_ERR(trans)) {
3526 			ret = PTR_ERR(trans);
3527 			btrfs_info_in_rcu(fs_info,
3528 		 "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu",
3529 					  rcu_str_deref(device->name), ret,
3530 					  old_size, old_size - size_to_free);
3531 			goto error;
3532 		}
3533 
3534 		ret = btrfs_grow_device(trans, device, old_size);
3535 		if (ret) {
3536 			btrfs_end_transaction(trans);
3537 			/* btrfs_grow_device never returns ret > 0 */
3538 			WARN_ON(ret > 0);
3539 			btrfs_info_in_rcu(fs_info,
3540 		 "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu",
3541 					  rcu_str_deref(device->name), ret,
3542 					  old_size, old_size - size_to_free);
3543 			goto error;
3544 		}
3545 
3546 		btrfs_end_transaction(trans);
3547 	}
3548 
3549 	/* step two, relocate all the chunks */
3550 	path = btrfs_alloc_path();
3551 	if (!path) {
3552 		ret = -ENOMEM;
3553 		goto error;
3554 	}
3555 
3556 	/* zero out stat counters */
3557 	spin_lock(&fs_info->balance_lock);
3558 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3559 	spin_unlock(&fs_info->balance_lock);
3560 again:
3561 	if (!counting) {
3562 		/*
3563 		 * The single value limit and min/max limits use the same bytes
3564 		 * in the balance args (a union); restore what counting decremented
3565 		 */
3566 		bctl->data.limit = limit_data;
3567 		bctl->meta.limit = limit_meta;
3568 		bctl->sys.limit = limit_sys;
3569 	}
3570 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3571 	key.offset = (u64)-1;
3572 	key.type = BTRFS_CHUNK_ITEM_KEY;
3573 
3574 	while (1) {
3575 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3576 		    atomic_read(&fs_info->balance_cancel_req)) {
3577 			ret = -ECANCELED;
3578 			goto error;
3579 		}
3580 
3581 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3582 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3583 		if (ret < 0) {
3584 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3585 			goto error;
3586 		}
3587 
3588 		/*
3589 		 * This shouldn't happen; it means the last relocation
3590 		 * failed
3591 		 */
3592 		if (ret == 0)
3593 			BUG(); /* FIXME break ? */
3594 
3595 		ret = btrfs_previous_item(chunk_root, path, 0,
3596 					  BTRFS_CHUNK_ITEM_KEY);
3597 		if (ret) {
3598 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3599 			ret = 0;
3600 			break;
3601 		}
3602 
3603 		leaf = path->nodes[0];
3604 		slot = path->slots[0];
3605 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3606 
3607 		if (found_key.objectid != key.objectid) {
3608 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3609 			break;
3610 		}
3611 
3612 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3613 		chunk_type = btrfs_chunk_type(leaf, chunk);
3614 
3615 		if (!counting) {
3616 			spin_lock(&fs_info->balance_lock);
3617 			bctl->stat.considered++;
3618 			spin_unlock(&fs_info->balance_lock);
3619 		}
3620 
3621 		ret = should_balance_chunk(fs_info, leaf, chunk,
3622 					   found_key.offset);
3623 
3624 		btrfs_release_path(path);
3625 		if (!ret) {
3626 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3627 			goto loop;
3628 		}
3629 
3630 		if (counting) {
3631 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3632 			spin_lock(&fs_info->balance_lock);
3633 			bctl->stat.expected++;
3634 			spin_unlock(&fs_info->balance_lock);
3635 
3636 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3637 				count_data++;
3638 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3639 				count_sys++;
3640 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3641 				count_meta++;
3642 
3643 			goto loop;
3644 		}
3645 
3646 		/*
3647 		 * Apply limit_min filter, no need to check if the LIMITS
3648 		 * filter is used, limit_min is 0 by default
3649 		 */
3650 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3651 					count_data < bctl->data.limit_min)
3652 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3653 					count_meta < bctl->meta.limit_min)
3654 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3655 					count_sys < bctl->sys.limit_min)) {
3656 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3657 			goto loop;
3658 		}
3659 
3660 		ASSERT(fs_info->data_sinfo);
3661 		spin_lock(&fs_info->data_sinfo->lock);
3662 		bytes_used = fs_info->data_sinfo->bytes_used;
3663 		spin_unlock(&fs_info->data_sinfo->lock);
3664 
3665 		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3666 		    !chunk_reserved && !bytes_used) {
3667 			trans = btrfs_start_transaction(chunk_root, 0);
3668 			if (IS_ERR(trans)) {
3669 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3670 				ret = PTR_ERR(trans);
3671 				goto error;
3672 			}
3673 
3674 			ret = btrfs_force_chunk_alloc(trans, fs_info,
3675 						      BTRFS_BLOCK_GROUP_DATA);
3676 			btrfs_end_transaction(trans);
3677 			if (ret < 0) {
3678 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3679 				goto error;
3680 			}
3681 			chunk_reserved = 1;
3682 		}
3683 
3684 		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3685 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3686 		if (ret && ret != -ENOSPC)
3687 			goto error;
3688 		if (ret == -ENOSPC) {
3689 			enospc_errors++;
3690 		} else {
3691 			spin_lock(&fs_info->balance_lock);
3692 			bctl->stat.completed++;
3693 			spin_unlock(&fs_info->balance_lock);
3694 		}
3695 loop:
3696 		if (found_key.offset == 0)
3697 			break;
3698 		key.offset = found_key.offset - 1;
3699 	}
3700 
3701 	if (counting) {
3702 		btrfs_release_path(path);
3703 		counting = false;
3704 		goto again;
3705 	}
3706 error:
3707 	btrfs_free_path(path);
3708 	if (enospc_errors) {
3709 		btrfs_info(fs_info, "%d enospc errors during balance",
3710 			   enospc_errors);
3711 		if (!ret)
3712 			ret = -ENOSPC;
3713 	}
3714 
3715 	return ret;
3716 }
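/*
 * Illustrative sketch, not part of the original file: the relocation
 * loop above walks the chunk tree backwards by searching for the
 * largest possible key, stepping to the previous CHUNK_ITEM, and
 * resuming from found_key.offset - 1.  Simplified shape of that loop
 * (process() is a hypothetical stand-in for the real loop body):
 */
#if 0	/* example only */
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		/* lands just past the wanted key; back up to a real item */
		btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (btrfs_previous_item(chunk_root, path, 0,
					BTRFS_CHUNK_ITEM_KEY))
			break;				/* no more chunks */
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		process(&found_key);
		if (found_key.offset == 0)
			break;				/* lowest key done */
		key.offset = found_key.offset - 1;	/* continue downwards */
	}
#endif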
3717 
3718 /**
3719  * alloc_profile_is_valid - see if a given profile is valid and reduced
3720  * @flags: profile to validate
3721  * @extended: if true @flags is treated as an extended profile
3722  */
3723 static int alloc_profile_is_valid(u64 flags, int extended)
3724 {
3725 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3726 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3727 
3728 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3729 
3730 	/* 1) check that all other bits are zeroed */
3731 	if (flags & ~mask)
3732 		return 0;
3733 
3734 	/* 2) see if profile is reduced */
3735 	if (flags == 0)
3736 		return !extended; /* "0" is valid for usual profiles */
3737 
3738 	/* true if exactly one bit set */
3739 	return (flags & (flags - 1)) == 0;
3740 }
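/*
 * Illustrative note, not part of the original file: the final test is
 * the classic power-of-two identity: x & (x - 1) clears the lowest set
 * bit, so the result is zero iff at most one bit was set:
 *
 *   flags = 0b0100: flags - 1 = 0b0011, AND = 0        (reduced)
 *   flags = 0b0110: flags - 1 = 0b0101, AND = 0b0100   (not reduced)
 */
#if 0	/* example only */
static int exactly_one_bit_set(u64 x)
{
	return x != 0 && (x & (x - 1)) == 0;
}
#endif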
3741 
3742 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3743 {
3744 	/* cancel requested || normal exit path */
3745 	return atomic_read(&fs_info->balance_cancel_req) ||
3746 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3747 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3748 }
3749 
3750 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3751 {
3752 	int ret;
3753 
3754 	unset_balance_control(fs_info);
3755 	ret = del_balance_item(fs_info);
3756 	if (ret)
3757 		btrfs_handle_fs_error(fs_info, ret, NULL);
3758 
3759 	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3760 }
3761 
3762 /* Non-zero return value signifies invalidity */
3763 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3764 		u64 allowed)
3765 {
3766 	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3767 		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
3768 		 (bctl_arg->target & ~allowed)));
3769 }
3770 
3771 /*
3772  * Should be called with both balance and volume mutexes held
3773  */
3774 int btrfs_balance(struct btrfs_balance_control *bctl,
3775 		  struct btrfs_ioctl_balance_args *bargs)
3776 {
3777 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3778 	u64 meta_target, data_target;
3779 	u64 allowed;
3780 	int mixed = 0;
3781 	int ret;
3782 	u64 num_devices;
3783 	unsigned seq;
3784 
3785 	if (btrfs_fs_closing(fs_info) ||
3786 	    atomic_read(&fs_info->balance_pause_req) ||
3787 	    atomic_read(&fs_info->balance_cancel_req)) {
3788 		ret = -EINVAL;
3789 		goto out;
3790 	}
3791 
3792 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3793 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3794 		mixed = 1;
3795 
3796 	/*
3797 	 * In case of mixed groups both data and meta should be picked,
3798 	 * and identical options should be given for both of them.
3799 	 */
3800 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3801 	if (mixed && (bctl->flags & allowed)) {
3802 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3803 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3804 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3805 			btrfs_err(fs_info,
3806 				  "with mixed groups data and metadata balance options must be the same");
3807 			ret = -EINVAL;
3808 			goto out;
3809 		}
3810 	}
3811 
3812 	num_devices = fs_info->fs_devices->num_devices;
3813 	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
3814 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3815 		BUG_ON(num_devices < 1);
3816 		num_devices--;
3817 	}
3818 	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3819 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
3820 	if (num_devices > 1)
3821 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3822 	if (num_devices > 2)
3823 		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3824 	if (num_devices > 3)
3825 		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3826 			    BTRFS_BLOCK_GROUP_RAID6);
3827 	if (validate_convert_profile(&bctl->data, allowed)) {
3828 		btrfs_err(fs_info,
3829 			  "unable to start balance with target data profile %llu",
3830 			  bctl->data.target);
3831 		ret = -EINVAL;
3832 		goto out;
3833 	}
3834 	if (validate_convert_profile(&bctl->meta, allowed)) {
3835 		btrfs_err(fs_info,
3836 			  "unable to start balance with target metadata profile %llu",
3837 			  bctl->meta.target);
3838 		ret = -EINVAL;
3839 		goto out;
3840 	}
3841 	if (validate_convert_profile(&bctl->sys, allowed)) {
3842 		btrfs_err(fs_info,
3843 			  "unable to start balance with target system profile %llu",
3844 			  bctl->sys.target);
3845 		ret = -EINVAL;
3846 		goto out;
3847 	}
3848 
3849 	/* allow to reduce meta or sys integrity only if force set */
3850 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3851 			BTRFS_BLOCK_GROUP_RAID10 |
3852 			BTRFS_BLOCK_GROUP_RAID5 |
3853 			BTRFS_BLOCK_GROUP_RAID6;
3854 	do {
3855 		seq = read_seqbegin(&fs_info->profiles_lock);
3856 
3857 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3858 		     (fs_info->avail_system_alloc_bits & allowed) &&
3859 		     !(bctl->sys.target & allowed)) ||
3860 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3861 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3862 		     !(bctl->meta.target & allowed))) {
3863 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3864 				btrfs_info(fs_info,
3865 					   "force reducing metadata integrity");
3866 			} else {
3867 				btrfs_err(fs_info,
3868 					  "balance will reduce metadata integrity, use force if you want this");
3869 				ret = -EINVAL;
3870 				goto out;
3871 			}
3872 		}
3873 	} while (read_seqretry(&fs_info->profiles_lock, seq));
3874 
3875 	/* if we're not converting, the target field is uninitialized */
3876 	meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
3877 		bctl->meta.target : fs_info->avail_metadata_alloc_bits;
3878 	data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
3879 		bctl->data.target : fs_info->avail_data_alloc_bits;
3880 	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
3881 		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
3882 		btrfs_warn(fs_info,
3883 			   "metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
3884 			   meta_target, data_target);
3885 	}
3886 
3887 	ret = insert_balance_item(fs_info, bctl);
3888 	if (ret && ret != -EEXIST)
3889 		goto out;
3890 
3891 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3892 		BUG_ON(ret == -EEXIST);
3893 		set_balance_control(bctl);
3894 	} else {
3895 		BUG_ON(ret != -EEXIST);
3896 		spin_lock(&fs_info->balance_lock);
3897 		update_balance_args(bctl);
3898 		spin_unlock(&fs_info->balance_lock);
3899 	}
3900 
3901 	atomic_inc(&fs_info->balance_running);
3902 	mutex_unlock(&fs_info->balance_mutex);
3903 
3904 	ret = __btrfs_balance(fs_info);
3905 
3906 	mutex_lock(&fs_info->balance_mutex);
3907 	atomic_dec(&fs_info->balance_running);
3908 
3909 	if (bargs) {
3910 		memset(bargs, 0, sizeof(*bargs));
3911 		update_ioctl_balance_args(fs_info, 0, bargs);
3912 	}
3913 
3914 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3915 	    balance_need_close(fs_info)) {
3916 		__cancel_balance(fs_info);
3917 	}
3918 
3919 	wake_up(&fs_info->balance_wait_q);
3920 
3921 	return ret;
3922 out:
3923 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3924 		__cancel_balance(fs_info);
3925 	else {
3926 		kfree(bctl);
3927 		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3928 	}
3929 	return ret;
3930 }
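/*
 * Illustrative sketch, not part of the original file: the integrity
 * check in btrfs_balance() samples the avail_*_alloc_bits fields with
 * the lockless seqlock reader pattern: take a snapshot, then retry the
 * whole read if a writer raced with it:
 */
#if 0	/* example only */
	unsigned seq;
	u64 avail;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);
		avail = fs_info->avail_metadata_alloc_bits;	/* snapshot */
	} while (read_seqretry(&fs_info->profiles_lock, seq));	/* raced? */
#endif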
3931 
3932 static int balance_kthread(void *data)
3933 {
3934 	struct btrfs_fs_info *fs_info = data;
3935 	int ret = 0;
3936 
3937 	mutex_lock(&fs_info->volume_mutex);
3938 	mutex_lock(&fs_info->balance_mutex);
3939 
3940 	if (fs_info->balance_ctl) {
3941 		btrfs_info(fs_info, "continuing balance");
3942 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3943 	}
3944 
3945 	mutex_unlock(&fs_info->balance_mutex);
3946 	mutex_unlock(&fs_info->volume_mutex);
3947 
3948 	return ret;
3949 }
3950 
3951 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3952 {
3953 	struct task_struct *tsk;
3954 
3955 	spin_lock(&fs_info->balance_lock);
3956 	if (!fs_info->balance_ctl) {
3957 		spin_unlock(&fs_info->balance_lock);
3958 		return 0;
3959 	}
3960 	spin_unlock(&fs_info->balance_lock);
3961 
3962 	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
3963 		btrfs_info(fs_info, "force skipping balance");
3964 		return 0;
3965 	}
3966 
3967 	/*
3968 	 * A ro->rw remount sequence should continue with the paused balance
3969 	 * regardless of who pauses it, system or the user as of now, so set
3970 	 * the resume flag.
3971 	 */
3972 	spin_lock(&fs_info->balance_lock);
3973 	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
3974 	spin_unlock(&fs_info->balance_lock);
3975 
3976 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3977 	return PTR_ERR_OR_ZERO(tsk);
3978 }
3979 
3980 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3981 {
3982 	struct btrfs_balance_control *bctl;
3983 	struct btrfs_balance_item *item;
3984 	struct btrfs_disk_balance_args disk_bargs;
3985 	struct btrfs_path *path;
3986 	struct extent_buffer *leaf;
3987 	struct btrfs_key key;
3988 	int ret;
3989 
3990 	path = btrfs_alloc_path();
3991 	if (!path)
3992 		return -ENOMEM;
3993 
3994 	key.objectid = BTRFS_BALANCE_OBJECTID;
3995 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3996 	key.offset = 0;
3997 
3998 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3999 	if (ret < 0)
4000 		goto out;
4001 	if (ret > 0) { /* ret = -ENOENT; */
4002 		ret = 0;
4003 		goto out;
4004 	}
4005 
4006 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4007 	if (!bctl) {
4008 		ret = -ENOMEM;
4009 		goto out;
4010 	}
4011 
4012 	leaf = path->nodes[0];
4013 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4014 
4015 	bctl->fs_info = fs_info;
4016 	bctl->flags = btrfs_balance_flags(leaf, item);
4017 	bctl->flags |= BTRFS_BALANCE_RESUME;
4018 
4019 	btrfs_balance_data(leaf, item, &disk_bargs);
4020 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4021 	btrfs_balance_meta(leaf, item, &disk_bargs);
4022 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4023 	btrfs_balance_sys(leaf, item, &disk_bargs);
4024 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4025 
4026 	WARN_ON(test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));
4027 
4028 	mutex_lock(&fs_info->volume_mutex);
4029 	mutex_lock(&fs_info->balance_mutex);
4030 
4031 	set_balance_control(bctl);
4032 
4033 	mutex_unlock(&fs_info->balance_mutex);
4034 	mutex_unlock(&fs_info->volume_mutex);
4035 out:
4036 	btrfs_free_path(path);
4037 	return ret;
4038 }
4039 
4040 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4041 {
4042 	int ret = 0;
4043 
4044 	mutex_lock(&fs_info->balance_mutex);
4045 	if (!fs_info->balance_ctl) {
4046 		mutex_unlock(&fs_info->balance_mutex);
4047 		return -ENOTCONN;
4048 	}
4049 
4050 	if (atomic_read(&fs_info->balance_running)) {
4051 		atomic_inc(&fs_info->balance_pause_req);
4052 		mutex_unlock(&fs_info->balance_mutex);
4053 
4054 		wait_event(fs_info->balance_wait_q,
4055 			   atomic_read(&fs_info->balance_running) == 0);
4056 
4057 		mutex_lock(&fs_info->balance_mutex);
4058 		/* we are good with balance_ctl ripped off from under us */
4059 		BUG_ON(atomic_read(&fs_info->balance_running));
4060 		atomic_dec(&fs_info->balance_pause_req);
4061 	} else {
4062 		ret = -ENOTCONN;
4063 	}
4064 
4065 	mutex_unlock(&fs_info->balance_mutex);
4066 	return ret;
4067 }
4068 
4069 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4070 {
4071 	if (sb_rdonly(fs_info->sb))
4072 		return -EROFS;
4073 
4074 	mutex_lock(&fs_info->balance_mutex);
4075 	if (!fs_info->balance_ctl) {
4076 		mutex_unlock(&fs_info->balance_mutex);
4077 		return -ENOTCONN;
4078 	}
4079 
4080 	atomic_inc(&fs_info->balance_cancel_req);
4081 	/*
4082 	 * if we are running, just wait and return; the balance item is
4083 	 * deleted in btrfs_balance() in this case
4084 	 */
4085 	if (atomic_read(&fs_info->balance_running)) {
4086 		mutex_unlock(&fs_info->balance_mutex);
4087 		wait_event(fs_info->balance_wait_q,
4088 			   atomic_read(&fs_info->balance_running) == 0);
4089 		mutex_lock(&fs_info->balance_mutex);
4090 	} else {
4091 		/* __cancel_balance needs volume_mutex */
4092 		mutex_unlock(&fs_info->balance_mutex);
4093 		mutex_lock(&fs_info->volume_mutex);
4094 		mutex_lock(&fs_info->balance_mutex);
4095 
4096 		if (fs_info->balance_ctl)
4097 			__cancel_balance(fs_info);
4098 
4099 		mutex_unlock(&fs_info->volume_mutex);
4100 	}
4101 
4102 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
4103 	atomic_dec(&fs_info->balance_cancel_req);
4104 	mutex_unlock(&fs_info->balance_mutex);
4105 	return 0;
4106 }
4107 
4108 static int btrfs_uuid_scan_kthread(void *data)
4109 {
4110 	struct btrfs_fs_info *fs_info = data;
4111 	struct btrfs_root *root = fs_info->tree_root;
4112 	struct btrfs_key key;
4113 	struct btrfs_path *path = NULL;
4114 	int ret = 0;
4115 	struct extent_buffer *eb;
4116 	int slot;
4117 	struct btrfs_root_item root_item;
4118 	u32 item_size;
4119 	struct btrfs_trans_handle *trans = NULL;
4120 
4121 	path = btrfs_alloc_path();
4122 	if (!path) {
4123 		ret = -ENOMEM;
4124 		goto out;
4125 	}
4126 
4127 	key.objectid = 0;
4128 	key.type = BTRFS_ROOT_ITEM_KEY;
4129 	key.offset = 0;
4130 
4131 	while (1) {
4132 		ret = btrfs_search_forward(root, &key, path, 0);
4133 		if (ret) {
4134 			if (ret > 0)
4135 				ret = 0;
4136 			break;
4137 		}
4138 
4139 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4140 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4141 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4142 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4143 			goto skip;
4144 
4145 		eb = path->nodes[0];
4146 		slot = path->slots[0];
4147 		item_size = btrfs_item_size_nr(eb, slot);
4148 		if (item_size < sizeof(root_item))
4149 			goto skip;
4150 
4151 		read_extent_buffer(eb, &root_item,
4152 				   btrfs_item_ptr_offset(eb, slot),
4153 				   (int)sizeof(root_item));
4154 		if (btrfs_root_refs(&root_item) == 0)
4155 			goto skip;
4156 
4157 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4158 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4159 			if (trans)
4160 				goto update_tree;
4161 
4162 			btrfs_release_path(path);
4163 			/*
4164 			 * 1 - subvol uuid item
4165 			 * 1 - received_subvol uuid item
4166 			 */
4167 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4168 			if (IS_ERR(trans)) {
4169 				ret = PTR_ERR(trans);
4170 				break;
4171 			}
4172 			continue;
4173 		} else {
4174 			goto skip;
4175 		}
4176 update_tree:
4177 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4178 			ret = btrfs_uuid_tree_add(trans, fs_info,
4179 						  root_item.uuid,
4180 						  BTRFS_UUID_KEY_SUBVOL,
4181 						  key.objectid);
4182 			if (ret < 0) {
4183 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4184 					ret);
4185 				break;
4186 			}
4187 		}
4188 
4189 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4190 			ret = btrfs_uuid_tree_add(trans, fs_info,
4191 						  root_item.received_uuid,
4192 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4193 						  key.objectid);
4194 			if (ret < 0) {
4195 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4196 					ret);
4197 				break;
4198 			}
4199 		}
4200 
4201 skip:
4202 		if (trans) {
4203 			ret = btrfs_end_transaction(trans);
4204 			trans = NULL;
4205 			if (ret)
4206 				break;
4207 		}
4208 
4209 		btrfs_release_path(path);
4210 		if (key.offset < (u64)-1) {
4211 			key.offset++;
4212 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4213 			key.offset = 0;
4214 			key.type = BTRFS_ROOT_ITEM_KEY;
4215 		} else if (key.objectid < (u64)-1) {
4216 			key.offset = 0;
4217 			key.type = BTRFS_ROOT_ITEM_KEY;
4218 			key.objectid++;
4219 		} else {
4220 			break;
4221 		}
4222 		cond_resched();
4223 	}
4224 
4225 out:
4226 	btrfs_free_path(path);
4227 	if (trans && !IS_ERR(trans))
4228 		btrfs_end_transaction(trans);
4229 	if (ret)
4230 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4231 	else
4232 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4233 	up(&fs_info->uuid_tree_rescan_sem);
4234 	return 0;
4235 }
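/*
 * Illustrative sketch, not part of the original file: the tail of the
 * scan loop above advances the search key through the whole key space
 * like an odometer: offset overflows into type, type into objectid.
 * Generic shape (next_key is a hypothetical helper name):
 */
#if 0	/* example only */
static int next_key(struct btrfs_key *key, u8 wanted_type)
{
	if (key->offset < (u64)-1) {
		key->offset++;			/* cheapest increment */
	} else if (key->type < wanted_type) {
		key->offset = 0;
		key->type = wanted_type;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = wanted_type;
		key->objectid++;		/* carry into objectid */
	} else {
		return 0;			/* key space exhausted */
	}
	return 1;
}
#endif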
4236 
4237 /*
4238  * Callback for btrfs_uuid_tree_iterate().
4239  * returns:
4240  * 0	check succeeded, the entry is not outdated.
4241  * < 0	if an error occurred.
4242  * > 0	if the check failed, which means the caller shall remove the entry.
4243  */
4244 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4245 				       u8 *uuid, u8 type, u64 subid)
4246 {
4247 	struct btrfs_key key;
4248 	int ret = 0;
4249 	struct btrfs_root *subvol_root;
4250 
4251 	if (type != BTRFS_UUID_KEY_SUBVOL &&
4252 	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4253 		goto out;
4254 
4255 	key.objectid = subid;
4256 	key.type = BTRFS_ROOT_ITEM_KEY;
4257 	key.offset = (u64)-1;
4258 	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4259 	if (IS_ERR(subvol_root)) {
4260 		ret = PTR_ERR(subvol_root);
4261 		if (ret == -ENOENT)
4262 			ret = 1;
4263 		goto out;
4264 	}
4265 
4266 	switch (type) {
4267 	case BTRFS_UUID_KEY_SUBVOL:
4268 		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4269 			ret = 1;
4270 		break;
4271 	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4272 		if (memcmp(uuid, subvol_root->root_item.received_uuid,
4273 			   BTRFS_UUID_SIZE))
4274 			ret = 1;
4275 		break;
4276 	}
4277 
4278 out:
4279 	return ret;
4280 }
4281 
4282 static int btrfs_uuid_rescan_kthread(void *data)
4283 {
4284 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4285 	int ret;
4286 
4287 	/*
4288 	 * 1st step is to iterate through the existing UUID tree and
4289 	 * to delete all entries that contain outdated data.
4290 	 * 2nd step is to add all missing entries to the UUID tree.
4291 	 */
4292 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4293 	if (ret < 0) {
4294 		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4295 		up(&fs_info->uuid_tree_rescan_sem);
4296 		return ret;
4297 	}
4298 	return btrfs_uuid_scan_kthread(data);
4299 }
4300 
4301 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4302 {
4303 	struct btrfs_trans_handle *trans;
4304 	struct btrfs_root *tree_root = fs_info->tree_root;
4305 	struct btrfs_root *uuid_root;
4306 	struct task_struct *task;
4307 	int ret;
4308 
4309 	/*
4310 	 * 1 - root node
4311 	 * 1 - root item
4312 	 */
4313 	trans = btrfs_start_transaction(tree_root, 2);
4314 	if (IS_ERR(trans))
4315 		return PTR_ERR(trans);
4316 
4317 	uuid_root = btrfs_create_tree(trans, fs_info,
4318 				      BTRFS_UUID_TREE_OBJECTID);
4319 	if (IS_ERR(uuid_root)) {
4320 		ret = PTR_ERR(uuid_root);
4321 		btrfs_abort_transaction(trans, ret);
4322 		btrfs_end_transaction(trans);
4323 		return ret;
4324 	}
4325 
4326 	fs_info->uuid_root = uuid_root;
4327 
4328 	ret = btrfs_commit_transaction(trans);
4329 	if (ret)
4330 		return ret;
4331 
4332 	down(&fs_info->uuid_tree_rescan_sem);
4333 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4334 	if (IS_ERR(task)) {
4335 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4336 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4337 		up(&fs_info->uuid_tree_rescan_sem);
4338 		return PTR_ERR(task);
4339 	}
4340 
4341 	return 0;
4342 }
4343 
4344 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4345 {
4346 	struct task_struct *task;
4347 
4348 	down(&fs_info->uuid_tree_rescan_sem);
4349 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4350 	if (IS_ERR(task)) {
4351 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4352 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
4353 		up(&fs_info->uuid_tree_rescan_sem);
4354 		return PTR_ERR(task);
4355 	}
4356 
4357 	return 0;
4358 }
4359 
4360 /*
4361  * shrinking a device means finding all of the device extents past
4362  * the new size, and then following the back refs to the chunks.
4363  * The chunk relocation code actually frees the device extent
4364  */
4365 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4366 {
4367 	struct btrfs_fs_info *fs_info = device->fs_info;
4368 	struct btrfs_root *root = fs_info->dev_root;
4369 	struct btrfs_trans_handle *trans;
4370 	struct btrfs_dev_extent *dev_extent = NULL;
4371 	struct btrfs_path *path;
4372 	u64 length;
4373 	u64 chunk_offset;
4374 	int ret;
4375 	int slot;
4376 	int failed = 0;
4377 	bool retried = false;
4378 	bool checked_pending_chunks = false;
4379 	struct extent_buffer *l;
4380 	struct btrfs_key key;
4381 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4382 	u64 old_total = btrfs_super_total_bytes(super_copy);
4383 	u64 old_size = btrfs_device_get_total_bytes(device);
4384 	u64 diff;
4385 
4386 	new_size = round_down(new_size, fs_info->sectorsize);
4387 	diff = round_down(old_size - new_size, fs_info->sectorsize);
4388 
4389 	if (device->is_tgtdev_for_dev_replace)
4390 		return -EINVAL;
4391 
4392 	path = btrfs_alloc_path();
4393 	if (!path)
4394 		return -ENOMEM;
4395 
4396 	path->reada = READA_FORWARD;
4397 
4398 	mutex_lock(&fs_info->chunk_mutex);
4399 
4400 	btrfs_device_set_total_bytes(device, new_size);
4401 	if (device->writeable) {
4402 		device->fs_devices->total_rw_bytes -= diff;
4403 		atomic64_sub(diff, &fs_info->free_chunk_space);
4404 	}
4405 	mutex_unlock(&fs_info->chunk_mutex);
4406 
4407 again:
4408 	key.objectid = device->devid;
4409 	key.offset = (u64)-1;
4410 	key.type = BTRFS_DEV_EXTENT_KEY;
4411 
4412 	do {
4413 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
4414 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4415 		if (ret < 0) {
4416 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4417 			goto done;
4418 		}
4419 
4420 		ret = btrfs_previous_item(root, path, 0, key.type);
4421 		if (ret)
4422 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4423 		if (ret < 0)
4424 			goto done;
4425 		if (ret) {
4426 			ret = 0;
4427 			btrfs_release_path(path);
4428 			break;
4429 		}
4430 
4431 		l = path->nodes[0];
4432 		slot = path->slots[0];
4433 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4434 
4435 		if (key.objectid != device->devid) {
4436 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4437 			btrfs_release_path(path);
4438 			break;
4439 		}
4440 
4441 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4442 		length = btrfs_dev_extent_length(l, dev_extent);
4443 
4444 		if (key.offset + length <= new_size) {
4445 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4446 			btrfs_release_path(path);
4447 			break;
4448 		}
4449 
4450 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4451 		btrfs_release_path(path);
4452 
4453 		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4454 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4455 		if (ret && ret != -ENOSPC)
4456 			goto done;
4457 		if (ret == -ENOSPC)
4458 			failed++;
4459 	} while (key.offset-- > 0);
4460 
4461 	if (failed && !retried) {
4462 		failed = 0;
4463 		retried = true;
4464 		goto again;
4465 	} else if (failed && retried) {
4466 		ret = -ENOSPC;
4467 		goto done;
4468 	}
4469 
4470 	/* Shrinking succeeded, else we would be at "done". */
4471 	trans = btrfs_start_transaction(root, 0);
4472 	if (IS_ERR(trans)) {
4473 		ret = PTR_ERR(trans);
4474 		goto done;
4475 	}
4476 
4477 	mutex_lock(&fs_info->chunk_mutex);
4478 
4479 	/*
4480 	 * We checked in the above loop all device extents that were already in
4481 	 * the device tree. However before we have updated the device's
4482 	 * total_bytes to the new size, we might have had chunk allocations that
4483 	 * have not complete yet (new block groups attached to transaction
4484 	 * have not completed yet (new block groups attached to transaction
4485 	 * device tree and we missed them in the loop above. So if we have any
4486 	 * pending chunk using a device extent that overlaps the device range
4487 	 * that we cannot use anymore, commit the current transaction and
4488 	 * repeat the search on the device tree - this way we guarantee we will
4489 	 * not have chunks using device extents that end beyond 'new_size'.
4490 	 */
4491 	if (!checked_pending_chunks) {
4492 		u64 start = new_size;
4493 		u64 len = old_size - new_size;
4494 
4495 		if (contains_pending_extent(trans->transaction, device,
4496 					    &start, len)) {
4497 			mutex_unlock(&fs_info->chunk_mutex);
4498 			checked_pending_chunks = true;
4499 			failed = 0;
4500 			retried = false;
4501 			ret = btrfs_commit_transaction(trans);
4502 			if (ret)
4503 				goto done;
4504 			goto again;
4505 		}
4506 	}
4507 
4508 	btrfs_device_set_disk_total_bytes(device, new_size);
4509 	if (list_empty(&device->resized_list))
4510 		list_add_tail(&device->resized_list,
4511 			      &fs_info->fs_devices->resized_devices);
4512 
4513 	WARN_ON(diff > old_total);
4514 	btrfs_set_super_total_bytes(super_copy,
4515 			round_down(old_total - diff, fs_info->sectorsize));
4516 	mutex_unlock(&fs_info->chunk_mutex);
4517 
4518 	/* Now btrfs_update_device() will change the on-disk size. */
4519 	ret = btrfs_update_device(trans, device);
4520 	if (ret < 0) {
4521 		btrfs_abort_transaction(trans, ret);
4522 		btrfs_end_transaction(trans);
4523 	} else {
4524 		ret = btrfs_commit_transaction(trans);
4525 	}
4526 done:
4527 	btrfs_free_path(path);
4528 	if (ret) {
4529 		mutex_lock(&fs_info->chunk_mutex);
4530 		btrfs_device_set_total_bytes(device, old_size);
4531 		if (device->writeable)
4532 			device->fs_devices->total_rw_bytes += diff;
4533 		atomic64_add(diff, &fs_info->free_chunk_space);
4534 		mutex_unlock(&fs_info->chunk_mutex);
4535 	}
4536 	return ret;
4537 }
4538 
4539 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4540 			   struct btrfs_key *key,
4541 			   struct btrfs_chunk *chunk, int item_size)
4542 {
4543 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4544 	struct btrfs_disk_key disk_key;
4545 	u32 array_size;
4546 	u8 *ptr;
4547 
4548 	mutex_lock(&fs_info->chunk_mutex);
4549 	array_size = btrfs_super_sys_array_size(super_copy);
4550 	if (array_size + item_size + sizeof(disk_key)
4551 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4552 		mutex_unlock(&fs_info->chunk_mutex);
4553 		return -EFBIG;
4554 	}
4555 
4556 	ptr = super_copy->sys_chunk_array + array_size;
4557 	btrfs_cpu_key_to_disk(&disk_key, key);
4558 	memcpy(ptr, &disk_key, sizeof(disk_key));
4559 	ptr += sizeof(disk_key);
4560 	memcpy(ptr, chunk, item_size);
4561 	item_size += sizeof(disk_key);
4562 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4563 	mutex_unlock(&fs_info->chunk_mutex);
4564 
4565 	return 0;
4566 }
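/*
 * Illustrative note, not part of the original file: sys_chunk_array is
 * a flat byte buffer of back-to-back (disk_key, chunk item) pairs, so
 * the append above is just two memcpy()s at the current end:
 *
 *   | key0 | chunk0 | key1 | chunk1 | ... | free space ............ |
 *   ^ sys_chunk_array                     ^ sys_chunk_array + array_size
 */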
4567 
4568 /*
4569  * sort the devices in descending order by max_avail, total_avail
4570  */
4571 static int btrfs_cmp_device_info(const void *a, const void *b)
4572 {
4573 	const struct btrfs_device_info *di_a = a;
4574 	const struct btrfs_device_info *di_b = b;
4575 
4576 	if (di_a->max_avail > di_b->max_avail)
4577 		return -1;
4578 	if (di_a->max_avail < di_b->max_avail)
4579 		return 1;
4580 	if (di_a->total_avail > di_b->total_avail)
4581 		return -1;
4582 	if (di_a->total_avail < di_b->total_avail)
4583 		return 1;
4584 	return 0;
4585 }
4586 
4587 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4588 {
4589 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4590 		return;
4591 
4592 	btrfs_set_fs_incompat(info, RAID56);
4593 }
4594 
4595 #define BTRFS_MAX_DEVS(r) ((BTRFS_MAX_ITEM_SIZE(r->fs_info)		\
4596 			- sizeof(struct btrfs_chunk))		\
4597 			/ sizeof(struct btrfs_stripe) + 1)
4598 
4599 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
4600 				- 2 * sizeof(struct btrfs_disk_key)	\
4601 				- 2 * sizeof(struct btrfs_chunk))	\
4602 				/ sizeof(struct btrfs_stripe) + 1)
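/*
 * Worked example, not part of the original file, assuming the usual
 * on-disk sizes: btrfs_disk_key is 17 bytes, btrfs_stripe 32 bytes,
 * btrfs_chunk 80 bytes (48-byte header plus one embedded stripe), and
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE is 2048.  Then:
 *
 *   BTRFS_MAX_DEVS_SYS_CHUNK = (2048 - 2*17 - 2*80) / 32 + 1
 *                            = 1854 / 32 + 1 = 58
 */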
4603 
4604 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4605 			       u64 start, u64 type)
4606 {
4607 	struct btrfs_fs_info *info = trans->fs_info;
4608 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
4609 	struct btrfs_device *device;
4610 	struct map_lookup *map = NULL;
4611 	struct extent_map_tree *em_tree;
4612 	struct extent_map *em;
4613 	struct btrfs_device_info *devices_info = NULL;
4614 	u64 total_avail;
4615 	int num_stripes;	/* total number of stripes to allocate */
4616 	int data_stripes;	/* number of stripes that count for
4617 				   block group size */
4618 	int sub_stripes;	/* sub_stripes info for map */
4619 	int dev_stripes;	/* stripes per dev */
4620 	int devs_max;		/* max devs to use */
4621 	int devs_min;		/* min devs needed */
4622 	int devs_increment;	/* ndevs has to be a multiple of this */
4623 	int ncopies;		/* how many copies the data has */
4624 	int ret;
4625 	u64 max_stripe_size;
4626 	u64 max_chunk_size;
4627 	u64 stripe_size;
4628 	u64 num_bytes;
4629 	int ndevs;
4630 	int i;
4631 	int j;
4632 	int index;
4633 
4634 	BUG_ON(!alloc_profile_is_valid(type, 0));
4635 
4636 	if (list_empty(&fs_devices->alloc_list))
4637 		return -ENOSPC;
4638 
4639 	index = __get_raid_index(type);
4640 
4641 	sub_stripes = btrfs_raid_array[index].sub_stripes;
4642 	dev_stripes = btrfs_raid_array[index].dev_stripes;
4643 	devs_max = btrfs_raid_array[index].devs_max;
4644 	devs_min = btrfs_raid_array[index].devs_min;
4645 	devs_increment = btrfs_raid_array[index].devs_increment;
4646 	ncopies = btrfs_raid_array[index].ncopies;
4647 
4648 	if (type & BTRFS_BLOCK_GROUP_DATA) {
4649 		max_stripe_size = SZ_1G;
4650 		max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4651 		if (!devs_max)
4652 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4653 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4654 		/* for larger filesystems, use larger metadata chunks */
4655 		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4656 			max_stripe_size = SZ_1G;
4657 		else
4658 			max_stripe_size = SZ_256M;
4659 		max_chunk_size = max_stripe_size;
4660 		if (!devs_max)
4661 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4662 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4663 		max_stripe_size = SZ_32M;
4664 		max_chunk_size = 2 * max_stripe_size;
4665 		if (!devs_max)
4666 			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4667 	} else {
4668 		btrfs_err(info, "invalid chunk type 0x%llx requested",
4669 		       type);
4670 		BUG_ON(1);
4671 	}
4672 
4673 	/* we don't want a chunk larger than 10% of writeable space */
4674 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4675 			     max_chunk_size);
4676 
4677 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4678 			       GFP_NOFS);
4679 	if (!devices_info)
4680 		return -ENOMEM;
4681 
4682 	/*
4683 	 * in the first pass through the devices list, we gather information
4684 	 * about the available holes on each device.
4685 	 */
4686 	ndevs = 0;
4687 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
4688 		u64 max_avail;
4689 		u64 dev_offset;
4690 
4691 		if (!device->writeable) {
4692 			WARN(1, KERN_ERR
4693 			       "BTRFS: read-only device in alloc_list\n");
4694 			continue;
4695 		}
4696 
4697 		if (!device->in_fs_metadata ||
4698 		    device->is_tgtdev_for_dev_replace)
4699 			continue;
4700 
4701 		if (device->total_bytes > device->bytes_used)
4702 			total_avail = device->total_bytes - device->bytes_used;
4703 		else
4704 			total_avail = 0;
4705 
4706 		/* If there is no space on this device, skip it. */
4707 		if (total_avail == 0)
4708 			continue;
4709 
4710 		ret = find_free_dev_extent(trans, device,
4711 					   max_stripe_size * dev_stripes,
4712 					   &dev_offset, &max_avail);
4713 		if (ret && ret != -ENOSPC)
4714 			goto error;
4715 
4716 		if (ret == 0)
4717 			max_avail = max_stripe_size * dev_stripes;
4718 
4719 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4720 			continue;
4721 
4722 		if (ndevs == fs_devices->rw_devices) {
4723 			WARN(1, "%s: found more than %llu devices\n",
4724 			     __func__, fs_devices->rw_devices);
4725 			break;
4726 		}
4727 		devices_info[ndevs].dev_offset = dev_offset;
4728 		devices_info[ndevs].max_avail = max_avail;
4729 		devices_info[ndevs].total_avail = total_avail;
4730 		devices_info[ndevs].dev = device;
4731 		++ndevs;
4732 	}
4733 
4734 	/*
4735 	 * now sort the devices by hole size / available space
4736 	 */
4737 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4738 	     btrfs_cmp_device_info, NULL);
4739 
4740 	/* round down to number of usable stripes */
4741 	ndevs = round_down(ndevs, devs_increment);
4742 
4743 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4744 		ret = -ENOSPC;
4745 		goto error;
4746 	}
4747 
4748 	ndevs = min(ndevs, devs_max);
4749 
4750 	/*
4751 	 * The primary goal is to maximize the number of stripes, so use as
4752 	 * many devices as possible, even if the stripes are not maximum sized.
4753 	 *
4754 	 * The DUP profile stores more than one stripe per device, the
4755 	 * max_avail is the total size so we have to adjust.
4756 	 */
4757 	stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
4758 	num_stripes = ndevs * dev_stripes;
4759 
4760 	/*
4761 	 * this will have to be fixed for RAID1 and RAID10 over
4762 	 * more drives
4763 	 */
4764 	data_stripes = num_stripes / ncopies;
4765 
4766 	if (type & BTRFS_BLOCK_GROUP_RAID5)
4767 		data_stripes = num_stripes - 1;
4768 
4769 	if (type & BTRFS_BLOCK_GROUP_RAID6)
4770 		data_stripes = num_stripes - 2;
4771 
4772 	/*
4773 	 * Use the number of data stripes to figure out how big this chunk
4774 	 * is really going to be in terms of logical address space,
4775 	 * and compare that answer with the max chunk size
4776 	 */
4777 	if (stripe_size * data_stripes > max_chunk_size) {
4778 		u64 mask = (1ULL << 24) - 1;
4779 
4780 		stripe_size = div_u64(max_chunk_size, data_stripes);
4781 
4782 		/* bump the answer up to a 16MB boundary */
4783 		stripe_size = (stripe_size + mask) & ~mask;
4784 
4785 		/* but don't go higher than the limits we found
4786 		 * while searching for free extents
4787 		 */
4788 		if (stripe_size > devices_info[ndevs-1].max_avail)
4789 			stripe_size = devices_info[ndevs-1].max_avail;
4790 	}
4791 
4792 	/* align to BTRFS_STRIPE_LEN */
4793 	stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);
4794 
4795 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4796 	if (!map) {
4797 		ret = -ENOMEM;
4798 		goto error;
4799 	}
4800 	map->num_stripes = num_stripes;
4801 
4802 	for (i = 0; i < ndevs; ++i) {
4803 		for (j = 0; j < dev_stripes; ++j) {
4804 			int s = i * dev_stripes + j;
4805 			map->stripes[s].dev = devices_info[i].dev;
4806 			map->stripes[s].physical = devices_info[i].dev_offset +
4807 						   j * stripe_size;
4808 		}
4809 	}
4810 	map->stripe_len = BTRFS_STRIPE_LEN;
4811 	map->io_align = BTRFS_STRIPE_LEN;
4812 	map->io_width = BTRFS_STRIPE_LEN;
4813 	map->type = type;
4814 	map->sub_stripes = sub_stripes;
4815 
4816 	num_bytes = stripe_size * data_stripes;
4817 
4818 	trace_btrfs_chunk_alloc(info, map, start, num_bytes);
4819 
4820 	em = alloc_extent_map();
4821 	if (!em) {
4822 		kfree(map);
4823 		ret = -ENOMEM;
4824 		goto error;
4825 	}
4826 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4827 	em->map_lookup = map;
4828 	em->start = start;
4829 	em->len = num_bytes;
4830 	em->block_start = 0;
4831 	em->block_len = em->len;
4832 	em->orig_block_len = stripe_size;
4833 
4834 	em_tree = &info->mapping_tree.map_tree;
4835 	write_lock(&em_tree->lock);
4836 	ret = add_extent_mapping(em_tree, em, 0);
4837 	if (!ret) {
4838 		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4839 		refcount_inc(&em->refs);
4840 	}
4841 	write_unlock(&em_tree->lock);
4842 	if (ret) {
4843 		free_extent_map(em);
4844 		goto error;
4845 	}
4846 
4847 	ret = btrfs_make_block_group(trans, info, 0, type, start, num_bytes);
4848 	if (ret)
4849 		goto error_del_extent;
4850 
4851 	for (i = 0; i < map->num_stripes; i++) {
4852 		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4853 		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4854 		map->stripes[i].dev->has_pending_chunks = true;
4855 	}
4856 
4857 	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);
4858 
4859 	free_extent_map(em);
4860 	check_raid56_incompat_flag(info, type);
4861 
4862 	kfree(devices_info);
4863 	return 0;
4864 
4865 error_del_extent:
4866 	write_lock(&em_tree->lock);
4867 	remove_extent_mapping(em_tree, em);
4868 	write_unlock(&em_tree->lock);
4869 
4870 	/* One for our allocation */
4871 	free_extent_map(em);
4872 	/* One for the tree reference */
4873 	free_extent_map(em);
4874 	/* One for the pending_chunks list reference */
4875 	free_extent_map(em);
4876 error:
4877 	kfree(devices_info);
4878 	return ret;
4879 }
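/*
 * Illustrative sketch, not part of the original file: the "bump the
 * answer up to a 16MB boundary" step above is the standard mask
 * round-up.  With mask = (1ULL << 24) - 1 = 0xffffff:
 *
 *   x = 0x1000000 (exactly 16 MiB): (x + mask) & ~mask = 0x1000000
 *   x = 0x1000001 (16 MiB + 1):     (x + mask) & ~mask = 0x2000000
 */
#if 0	/* example only */
static u64 round_up_pow2(u64 x, u64 align)	/* align must be 2^n */
{
	u64 mask = align - 1;

	return (x + mask) & ~mask;
}
#endif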
4880 
4881 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4882 				struct btrfs_fs_info *fs_info,
4883 				u64 chunk_offset, u64 chunk_size)
4884 {
4885 	struct btrfs_root *extent_root = fs_info->extent_root;
4886 	struct btrfs_root *chunk_root = fs_info->chunk_root;
4887 	struct btrfs_key key;
4888 	struct btrfs_device *device;
4889 	struct btrfs_chunk *chunk;
4890 	struct btrfs_stripe *stripe;
4891 	struct extent_map *em;
4892 	struct map_lookup *map;
4893 	size_t item_size;
4894 	u64 dev_offset;
4895 	u64 stripe_size;
4896 	int i = 0;
4897 	int ret = 0;
4898 
4899 	em = get_chunk_map(fs_info, chunk_offset, chunk_size);
4900 	if (IS_ERR(em))
4901 		return PTR_ERR(em);
4902 
4903 	map = em->map_lookup;
4904 	item_size = btrfs_chunk_item_size(map->num_stripes);
4905 	stripe_size = em->orig_block_len;
4906 
4907 	chunk = kzalloc(item_size, GFP_NOFS);
4908 	if (!chunk) {
4909 		ret = -ENOMEM;
4910 		goto out;
4911 	}
4912 
4913 	/*
4914 	 * Take the device list mutex to prevent races with the final phase of
4915 	 * a device replace operation that replaces the device object associated
4916 	 * with the map's stripes, because the device object's id can change
4917 	 * at any time during that final phase of the device replace operation
4918 	 * (dev-replace.c:btrfs_dev_replace_finishing()).
4919 	 */
4920 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4921 	for (i = 0; i < map->num_stripes; i++) {
4922 		device = map->stripes[i].dev;
4923 		dev_offset = map->stripes[i].physical;
4924 
4925 		ret = btrfs_update_device(trans, device);
4926 		if (ret)
4927 			break;
4928 		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
4929 					     dev_offset, stripe_size);
4930 		if (ret)
4931 			break;
4932 	}
4933 	if (ret) {
4934 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4935 		goto out;
4936 	}
4937 
4938 	stripe = &chunk->stripe;
4939 	for (i = 0; i < map->num_stripes; i++) {
4940 		device = map->stripes[i].dev;
4941 		dev_offset = map->stripes[i].physical;
4942 
4943 		btrfs_set_stack_stripe_devid(stripe, device->devid);
4944 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4945 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4946 		stripe++;
4947 	}
4948 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4949 
4950 	btrfs_set_stack_chunk_length(chunk, chunk_size);
4951 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4952 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4953 	btrfs_set_stack_chunk_type(chunk, map->type);
4954 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4955 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4956 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4957 	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
4958 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4959 
4960 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4961 	key.type = BTRFS_CHUNK_ITEM_KEY;
4962 	key.offset = chunk_offset;
4963 
4964 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4965 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4966 		/*
4967 		 * TODO: Cleanup of inserted chunk root in case of
4968 		 * failure.
4969 		 */
4970 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
4971 	}
4972 
4973 out:
4974 	kfree(chunk);
4975 	free_extent_map(em);
4976 	return ret;
4977 }
4978 
4979 /*
4980  * Chunk allocation falls into two parts. The first part does the work
4981  * that makes the newly allocated chunk usable, but does not do any
4982  * operation that modifies the chunk tree. The second part does the work
4983  * that requires modifying the chunk tree. This division is important for
4984  * the bootstrap process of adding storage to a seed btrfs.
4985  */
4986 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4987 		      struct btrfs_fs_info *fs_info, u64 type)
4988 {
4989 	u64 chunk_offset;
4990 
4991 	ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
4992 	chunk_offset = find_next_chunk(fs_info);
4993 	return __btrfs_alloc_chunk(trans, chunk_offset, type);
4994 }
4995 
4996 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4997 					 struct btrfs_fs_info *fs_info)
4998 {
4999 	u64 chunk_offset;
5000 	u64 sys_chunk_offset;
5001 	u64 alloc_profile;
5002 	int ret;
5003 
5004 	chunk_offset = find_next_chunk(fs_info);
5005 	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5006 	ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
5007 	if (ret)
5008 		return ret;
5009 
5010 	sys_chunk_offset = find_next_chunk(fs_info);
5011 	alloc_profile = btrfs_system_alloc_profile(fs_info);
5012 	ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
5013 	return ret;
5014 }
5015 
5016 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5017 {
5018 	int max_errors;
5019 
5020 	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5021 			 BTRFS_BLOCK_GROUP_RAID10 |
5022 			 BTRFS_BLOCK_GROUP_RAID5)) {
5023 		max_errors = 1;
5024 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5025 		max_errors = 2;
5026 	} else {
5027 		max_errors = 0;
5028 	}
5029 
5030 	return max_errors;
5031 }
5032 
5033 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5034 {
5035 	struct extent_map *em;
5036 	struct map_lookup *map;
5037 	int readonly = 0;
5038 	int miss_ndevs = 0;
5039 	int i;
5040 
5041 	em = get_chunk_map(fs_info, chunk_offset, 1);
5042 	if (IS_ERR(em))
5043 		return 1;
5044 
5045 	map = em->map_lookup;
5046 	for (i = 0; i < map->num_stripes; i++) {
5047 		if (map->stripes[i].dev->missing) {
5048 			miss_ndevs++;
5049 			continue;
5050 		}
5051 
5052 		if (!map->stripes[i].dev->writeable) {
5053 			readonly = 1;
5054 			goto end;
5055 		}
5056 	}
5057 
5058 	/*
5059 	 * If the number of missing devices is larger than max errors,
5060 	 * we cannot write the data into that chunk successfully, so
5061 	 * set it readonly.
5062 	 */
5063 	if (miss_ndevs > btrfs_chunk_max_errors(map))
5064 		readonly = 1;
5065 end:
5066 	free_extent_map(em);
5067 	return readonly;
5068 }
5069 
5070 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
5071 {
5072 	extent_map_tree_init(&tree->map_tree);
5073 }
5074 
5075 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
5076 {
5077 	struct extent_map *em;
5078 
5079 	while (1) {
5080 		write_lock(&tree->map_tree.lock);
5081 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
5082 		if (em)
5083 			remove_extent_mapping(&tree->map_tree, em);
5084 		write_unlock(&tree->map_tree.lock);
5085 		if (!em)
5086 			break;
5087 		/* once for us */
5088 		free_extent_map(em);
5089 		/* once for the tree */
5090 		free_extent_map(em);
5091 	}
5092 }
5093 
5094 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5095 {
5096 	struct extent_map *em;
5097 	struct map_lookup *map;
5098 	int ret;
5099 
5100 	em = get_chunk_map(fs_info, logical, len);
5101 	if (IS_ERR(em))
5102 		/*
5103 		 * We could return errors for these cases, but that could get
5104 		 * ugly and we would probably end up doing the same thing:
5105 		 * nothing else, then exit. So return 1 so the callers don't
5106 		 * try to use other copies.
5107 		 */
5108 		return 1;
5109 
5110 	map = em->map_lookup;
5111 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5112 		ret = map->num_stripes;
5113 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5114 		ret = map->sub_stripes;
5115 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5116 		ret = 2;
5117 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5118 		/*
5119 		 * There could be two corrupted data stripes, we need
5120 		 * to loop retry in order to rebuild the correct data.
5121 		 *
5122 		 * Fail a stripe at a time on every retry except the
5123 		 * stripe under reconstruction.
5124 		 */
5125 		ret = map->num_stripes;
5126 	else
5127 		ret = 1;
5128 	free_extent_map(em);
5129 
5130 	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
5131 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5132 	    fs_info->dev_replace.tgtdev)
5133 		ret++;
5134 	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
5135 
5136 	return ret;
5137 }
5138 
5139 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5140 				    u64 logical)
5141 {
5142 	struct extent_map *em;
5143 	struct map_lookup *map;
5144 	unsigned long len = fs_info->sectorsize;
5145 
5146 	em = get_chunk_map(fs_info, logical, len);
5147 
5148 	if (!WARN_ON(IS_ERR(em))) {
5149 		map = em->map_lookup;
5150 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5151 			len = map->stripe_len * nr_data_stripes(map);
5152 		free_extent_map(em);
5153 	}
5154 	return len;
5155 }
5156 
5157 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5158 {
5159 	struct extent_map *em;
5160 	struct map_lookup *map;
5161 	int ret = 0;
5162 
5163 	em = get_chunk_map(fs_info, logical, len);
5164 
5165 	if (!WARN_ON(IS_ERR(em))) {
5166 		map = em->map_lookup;
5167 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5168 			ret = 1;
5169 		free_extent_map(em);
5170 	}
5171 	return ret;
5172 }
5173 
5174 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5175 			    struct map_lookup *map, int first, int num,
5176 			    int optimal, int dev_replace_is_ongoing)
5177 {
5178 	int i;
5179 	int tolerance;
5180 	struct btrfs_device *srcdev;
5181 
5182 	if (dev_replace_is_ongoing &&
5183 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5184 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5185 		srcdev = fs_info->dev_replace.srcdev;
5186 	else
5187 		srcdev = NULL;
5188 
5189 	/*
5190 	 * try to avoid the drive that is the source drive for a
5191 	 * dev-replace procedure; only choose it if no other non-missing
5192 	 * mirror is available
5193 	 */
5194 	for (tolerance = 0; tolerance < 2; tolerance++) {
5195 		if (map->stripes[optimal].dev->bdev &&
5196 		    (tolerance || map->stripes[optimal].dev != srcdev))
5197 			return optimal;
5198 		for (i = first; i < first + num; i++) {
5199 			if (map->stripes[i].dev->bdev &&
5200 			    (tolerance || map->stripes[i].dev != srcdev))
5201 				return i;
5202 		}
5203 	}
5204 
5205 	/* we couldn't find one that doesn't fail.  Just return something
5206 	 * and the io error handling code will clean up eventually
5207 	 */
5208 	return optimal;
5209 }
5210 
5211 static inline int parity_smaller(u64 a, u64 b)
5212 {
5213 	return a > b;
5214 }
5215 
5216 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5217 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5218 {
5219 	struct btrfs_bio_stripe s;
5220 	int i;
5221 	u64 l;
5222 	int again = 1;
5223 
5224 	while (again) {
5225 		again = 0;
5226 		for (i = 0; i < num_stripes - 1; i++) {
5227 			if (parity_smaller(bbio->raid_map[i],
5228 					   bbio->raid_map[i+1])) {
5229 				s = bbio->stripes[i];
5230 				l = bbio->raid_map[i];
5231 				bbio->stripes[i] = bbio->stripes[i+1];
5232 				bbio->raid_map[i] = bbio->raid_map[i+1];
5233 				bbio->stripes[i+1] = s;
5234 				bbio->raid_map[i+1] = l;
5235 
5236 				again = 1;
5237 			}
5238 		}
5239 	}
5240 }
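/*
 * Illustrative note, not part of the original file: the parity slots in
 * raid_map hold the sentinels BTRFS_RAID5_P_STRIPE ((u64)-2) and
 * BTRFS_RAID6_Q_STRIPE ((u64)-1), which compare greater than any real
 * logical address, so the ascending sort above naturally pushes P and
 * then Q behind all data stripes:
 *
 *   before: [Q, data@4096, P, data@0]  after: [data@0, data@4096, P, Q]
 */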
5241 
5242 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5243 {
5244 	struct btrfs_bio *bbio = kzalloc(
5245 		 /* the size of the btrfs_bio */
5246 		sizeof(struct btrfs_bio) +
5247 		/* plus the variable array for the stripes */
5248 		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5249 		/* plus the variable array for the tgt dev */
5250 		sizeof(int) * (real_stripes) +
5251 		/*
5252 		 * plus the raid_map, which includes both the tgt dev
5253 		 * and the stripes
5254 		 */
5255 		sizeof(u64) * (total_stripes),
5256 		GFP_NOFS|__GFP_NOFAIL);
5257 
5258 	atomic_set(&bbio->error, 0);
5259 	refcount_set(&bbio->refs, 1);
5260 
5261 	return bbio;
5262 }
5263 
5264 void btrfs_get_bbio(struct btrfs_bio *bbio)
5265 {
5266 	WARN_ON(!refcount_read(&bbio->refs));
5267 	refcount_inc(&bbio->refs);
5268 }
5269 
5270 void btrfs_put_bbio(struct btrfs_bio *bbio)
5271 {
5272 	if (!bbio)
5273 		return;
5274 	if (refcount_dec_and_test(&bbio->refs))
5275 		kfree(bbio);
5276 }
5277 
5278 /* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5279 /*
5280  * Note that discard is not sent to the target device of a running
5281  * device replace.
5282  */
5283 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5284 					 u64 logical, u64 length,
5285 					 struct btrfs_bio **bbio_ret)
5286 {
5287 	struct extent_map *em;
5288 	struct map_lookup *map;
5289 	struct btrfs_bio *bbio;
5290 	u64 offset;
5291 	u64 stripe_nr;
5292 	u64 stripe_nr_end;
5293 	u64 stripe_end_offset;
5294 	u64 stripe_cnt;
5295 	u64 stripe_len;
5296 	u64 stripe_offset;
5297 	u64 num_stripes;
5298 	u32 stripe_index;
5299 	u32 factor = 0;
5300 	u32 sub_stripes = 0;
5301 	u64 stripes_per_dev = 0;
5302 	u32 remaining_stripes = 0;
5303 	u32 last_stripe = 0;
5304 	int ret = 0;
5305 	int i;
5306 
5307 	/* discard always returns a bbio */
5308 	ASSERT(bbio_ret);
5309 
5310 	em = get_chunk_map(fs_info, logical, length);
5311 	if (IS_ERR(em))
5312 		return PTR_ERR(em);
5313 
5314 	map = em->map_lookup;
5315 	/* we don't discard raid56 yet */
5316 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5317 		ret = -EOPNOTSUPP;
5318 		goto out;
5319 	}
5320 
5321 	offset = logical - em->start;
5322 	length = min_t(u64, em->len - offset, length);
5323 
5324 	stripe_len = map->stripe_len;
5325 	/*
5326 	 * stripe_nr counts the total number of stripes we have to stride
5327 	 * to get to this block
5328 	 */
5329 	stripe_nr = div64_u64(offset, stripe_len);
5330 
5331 	/* stripe_offset is the offset of this block in its stripe */
5332 	stripe_offset = offset - stripe_nr * stripe_len;
5333 
5334 	stripe_nr_end = round_up(offset + length, map->stripe_len);
5335 	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5336 	stripe_cnt = stripe_nr_end - stripe_nr;
5337 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5338 			    (offset + length);
5339 	/*
5340 	 * after this, stripe_nr is the number of stripes on this
5341 	 * device we have to walk to find the data, and stripe_index is
5342 	 * the number of our device in the stripe array
5343 	 */
5344 	num_stripes = 1;
5345 	stripe_index = 0;
5346 	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5347 			 BTRFS_BLOCK_GROUP_RAID10)) {
5348 		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5349 			sub_stripes = 1;
5350 		else
5351 			sub_stripes = map->sub_stripes;
5352 
5353 		factor = map->num_stripes / sub_stripes;
5354 		num_stripes = min_t(u64, map->num_stripes,
5355 				    sub_stripes * stripe_cnt);
5356 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5357 		stripe_index *= sub_stripes;
5358 		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5359 					      &remaining_stripes);
5360 		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5361 		last_stripe *= sub_stripes;
5362 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5363 				BTRFS_BLOCK_GROUP_DUP)) {
5364 		num_stripes = map->num_stripes;
5365 	} else {
5366 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5367 					&stripe_index);
5368 	}
5369 
5370 	bbio = alloc_btrfs_bio(num_stripes, 0);
5371 	if (!bbio) {
5372 		ret = -ENOMEM;
5373 		goto out;
5374 	}
5375 
5376 	for (i = 0; i < num_stripes; i++) {
5377 		bbio->stripes[i].physical =
5378 			map->stripes[stripe_index].physical +
5379 			stripe_offset + stripe_nr * map->stripe_len;
5380 		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5381 
5382 		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5383 				 BTRFS_BLOCK_GROUP_RAID10)) {
5384 			bbio->stripes[i].length = stripes_per_dev *
5385 				map->stripe_len;
5386 
5387 			if (i / sub_stripes < remaining_stripes)
5388 				bbio->stripes[i].length +=
5389 					map->stripe_len;
5390 
5391 			/*
5392 			 * Special for the first stripe and
5393 			 * the last stripe:
5394 			 *
5395 			 * |-------|...|-------|
5396 			 *     |----------|
5397 			 *    off     end_off
5398 			 */
5399 			if (i < sub_stripes)
5400 				bbio->stripes[i].length -=
5401 					stripe_offset;
5402 
5403 			if (stripe_index >= last_stripe &&
5404 			    stripe_index <= (last_stripe +
5405 					     sub_stripes - 1))
5406 				bbio->stripes[i].length -=
5407 					stripe_end_offset;
5408 
5409 			if (i == sub_stripes - 1)
5410 				stripe_offset = 0;
5411 		} else {
5412 			bbio->stripes[i].length = length;
5413 		}
5414 
5415 		stripe_index++;
5416 		if (stripe_index == map->num_stripes) {
5417 			stripe_index = 0;
5418 			stripe_nr++;
5419 		}
5420 	}
5421 
5422 	*bbio_ret = bbio;
5423 	bbio->map_type = map->type;
5424 	bbio->num_stripes = num_stripes;
5425 out:
5426 	free_extent_map(em);
5427 	return ret;
5428 }
5429 
5430 /*
5431  * In the dev-replace case, for the repair case (that's the only case where the mirror
5432  * is selected explicitly when calling btrfs_map_block), blocks left of the
5433  * left cursor can also be read from the target drive.
5434  *
5435  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5436  * array of stripes.
5437  * For READ, it also needs to be supported using the same mirror number.
5438  *
5439  * If the requested block is not left of the left cursor, EIO is returned. This
5440  * can happen because btrfs_num_copies() returns one more in the dev-replace
5441  * case.
5442  */
5443 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5444 					 u64 logical, u64 length,
5445 					 u64 srcdev_devid, int *mirror_num,
5446 					 u64 *physical)
5447 {
5448 	struct btrfs_bio *bbio = NULL;
5449 	int num_stripes;
5450 	int index_srcdev = 0;
5451 	int found = 0;
5452 	u64 physical_of_found = 0;
5453 	int i;
5454 	int ret = 0;
5455 
5456 	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5457 				logical, &length, &bbio, 0, 0);
5458 	if (ret) {
5459 		ASSERT(bbio == NULL);
5460 		return ret;
5461 	}
5462 
5463 	num_stripes = bbio->num_stripes;
5464 	if (*mirror_num > num_stripes) {
5465 		/*
5466 		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5467 		 * that means that the requested area is not left of the left
5468 		 * cursor
5469 		 */
5470 		btrfs_put_bbio(bbio);
5471 		return -EIO;
5472 	}
5473 
5474 	/*
5475 	 * process the rest of the function using the mirror_num of the source
5476 	 * drive. Therefore look it up first.  At the end, patch the device
5477 	 * pointer to the one of the target drive.
5478 	 */
5479 	for (i = 0; i < num_stripes; i++) {
5480 		if (bbio->stripes[i].dev->devid != srcdev_devid)
5481 			continue;
5482 
5483 		/*
5484 		 * In case of DUP, in order to keep it simple, only add the
5485 		 * mirror with the lowest physical address
5486 		 */
5487 		if (found &&
5488 		    physical_of_found <= bbio->stripes[i].physical)
5489 			continue;
5490 
5491 		index_srcdev = i;
5492 		found = 1;
5493 		physical_of_found = bbio->stripes[i].physical;
5494 	}
5495 
5496 	btrfs_put_bbio(bbio);
5497 
5498 	ASSERT(found);
5499 	if (!found)
5500 		return -EIO;
5501 
5502 	*mirror_num = index_srcdev + 1;
5503 	*physical = physical_of_found;
5504 	return ret;
5505 }
5506 
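/*
 * Adjust the stripes of a bbio while a device replace is running:
 * writes that go to the source device are duplicated to the target
 * device, and for BTRFS_MAP_GET_READ_MIRRORS the target device is
 * appended as an extra mirror.  The stripe and error counts are
 * updated through the *_ret parameters.
 */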
5507 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5508 				      struct btrfs_bio **bbio_ret,
5509 				      struct btrfs_dev_replace *dev_replace,
5510 				      int *num_stripes_ret, int *max_errors_ret)
5511 {
5512 	struct btrfs_bio *bbio = *bbio_ret;
5513 	u64 srcdev_devid = dev_replace->srcdev->devid;
5514 	int tgtdev_indexes = 0;
5515 	int num_stripes = *num_stripes_ret;
5516 	int max_errors = *max_errors_ret;
5517 	int i;
5518 
5519 	if (op == BTRFS_MAP_WRITE) {
5520 		int index_where_to_add;
5521 
5522 		/*
5523 		 * duplicate the write operations while the dev replace
5524 		 * procedure is running. Since the copying of the old disk to
5525 		 * the new disk takes place at run time while the filesystem is
5526 		 * mounted writable, the regular write operations to the old
5527 		 * disk have to be duplicated to go to the new disk as well.
5528 		 *
5529 		 * Note that device->missing is handled by the caller, and that
5530 		 * the write to the old disk is already set up in the stripes
5531 		 * array.
5532 		 */
5533 		index_where_to_add = num_stripes;
5534 		for (i = 0; i < num_stripes; i++) {
5535 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5536 				/* write to new disk, too */
5537 				struct btrfs_bio_stripe *new =
5538 					bbio->stripes + index_where_to_add;
5539 				struct btrfs_bio_stripe *old =
5540 					bbio->stripes + i;
5541 
5542 				new->physical = old->physical;
5543 				new->length = old->length;
5544 				new->dev = dev_replace->tgtdev;
5545 				bbio->tgtdev_map[i] = index_where_to_add;
5546 				index_where_to_add++;
5547 				max_errors++;
5548 				tgtdev_indexes++;
5549 			}
5550 		}
5551 		num_stripes = index_where_to_add;
5552 	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5553 		int index_srcdev = 0;
5554 		int found = 0;
5555 		u64 physical_of_found = 0;
5556 
5557 		/*
5558 		 * During the dev-replace procedure, the target drive can also
5559 		 * be used to read data in case it is needed to repair a corrupt
5560 		 * block elsewhere. This is possible if the requested area is
5561 		 * left of the left cursor. In this area, the target drive is a
5562 		 * full copy of the source drive.
5563 		 */
5564 		for (i = 0; i < num_stripes; i++) {
5565 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5566 				/*
5567 				 * In case of DUP, in order to keep it simple,
5568 				 * only add the mirror with the lowest physical
5569 				 * address
5570 				 */
5571 				if (found &&
5572 				    physical_of_found <=
5573 				     bbio->stripes[i].physical)
5574 					continue;
5575 				index_srcdev = i;
5576 				found = 1;
5577 				physical_of_found = bbio->stripes[i].physical;
5578 			}
5579 		}
5580 		if (found) {
5581 			struct btrfs_bio_stripe *tgtdev_stripe =
5582 				bbio->stripes + num_stripes;
5583 
5584 			tgtdev_stripe->physical = physical_of_found;
5585 			tgtdev_stripe->length =
5586 				bbio->stripes[index_srcdev].length;
5587 			tgtdev_stripe->dev = dev_replace->tgtdev;
5588 			bbio->tgtdev_map[index_srcdev] = num_stripes;
5589 
5590 			tgtdev_indexes++;
5591 			num_stripes++;
5592 		}
5593 	}
5594 
5595 	*num_stripes_ret = num_stripes;
5596 	*max_errors_ret = max_errors;
5597 	bbio->num_tgtdevs = tgtdev_indexes;
5598 	*bbio_ret = bbio;
5599 }
5600 
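/* Ops that need the full set of stripes rather than a single mirror. */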
5601 static bool need_full_stripe(enum btrfs_map_op op)
5602 {
5603 	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5604 }
5605 
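/*
 * Map a logical address and length to the physical stripes that store
 * it, according to the RAID profile of the containing chunk.  Unless the
 * caller only asked for the mapped length, a btrfs_bio describing the
 * stripes is returned in @bbio_ret, possibly extended with dev-replace
 * target stripes and a raid_map for RAID5/6.
 */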
5606 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
5607 			     enum btrfs_map_op op,
5608 			     u64 logical, u64 *length,
5609 			     struct btrfs_bio **bbio_ret,
5610 			     int mirror_num, int need_raid_map)
5611 {
5612 	struct extent_map *em;
5613 	struct map_lookup *map;
5614 	u64 offset;
5615 	u64 stripe_offset;
5616 	u64 stripe_nr;
5617 	u64 stripe_len;
5618 	u32 stripe_index;
5619 	int i;
5620 	int ret = 0;
5621 	int num_stripes;
5622 	int max_errors = 0;
5623 	int tgtdev_indexes = 0;
5624 	struct btrfs_bio *bbio = NULL;
5625 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5626 	int dev_replace_is_ongoing = 0;
5627 	int num_alloc_stripes;
5628 	int patch_the_first_stripe_for_dev_replace = 0;
5629 	u64 physical_to_patch_in_first_stripe = 0;
5630 	u64 raid56_full_stripe_start = (u64)-1;
5631 
5632 	if (op == BTRFS_MAP_DISCARD)
5633 		return __btrfs_map_block_for_discard(fs_info, logical,
5634 						     *length, bbio_ret);
5635 
5636 	em = get_chunk_map(fs_info, logical, *length);
5637 	if (IS_ERR(em))
5638 		return PTR_ERR(em);
5639 
5640 	map = em->map_lookup;
5641 	offset = logical - em->start;
5642 
5643 	stripe_len = map->stripe_len;
5644 	stripe_nr = offset;
5645 	/*
5646 	 * stripe_nr counts the total number of stripes we have to stride
5647 	 * to get to this block
5648 	 */
5649 	stripe_nr = div64_u64(stripe_nr, stripe_len);
5650 
5651 	stripe_offset = stripe_nr * stripe_len;
5652 	if (offset < stripe_offset) {
5653 		btrfs_crit(fs_info,
5654 			   "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
5655 			   stripe_offset, offset, em->start, logical,
5656 			   stripe_len);
5657 		free_extent_map(em);
5658 		return -EINVAL;
5659 	}
5660 
5661 	/* stripe_offset is the offset of this block in its stripe */
5662 	stripe_offset = offset - stripe_offset;
5663 
5664 	/* if we're here for raid56, we need to know the stripe-aligned start */
5665 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5666 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5667 		raid56_full_stripe_start = offset;
5668 
5669 		/* allow a write of a full stripe, but make sure we don't
5670 		 * allow straddling of stripes
5671 		 */
5672 		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5673 				full_stripe_len);
5674 		raid56_full_stripe_start *= full_stripe_len;
5675 	}
5676 
5677 	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5678 		u64 max_len;
5679 		/* For writes to RAID[56], allow a full stripeset across all disks.
5680 		 * For other RAID types and for RAID[56] reads, just allow a single
5681 		 * stripe (on a single disk). */
5682 		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5683 		    (op == BTRFS_MAP_WRITE)) {
5684 			max_len = stripe_len * nr_data_stripes(map) -
5685 				(offset - raid56_full_stripe_start);
5686 		} else {
5687 			/* we limit the length of each bio to what fits in a stripe */
5688 			max_len = stripe_len - stripe_offset;
5689 		}
5690 		*length = min_t(u64, em->len - offset, max_len);
5691 	} else {
5692 		*length = em->len - offset;
5693 	}
5694 
5695 	/* This is for when we're called from btrfs_merge_bio_hook() and all
5696 	 * it cares about is the length */
5697 	if (!bbio_ret)
5698 		goto out;
5699 
5700 	btrfs_dev_replace_lock(dev_replace, 0);
5701 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5702 	if (!dev_replace_is_ongoing)
5703 		btrfs_dev_replace_unlock(dev_replace, 0);
5704 	else
5705 		btrfs_dev_replace_set_lock_blocking(dev_replace);
5706 
5707 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5708 	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
5709 		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
5710 						    dev_replace->srcdev->devid,
5711 						    &mirror_num,
5712 					    &physical_to_patch_in_first_stripe);
5713 		if (ret)
5714 			goto out;
5715 		else
5716 			patch_the_first_stripe_for_dev_replace = 1;
5717 	} else if (mirror_num > map->num_stripes) {
5718 		mirror_num = 0;
5719 	}
5720 
5721 	num_stripes = 1;
5722 	stripe_index = 0;
5723 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5724 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5725 				&stripe_index);
5726 		if (op != BTRFS_MAP_WRITE && op != BTRFS_MAP_GET_READ_MIRRORS)
5727 			mirror_num = 1;
5728 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5729 		if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
5730 			num_stripes = map->num_stripes;
5731 		else if (mirror_num)
5732 			stripe_index = mirror_num - 1;
5733 		else {
5734 			stripe_index = find_live_mirror(fs_info, map, 0,
5735 					    map->num_stripes,
5736 					    current->pid % map->num_stripes,
5737 					    dev_replace_is_ongoing);
5738 			mirror_num = stripe_index + 1;
5739 		}
5740 
5741 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5742 		if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS) {
5743 			num_stripes = map->num_stripes;
5744 		} else if (mirror_num) {
5745 			stripe_index = mirror_num - 1;
5746 		} else {
5747 			mirror_num = 1;
5748 		}
5749 
5750 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5751 		u32 factor = map->num_stripes / map->sub_stripes;
5752 
5753 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5754 		stripe_index *= map->sub_stripes;
5755 
5756 		if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
5757 			num_stripes = map->sub_stripes;
5758 		else if (mirror_num)
5759 			stripe_index += mirror_num - 1;
5760 		else {
5761 			int old_stripe_index = stripe_index;
5762 			stripe_index = find_live_mirror(fs_info, map,
5763 					      stripe_index,
5764 					      map->sub_stripes, stripe_index +
5765 					      current->pid % map->sub_stripes,
5766 					      dev_replace_is_ongoing);
5767 			mirror_num = stripe_index - old_stripe_index + 1;
5768 		}
5769 
5770 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5771 		if (need_raid_map &&
5772 		    (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS ||
5773 		     mirror_num > 1)) {
5774 			/* push stripe_nr back to the start of the full stripe */
5775 			stripe_nr = div64_u64(raid56_full_stripe_start,
5776 					stripe_len * nr_data_stripes(map));
5777 
5778 			/* RAID[56] write or recovery. Return all stripes */
5779 			num_stripes = map->num_stripes;
5780 			max_errors = nr_parity_stripes(map);
5781 
5782 			*length = map->stripe_len;
5783 			stripe_index = 0;
5784 			stripe_offset = 0;
5785 		} else {
5786 			/*
5787 			 * Mirror #0 or #1 means the original data block.
5788 			 * Mirror #2 is RAID5 parity block.
5789 			 * Mirror #3 is RAID6 Q block.
5790 			 */
5791 			stripe_nr = div_u64_rem(stripe_nr,
5792 					nr_data_stripes(map), &stripe_index);
5793 			if (mirror_num > 1)
5794 				stripe_index = nr_data_stripes(map) +
5795 						mirror_num - 2;
5796 
5797 			/* We distribute the parity blocks across stripes */
5798 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5799 					&stripe_index);
5800 			if ((op != BTRFS_MAP_WRITE &&
5801 			     op != BTRFS_MAP_GET_READ_MIRRORS) &&
5802 			    mirror_num <= 1)
5803 				mirror_num = 1;
5804 		}
5805 	} else {
5806 		/*
5807 		 * after this, stripe_nr is the number of stripes on this
5808 		 * device we have to walk to find the data, and stripe_index is
5809 		 * the number of our device in the stripe array
5810 		 */
5811 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5812 				&stripe_index);
5813 		mirror_num = stripe_index + 1;
5814 	}
5815 	if (stripe_index >= map->num_stripes) {
5816 		btrfs_crit(fs_info,
5817 			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
5818 			   stripe_index, map->num_stripes);
5819 		ret = -EINVAL;
5820 		goto out;
5821 	}
5822 
5823 	num_alloc_stripes = num_stripes;
5824 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
5825 		if (op == BTRFS_MAP_WRITE)
5826 			num_alloc_stripes <<= 1;
5827 		if (op == BTRFS_MAP_GET_READ_MIRRORS)
5828 			num_alloc_stripes++;
5829 		tgtdev_indexes = num_stripes;
5830 	}
5831 
5832 	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5833 	if (!bbio) {
5834 		ret = -ENOMEM;
5835 		goto out;
5836 	}
5837 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
5838 		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5839 
5840 	/* build raid_map */
5841 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
5842 	    (need_full_stripe(op) || mirror_num > 1)) {
5843 		u64 tmp;
5844 		unsigned rot;
5845 
5846 		bbio->raid_map = (u64 *)((void *)bbio->stripes +
5847 				 sizeof(struct btrfs_bio_stripe) *
5848 				 num_alloc_stripes +
5849 				 sizeof(int) * tgtdev_indexes);
5850 
5851 		/* Work out the disk rotation on this stripe-set */
5852 		div_u64_rem(stripe_nr, num_stripes, &rot);
5853 
5854 		/* Fill in the logical address of each stripe */
5855 		tmp = stripe_nr * nr_data_stripes(map);
5856 		for (i = 0; i < nr_data_stripes(map); i++)
5857 			bbio->raid_map[(i+rot) % num_stripes] =
5858 				em->start + (tmp + i) * map->stripe_len;
5859 
5860 		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
5861 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5862 			bbio->raid_map[(i+rot+1) % num_stripes] =
5863 				RAID6_Q_STRIPE;
5864 	}
5865 
5866 
5867 	for (i = 0; i < num_stripes; i++) {
5868 		bbio->stripes[i].physical =
5869 			map->stripes[stripe_index].physical +
5870 			stripe_offset +
5871 			stripe_nr * map->stripe_len;
5872 		bbio->stripes[i].dev =
5873 			map->stripes[stripe_index].dev;
5874 		stripe_index++;
5875 	}
5876 
5877 	if (need_full_stripe(op))
5878 		max_errors = btrfs_chunk_max_errors(map);
5879 
5880 	if (bbio->raid_map)
5881 		sort_parity_stripes(bbio, num_stripes);
5882 
5883 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
5884 	    need_full_stripe(op)) {
5885 		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
5886 					  &max_errors);
5887 	}
5888 
5889 	*bbio_ret = bbio;
5890 	bbio->map_type = map->type;
5891 	bbio->num_stripes = num_stripes;
5892 	bbio->max_errors = max_errors;
5893 	bbio->mirror_num = mirror_num;
5894 
5895 	/*
5896 	 * This is the case where REQ_READ && dev_replace_is_ongoing &&
5897 	 * mirror_num == num_stripes + 1 && dev_replace target drive is
5898 	 * available as a mirror
5899 	 */
5900 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5901 		WARN_ON(num_stripes > 1);
5902 		bbio->stripes[0].dev = dev_replace->tgtdev;
5903 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5904 		bbio->mirror_num = map->num_stripes + 1;
5905 	}
5906 out:
5907 	if (dev_replace_is_ongoing) {
5908 		btrfs_dev_replace_clear_lock_blocking(dev_replace);
5909 		btrfs_dev_replace_unlock(dev_replace, 0);
5910 	}
5911 	free_extent_map(em);
5912 	return ret;
5913 }
5914 
5915 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5916 		      u64 logical, u64 *length,
5917 		      struct btrfs_bio **bbio_ret, int mirror_num)
5918 {
5919 	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
5920 				 mirror_num, 0);
5921 }
5922 
5923 /* For Scrub/replace */
5924 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5925 		     u64 logical, u64 *length,
5926 		     struct btrfs_bio **bbio_ret)
5927 {
5928 	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
5929 }
5930 
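/*
 * Reverse mapping: given a physical address on a device, collect the
 * logical addresses within the given chunk that map to it.  The result
 * array is returned in @logical together with the stripe length used
 * for the mapping.
 */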
5931 int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
5932 		     u64 chunk_start, u64 physical, u64 devid,
5933 		     u64 **logical, int *naddrs, int *stripe_len)
5934 {
5935 	struct extent_map *em;
5936 	struct map_lookup *map;
5937 	u64 *buf;
5938 	u64 bytenr;
5939 	u64 length;
5940 	u64 stripe_nr;
5941 	u64 rmap_len;
5942 	int i, j, nr = 0;
5943 
5944 	em = get_chunk_map(fs_info, chunk_start, 1);
5945 	if (IS_ERR(em))
5946 		return -EIO;
5947 
5948 	map = em->map_lookup;
5949 	length = em->len;
5950 	rmap_len = map->stripe_len;
5951 
5952 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5953 		length = div_u64(length, map->num_stripes / map->sub_stripes);
5954 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5955 		length = div_u64(length, map->num_stripes);
5956 	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5957 		length = div_u64(length, nr_data_stripes(map));
5958 		rmap_len = map->stripe_len * nr_data_stripes(map);
5959 	}
5960 
5961 	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5962 	BUG_ON(!buf); /* -ENOMEM */
5963 
5964 	for (i = 0; i < map->num_stripes; i++) {
5965 		if (devid && map->stripes[i].dev->devid != devid)
5966 			continue;
5967 		if (map->stripes[i].physical > physical ||
5968 		    map->stripes[i].physical + length <= physical)
5969 			continue;
5970 
5971 		stripe_nr = physical - map->stripes[i].physical;
5972 		stripe_nr = div64_u64(stripe_nr, map->stripe_len);
5973 
5974 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5975 			stripe_nr = stripe_nr * map->num_stripes + i;
5976 			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5977 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5978 			stripe_nr = stripe_nr * map->num_stripes + i;
5979 		} /* else if RAID[56], multiply by nr_data_stripes().
5980 		   * Alternatively, just use rmap_len below instead of
5981 		   * map->stripe_len */
5982 
5983 		bytenr = chunk_start + stripe_nr * rmap_len;
5984 		WARN_ON(nr >= map->num_stripes);
5985 		for (j = 0; j < nr; j++) {
5986 			if (buf[j] == bytenr)
5987 				break;
5988 		}
5989 		if (j == nr) {
5990 			WARN_ON(nr >= map->num_stripes);
5991 			buf[nr++] = bytenr;
5992 		}
5993 	}
5994 
5995 	*logical = buf;
5996 	*naddrs = nr;
5997 	*stripe_len = rmap_len;
5998 
5999 	free_extent_map(em);
6000 	return 0;
6001 }
6002 
6003 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6004 {
6005 	bio->bi_private = bbio->private;
6006 	bio->bi_end_io = bbio->end_io;
6007 	bio_endio(bio);
6008 
6009 	btrfs_put_bbio(bbio);
6010 }
6011 
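/*
 * Completion handler of the per-stripe bios.  I/O errors are counted in
 * the device statistics, and once the last pending stripe completes the
 * original bio is ended, with an error only if more stripes failed than
 * the profile can tolerate.
 */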
6012 static void btrfs_end_bio(struct bio *bio)
6013 {
6014 	struct btrfs_bio *bbio = bio->bi_private;
6015 	int is_orig_bio = 0;
6016 
6017 	if (bio->bi_status) {
6018 		atomic_inc(&bbio->error);
6019 		if (bio->bi_status == BLK_STS_IOERR ||
6020 		    bio->bi_status == BLK_STS_TARGET) {
6021 			unsigned int stripe_index =
6022 				btrfs_io_bio(bio)->stripe_index;
6023 			struct btrfs_device *dev;
6024 
6025 			BUG_ON(stripe_index >= bbio->num_stripes);
6026 			dev = bbio->stripes[stripe_index].dev;
6027 			if (dev->bdev) {
6028 				if (bio_op(bio) == REQ_OP_WRITE)
6029 					btrfs_dev_stat_inc(dev,
6030 						BTRFS_DEV_STAT_WRITE_ERRS);
6031 				else
6032 					btrfs_dev_stat_inc(dev,
6033 						BTRFS_DEV_STAT_READ_ERRS);
6034 				if (bio->bi_opf & REQ_PREFLUSH)
6035 					btrfs_dev_stat_inc(dev,
6036 						BTRFS_DEV_STAT_FLUSH_ERRS);
6037 				btrfs_dev_stat_print_on_error(dev);
6038 			}
6039 		}
6040 	}
6041 
6042 	if (bio == bbio->orig_bio)
6043 		is_orig_bio = 1;
6044 
6045 	btrfs_bio_counter_dec(bbio->fs_info);
6046 
6047 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6048 		if (!is_orig_bio) {
6049 			bio_put(bio);
6050 			bio = bbio->orig_bio;
6051 		}
6052 
6053 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6054 		/* only send an error to the higher layers if it is
6055 		 * beyond the tolerance of the btrfs bio
6056 		 */
6057 		if (atomic_read(&bbio->error) > bbio->max_errors) {
6058 			bio->bi_status = BLK_STS_IOERR;
6059 		} else {
6060 			/*
6061 			 * this bio is actually up to date, we didn't
6062 			 * go over the max number of errors
6063 			 */
6064 			bio->bi_status = 0;
6065 		}
6066 
6067 		btrfs_end_bbio(bbio, bio);
6068 	} else if (!is_orig_bio) {
6069 		bio_put(bio);
6070 	}
6071 }
6072 
6073 /*
6074  * see run_scheduled_bios for a description of why bios are collected for
6075  * async submit.
6076  *
6077  * This will add one bio to the pending list for a device and make sure
6078  * the work struct is scheduled.
6079  */
6080 static noinline void btrfs_schedule_bio(struct btrfs_device *device,
6081 					struct bio *bio)
6082 {
6083 	struct btrfs_fs_info *fs_info = device->fs_info;
6084 	int should_queue = 1;
6085 	struct btrfs_pending_bios *pending_bios;
6086 
6087 	if (device->missing || !device->bdev) {
6088 		bio_io_error(bio);
6089 		return;
6090 	}
6091 
6092 	/* don't bother with additional async steps for reads, right now */
6093 	if (bio_op(bio) == REQ_OP_READ) {
6094 		bio_get(bio);
6095 		btrfsic_submit_bio(bio);
6096 		bio_put(bio);
6097 		return;
6098 	}
6099 
6100 	/*
6101 	 * nr_async_bios allows us to reliably return congestion to the
6102 	 * higher layers.  Otherwise, the async bio makes it appear we have
6103 	 * made progress against dirty pages when we've really just put it
6104 	 * on a queue for later
6105 	 */
6106 	atomic_inc(&fs_info->nr_async_bios);
6107 	WARN_ON(bio->bi_next);
6108 	bio->bi_next = NULL;
6109 
6110 	spin_lock(&device->io_lock);
6111 	if (op_is_sync(bio->bi_opf))
6112 		pending_bios = &device->pending_sync_bios;
6113 	else
6114 		pending_bios = &device->pending_bios;
6115 
6116 	if (pending_bios->tail)
6117 		pending_bios->tail->bi_next = bio;
6118 
6119 	pending_bios->tail = bio;
6120 	if (!pending_bios->head)
6121 		pending_bios->head = bio;
6122 	if (device->running_pending)
6123 		should_queue = 0;
6124 
6125 	spin_unlock(&device->io_lock);
6126 
6127 	if (should_queue)
6128 		btrfs_queue_work(fs_info->submit_workers, &device->work);
6129 }
6130 
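/*
 * Point a (possibly cloned) bio at one stripe of the bbio and submit it,
 * either directly or through the async submission queue.
 */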
6131 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6132 			      u64 physical, int dev_nr, int async)
6133 {
6134 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6135 	struct btrfs_fs_info *fs_info = bbio->fs_info;
6136 
6137 	bio->bi_private = bbio;
6138 	btrfs_io_bio(bio)->stripe_index = dev_nr;
6139 	bio->bi_end_io = btrfs_end_bio;
6140 	bio->bi_iter.bi_sector = physical >> 9;
6141 #ifdef DEBUG
6142 	{
6143 		struct rcu_string *name;
6144 
6145 		rcu_read_lock();
6146 		name = rcu_dereference(dev->name);
6147 		btrfs_debug(fs_info,
6148 			"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6149 			bio_op(bio), bio->bi_opf,
6150 			(u64)bio->bi_iter.bi_sector,
6151 			(u_long)dev->bdev->bd_dev, name->str, dev->devid,
6152 			bio->bi_iter.bi_size);
6153 		rcu_read_unlock();
6154 	}
6155 #endif
6156 	bio_set_dev(bio, dev->bdev);
6157 
6158 	btrfs_bio_counter_inc_noblocked(fs_info);
6159 
6160 	if (async)
6161 		btrfs_schedule_bio(dev, bio);
6162 	else
6163 		btrfsic_submit_bio(bio);
6164 }
6165 
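/*
 * Account a stripe that could not be submitted at all.  If it was the
 * last pending one, end the original bio, failing it only when the
 * error count exceeds the tolerance of the bbio.
 */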
6166 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6167 {
6168 	atomic_inc(&bbio->error);
6169 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6170 		/* Should be the original bio. */
6171 		WARN_ON(bio != bbio->orig_bio);
6172 
6173 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6174 		bio->bi_iter.bi_sector = logical >> 9;
6175 		if (atomic_read(&bbio->error) > bbio->max_errors)
6176 			bio->bi_status = BLK_STS_IOERR;
6177 		else
6178 			bio->bi_status = BLK_STS_OK;
6179 		btrfs_end_bbio(bbio, bio);
6180 	}
6181 }
6182 
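/*
 * Entry point for submitting a bio to the chunk layer: map the logical
 * range to its stripes and submit one bio per stripe.  RAID5/6 writes
 * and recovery reads are handed off to the raid56 code instead.
 */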
6183 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6184 			   int mirror_num, int async_submit)
6185 {
6186 	struct btrfs_device *dev;
6187 	struct bio *first_bio = bio;
6188 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6189 	u64 length = 0;
6190 	u64 map_length;
6191 	int ret;
6192 	int dev_nr;
6193 	int total_devs;
6194 	struct btrfs_bio *bbio = NULL;
6195 
6196 	length = bio->bi_iter.bi_size;
6197 	map_length = length;
6198 
6199 	btrfs_bio_counter_inc_blocked(fs_info);
6200 	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6201 				&map_length, &bbio, mirror_num, 1);
6202 	if (ret) {
6203 		btrfs_bio_counter_dec(fs_info);
6204 		return errno_to_blk_status(ret);
6205 	}
6206 
6207 	total_devs = bbio->num_stripes;
6208 	bbio->orig_bio = first_bio;
6209 	bbio->private = first_bio->bi_private;
6210 	bbio->end_io = first_bio->bi_end_io;
6211 	bbio->fs_info = fs_info;
6212 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6213 
6214 	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6215 	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6216 		/* In this case, map_length has been set to the length of
6217 		 * a single stripe, not the whole write */
6218 		if (bio_op(bio) == REQ_OP_WRITE) {
6219 			ret = raid56_parity_write(fs_info, bio, bbio,
6220 						  map_length);
6221 		} else {
6222 			ret = raid56_parity_recover(fs_info, bio, bbio,
6223 						    map_length, mirror_num, 1);
6224 		}
6225 
6226 		btrfs_bio_counter_dec(fs_info);
6227 		return errno_to_blk_status(ret);
6228 	}
6229 
6230 	if (map_length < length) {
6231 		btrfs_crit(fs_info,
6232 			   "mapping failed logical %llu bio len %llu len %llu",
6233 			   logical, length, map_length);
6234 		BUG();
6235 	}
6236 
6237 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6238 		dev = bbio->stripes[dev_nr].dev;
6239 		if (!dev || !dev->bdev ||
6240 		    (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
6241 			bbio_error(bbio, first_bio, logical);
6242 			continue;
6243 		}
6244 
6245 		if (dev_nr < total_devs - 1)
6246 			bio = btrfs_bio_clone(first_bio);
6247 		else
6248 			bio = first_bio;
6249 
6250 		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
6251 				  dev_nr, async_submit);
6252 	}
6253 	btrfs_bio_counter_dec(fs_info);
6254 	return BLK_STS_OK;
6255 }
6256 
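/*
 * Find a device by devid and optionally by uuid and fsid, searching the
 * filesystem devices and all of their seed device lists.
 */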
6257 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
6258 				       u8 *uuid, u8 *fsid)
6259 {
6260 	struct btrfs_device *device;
6261 	struct btrfs_fs_devices *cur_devices;
6262 
6263 	cur_devices = fs_info->fs_devices;
6264 	while (cur_devices) {
6265 		if (!fsid ||
6266 		    !memcmp(cur_devices->fsid, fsid, BTRFS_FSID_SIZE)) {
6267 			device = find_device(cur_devices, devid, uuid);
6268 			if (device)
6269 				return device;
6270 		}
6271 		cur_devices = cur_devices->seed;
6272 	}
6273 	return NULL;
6274 }
6275 
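/*
 * Allocate a stub btrfs_device for a device that is referenced by the
 * metadata but not currently present, and add it to @fs_devices as a
 * missing device.
 */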
6276 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6277 					    u64 devid, u8 *dev_uuid)
6278 {
6279 	struct btrfs_device *device;
6280 
6281 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6282 	if (IS_ERR(device))
6283 		return NULL;
6284 
6285 	list_add(&device->dev_list, &fs_devices->devices);
6286 	device->fs_devices = fs_devices;
6287 	fs_devices->num_devices++;
6288 
6289 	device->missing = 1;
6290 	fs_devices->missing_devices++;
6291 
6292 	return device;
6293 }
6294 
6295 /**
6296  * btrfs_alloc_device - allocate struct btrfs_device
6297  * @fs_info:	used only for generating a new devid, can be NULL if
6298  *		devid is provided (i.e. @devid != NULL).
6299  * @devid:	a pointer to devid for this device.  If NULL a new devid
6300  *		is generated.
6301  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6302  *		is generated.
6303  *
6304  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6305  * on error.  Returned struct is not linked onto any lists and can be
6306  * destroyed with kfree() right away.
6307  */
6308 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6309 					const u64 *devid,
6310 					const u8 *uuid)
6311 {
6312 	struct btrfs_device *dev;
6313 	u64 tmp;
6314 
6315 	if (WARN_ON(!devid && !fs_info))
6316 		return ERR_PTR(-EINVAL);
6317 
6318 	dev = __alloc_device();
6319 	if (IS_ERR(dev))
6320 		return dev;
6321 
6322 	if (devid)
6323 		tmp = *devid;
6324 	else {
6325 		int ret;
6326 
6327 		ret = find_next_devid(fs_info, &tmp);
6328 		if (ret) {
6329 			kfree(dev);
6330 			return ERR_PTR(ret);
6331 		}
6332 	}
6333 	dev->devid = tmp;
6334 
6335 	if (uuid)
6336 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6337 	else
6338 		generate_random_uuid(dev->uuid);
6339 
6340 	btrfs_init_work(&dev->work, btrfs_submit_helper,
6341 			pending_bios_fn, NULL, NULL);
6342 
6343 	return dev;
6344 }
6345 
6346 /* Return -EIO on any error, otherwise return 0. */
6347 static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
6348 				   struct extent_buffer *leaf,
6349 				   struct btrfs_chunk *chunk, u64 logical)
6350 {
6351 	u64 length;
6352 	u64 stripe_len;
6353 	u16 num_stripes;
6354 	u16 sub_stripes;
6355 	u64 type;
6356 	u64 features;
6357 	bool mixed = false;
6358 
6359 	length = btrfs_chunk_length(leaf, chunk);
6360 	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6361 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6362 	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6363 	type = btrfs_chunk_type(leaf, chunk);
6364 
6365 	if (!num_stripes) {
6366 		btrfs_err(fs_info, "invalid chunk num_stripes: %u",
6367 			  num_stripes);
6368 		return -EIO;
6369 	}
6370 	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
6371 		btrfs_err(fs_info, "invalid chunk logical %llu", logical);
6372 		return -EIO;
6373 	}
6374 	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
6375 		btrfs_err(fs_info, "invalid chunk sectorsize %u",
6376 			  btrfs_chunk_sector_size(leaf, chunk));
6377 		return -EIO;
6378 	}
6379 	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
6380 		btrfs_err(fs_info, "invalid chunk length %llu", length);
6381 		return -EIO;
6382 	}
6383 	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
6384 		btrfs_err(fs_info, "invalid chunk stripe length: %llu",
6385 			  stripe_len);
6386 		return -EIO;
6387 	}
6388 	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6389 	    type) {
6390 		btrfs_err(fs_info, "unrecognized chunk type: %llu",
6391 			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
6392 			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6393 			  btrfs_chunk_type(leaf, chunk));
6394 		return -EIO;
6395 	}
6396 
6397 	if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
6398 		btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
6399 		return -EIO;
6400 	}
6401 
6402 	if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
6403 	    (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
6404 		btrfs_err(fs_info,
6405 			"system chunk with data or metadata type: 0x%llx", type);
6406 		return -EIO;
6407 	}
6408 
6409 	features = btrfs_super_incompat_flags(fs_info->super_copy);
6410 	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
6411 		mixed = true;
6412 
6413 	if (!mixed) {
6414 		if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
6415 		    (type & BTRFS_BLOCK_GROUP_DATA)) {
6416 			btrfs_err(fs_info,
6417 			"mixed chunk type in non-mixed mode: 0x%llx", type);
6418 			return -EIO;
6419 		}
6420 	}
6421 
6422 	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
6423 	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
6424 	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
6425 	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
6426 	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
6427 	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
6428 	     num_stripes != 1)) {
6429 		btrfs_err(fs_info,
6430 			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
6431 			num_stripes, sub_stripes,
6432 			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
6433 		return -EIO;
6434 	}
6435 
6436 	return 0;
6437 }
6438 
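/*
 * Read one chunk item, resolve its stripe devices and insert the
 * corresponding extent map into the mapping tree so that logical to
 * physical lookups work for this chunk.
 */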
6439 static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
6440 			  struct extent_buffer *leaf,
6441 			  struct btrfs_chunk *chunk)
6442 {
6443 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
6444 	struct map_lookup *map;
6445 	struct extent_map *em;
6446 	u64 logical;
6447 	u64 length;
6448 	u64 devid;
6449 	u8 uuid[BTRFS_UUID_SIZE];
6450 	int num_stripes;
6451 	int ret;
6452 	int i;
6453 
6454 	logical = key->offset;
6455 	length = btrfs_chunk_length(leaf, chunk);
6456 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6457 
6458 	ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
6459 	if (ret)
6460 		return ret;
6461 
6462 	read_lock(&map_tree->map_tree.lock);
6463 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6464 	read_unlock(&map_tree->map_tree.lock);
6465 
6466 	/* already mapped? */
6467 	if (em && em->start <= logical && em->start + em->len > logical) {
6468 		free_extent_map(em);
6469 		return 0;
6470 	} else if (em) {
6471 		free_extent_map(em);
6472 	}
6473 
6474 	em = alloc_extent_map();
6475 	if (!em)
6476 		return -ENOMEM;
6477 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6478 	if (!map) {
6479 		free_extent_map(em);
6480 		return -ENOMEM;
6481 	}
6482 
6483 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6484 	em->map_lookup = map;
6485 	em->start = logical;
6486 	em->len = length;
6487 	em->orig_start = 0;
6488 	em->block_start = 0;
6489 	em->block_len = em->len;
6490 
6491 	map->num_stripes = num_stripes;
6492 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6493 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6494 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6495 	map->type = btrfs_chunk_type(leaf, chunk);
6496 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6497 	for (i = 0; i < num_stripes; i++) {
6498 		map->stripes[i].physical =
6499 			btrfs_stripe_offset_nr(leaf, chunk, i);
6500 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6501 		read_extent_buffer(leaf, uuid, (unsigned long)
6502 				   btrfs_stripe_dev_uuid_nr(chunk, i),
6503 				   BTRFS_UUID_SIZE);
6504 		map->stripes[i].dev = btrfs_find_device(fs_info, devid,
6505 							uuid, NULL);
6506 		if (!map->stripes[i].dev &&
6507 		    !btrfs_test_opt(fs_info, DEGRADED)) {
6508 			free_extent_map(em);
6509 			btrfs_report_missing_device(fs_info, devid, uuid);
6510 			return -EIO;
6511 		}
6512 		if (!map->stripes[i].dev) {
6513 			map->stripes[i].dev =
6514 				add_missing_dev(fs_info->fs_devices, devid,
6515 						uuid);
6516 			if (!map->stripes[i].dev) {
6517 				free_extent_map(em);
6518 				return -EIO;
6519 			}
6520 			btrfs_report_missing_device(fs_info, devid, uuid);
6521 		}
6522 		map->stripes[i].dev->in_fs_metadata = 1;
6523 	}
6524 
6525 	write_lock(&map_tree->map_tree.lock);
6526 	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6527 	write_unlock(&map_tree->map_tree.lock);
6528 	if (ret < 0) {
6529 		btrfs_err(fs_info,
6530 			  "failed to add chunk map, start=%llu len=%llu: %d",
6531 			  em->start, em->len, ret);
6532 	}
6533 	free_extent_map(em);
6534 
6535 	return ret;
6536 }
6537 
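/* Fill in the in-memory device from its on-disk dev item. */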
6538 static void fill_device_from_item(struct extent_buffer *leaf,
6539 				 struct btrfs_dev_item *dev_item,
6540 				 struct btrfs_device *device)
6541 {
6542 	unsigned long ptr;
6543 
6544 	device->devid = btrfs_device_id(leaf, dev_item);
6545 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6546 	device->total_bytes = device->disk_total_bytes;
6547 	device->commit_total_bytes = device->disk_total_bytes;
6548 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6549 	device->commit_bytes_used = device->bytes_used;
6550 	device->type = btrfs_device_type(leaf, dev_item);
6551 	device->io_align = btrfs_device_io_align(leaf, dev_item);
6552 	device->io_width = btrfs_device_io_width(leaf, dev_item);
6553 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6554 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6555 	device->is_tgtdev_for_dev_replace = 0;
6556 
6557 	ptr = btrfs_device_uuid(dev_item);
6558 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6559 }
6560 
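/*
 * Find the fs_devices of the seed filesystem with the given fsid,
 * opening its devices and linking it into the seed list of the current
 * filesystem if it has not been opened yet.
 */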
6561 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6562 						  u8 *fsid)
6563 {
6564 	struct btrfs_fs_devices *fs_devices;
6565 	int ret;
6566 
6567 	BUG_ON(!mutex_is_locked(&uuid_mutex));
6568 	ASSERT(fsid);
6569 
6570 	fs_devices = fs_info->fs_devices->seed;
6571 	while (fs_devices) {
6572 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6573 			return fs_devices;
6574 
6575 		fs_devices = fs_devices->seed;
6576 	}
6577 
6578 	fs_devices = find_fsid(fsid);
6579 	if (!fs_devices) {
6580 		if (!btrfs_test_opt(fs_info, DEGRADED))
6581 			return ERR_PTR(-ENOENT);
6582 
6583 		fs_devices = alloc_fs_devices(fsid);
6584 		if (IS_ERR(fs_devices))
6585 			return fs_devices;
6586 
6587 		fs_devices->seeding = 1;
6588 		fs_devices->opened = 1;
6589 		return fs_devices;
6590 	}
6591 
6592 	fs_devices = clone_fs_devices(fs_devices);
6593 	if (IS_ERR(fs_devices))
6594 		return fs_devices;
6595 
6596 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6597 				   fs_info->bdev_holder);
6598 	if (ret) {
6599 		free_fs_devices(fs_devices);
6600 		fs_devices = ERR_PTR(ret);
6601 		goto out;
6602 	}
6603 
6604 	if (!fs_devices->seeding) {
6605 		__btrfs_close_devices(fs_devices);
6606 		free_fs_devices(fs_devices);
6607 		fs_devices = ERR_PTR(-EINVAL);
6608 		goto out;
6609 	}
6610 
6611 	fs_devices->seed = fs_info->fs_devices->seed;
6612 	fs_info->fs_devices->seed = fs_devices;
6613 out:
6614 	return fs_devices;
6615 }
6616 
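/*
 * Read one dev item and update the matching in-memory btrfs_device,
 * creating stub devices for missing ones and pulling in seed devices
 * when the item belongs to a different fsid.
 */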
6617 static int read_one_dev(struct btrfs_fs_info *fs_info,
6618 			struct extent_buffer *leaf,
6619 			struct btrfs_dev_item *dev_item)
6620 {
6621 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6622 	struct btrfs_device *device;
6623 	u64 devid;
6624 	int ret;
6625 	u8 fs_uuid[BTRFS_FSID_SIZE];
6626 	u8 dev_uuid[BTRFS_UUID_SIZE];
6627 
6628 	devid = btrfs_device_id(leaf, dev_item);
6629 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6630 			   BTRFS_UUID_SIZE);
6631 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6632 			   BTRFS_FSID_SIZE);
6633 
6634 	if (memcmp(fs_uuid, fs_info->fsid, BTRFS_FSID_SIZE)) {
6635 		fs_devices = open_seed_devices(fs_info, fs_uuid);
6636 		if (IS_ERR(fs_devices))
6637 			return PTR_ERR(fs_devices);
6638 	}
6639 
6640 	device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
6641 	if (!device) {
6642 		if (!btrfs_test_opt(fs_info, DEGRADED)) {
6643 			btrfs_report_missing_device(fs_info, devid, dev_uuid);
6644 			return -EIO;
6645 		}
6646 
6647 		device = add_missing_dev(fs_devices, devid, dev_uuid);
6648 		if (!device)
6649 			return -ENOMEM;
6650 		btrfs_report_missing_device(fs_info, devid, dev_uuid);
6651 	} else {
6652 		if (!device->bdev) {
6653 			btrfs_report_missing_device(fs_info, devid, dev_uuid);
6654 			if (!btrfs_test_opt(fs_info, DEGRADED))
6655 				return -EIO;
6656 		}
6657 
6658 		if (!device->bdev && !device->missing) {
6659 			/*
6660 			 * This happens when a device that was properly set up
6661 			 * in the device info lists suddenly goes bad.
6662 			 * device->bdev is NULL, so we have to set
6663 			 * device->missing to one here.
6664 			 */
6665 			device->fs_devices->missing_devices++;
6666 			device->missing = 1;
6667 		}
6668 
6669 		/* Move the device to its own fs_devices */
6670 		if (device->fs_devices != fs_devices) {
6671 			ASSERT(device->missing);
6672 
6673 			list_move(&device->dev_list, &fs_devices->devices);
6674 			device->fs_devices->num_devices--;
6675 			fs_devices->num_devices++;
6676 
6677 			device->fs_devices->missing_devices--;
6678 			fs_devices->missing_devices++;
6679 
6680 			device->fs_devices = fs_devices;
6681 		}
6682 	}
6683 
6684 	if (device->fs_devices != fs_info->fs_devices) {
6685 		BUG_ON(device->writeable);
6686 		if (device->generation !=
6687 		    btrfs_device_generation(leaf, dev_item))
6688 			return -EINVAL;
6689 	}
6690 
6691 	fill_device_from_item(leaf, dev_item, device);
6692 	device->in_fs_metadata = 1;
6693 	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
6694 		device->fs_devices->total_rw_bytes += device->total_bytes;
6695 		atomic64_add(device->total_bytes - device->bytes_used,
6696 				&fs_info->free_chunk_space);
6697 	}
6698 	ret = 0;
6699 	return ret;
6700 }
6701 
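/*
 * Read the chunk items embedded in the superblock's sys_chunk_array and
 * set up the initial chunk mappings needed to read the chunk tree
 * itself.
 */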
6702 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6703 {
6704 	struct btrfs_root *root = fs_info->tree_root;
6705 	struct btrfs_super_block *super_copy = fs_info->super_copy;
6706 	struct extent_buffer *sb;
6707 	struct btrfs_disk_key *disk_key;
6708 	struct btrfs_chunk *chunk;
6709 	u8 *array_ptr;
6710 	unsigned long sb_array_offset;
6711 	int ret = 0;
6712 	u32 num_stripes;
6713 	u32 array_size;
6714 	u32 len = 0;
6715 	u32 cur_offset;
6716 	u64 type;
6717 	struct btrfs_key key;
6718 
6719 	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6720 	/*
6721 	 * This will create an extent buffer of nodesize; the superblock size
6722 	 * is fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6723 	 * overallocate, but we can keep it as-is: only the first page is used.
6724 	 */
6725 	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
6726 	if (IS_ERR(sb))
6727 		return PTR_ERR(sb);
6728 	set_extent_buffer_uptodate(sb);
6729 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6730 	/*
6731 	 * The sb extent buffer is artificial and just used to read the system array.
6732 	 * The set_extent_buffer_uptodate() call does not properly mark all its
6733 	 * pages up-to-date when the page is larger: extent does not cover the
6734 	 * whole page and consequently check_page_uptodate does not find all
6735 	 * the page's extents up-to-date (the hole beyond sb),
6736 	 * write_extent_buffer then triggers a WARN_ON.
6737 	 *
6738 	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6739 	 * but sb spans only this function. Add an explicit SetPageUptodate call
6740 	 * to silence the warning, e.g. on PowerPC 64.
6741 	 */
6742 	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6743 		SetPageUptodate(sb->pages[0]);
6744 
6745 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6746 	array_size = btrfs_super_sys_array_size(super_copy);
6747 
6748 	array_ptr = super_copy->sys_chunk_array;
6749 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6750 	cur_offset = 0;
6751 
6752 	while (cur_offset < array_size) {
6753 		disk_key = (struct btrfs_disk_key *)array_ptr;
6754 		len = sizeof(*disk_key);
6755 		if (cur_offset + len > array_size)
6756 			goto out_short_read;
6757 
6758 		btrfs_disk_key_to_cpu(&key, disk_key);
6759 
6760 		array_ptr += len;
6761 		sb_array_offset += len;
6762 		cur_offset += len;
6763 
6764 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6765 			chunk = (struct btrfs_chunk *)sb_array_offset;
6766 			/*
6767 			 * At least one btrfs_chunk with one stripe must be
6768 			 * present; the exact stripe count check comes afterwards.
6769 			 */
6770 			len = btrfs_chunk_item_size(1);
6771 			if (cur_offset + len > array_size)
6772 				goto out_short_read;
6773 
6774 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6775 			if (!num_stripes) {
6776 				btrfs_err(fs_info,
6777 					"invalid number of stripes %u in sys_array at offset %u",
6778 					num_stripes, cur_offset);
6779 				ret = -EIO;
6780 				break;
6781 			}
6782 
6783 			type = btrfs_chunk_type(sb, chunk);
6784 			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6785 				btrfs_err(fs_info,
6786 			    "invalid chunk type %llu in sys_array at offset %u",
6787 					type, cur_offset);
6788 				ret = -EIO;
6789 				break;
6790 			}
6791 
6792 			len = btrfs_chunk_item_size(num_stripes);
6793 			if (cur_offset + len > array_size)
6794 				goto out_short_read;
6795 
6796 			ret = read_one_chunk(fs_info, &key, sb, chunk);
6797 			if (ret)
6798 				break;
6799 		} else {
6800 			btrfs_err(fs_info,
6801 			    "unexpected item type %u in sys_array at offset %u",
6802 				  (u32)key.type, cur_offset);
6803 			ret = -EIO;
6804 			break;
6805 		}
6806 		array_ptr += len;
6807 		sb_array_offset += len;
6808 		cur_offset += len;
6809 	}
6810 	clear_extent_buffer_uptodate(sb);
6811 	free_extent_buffer_stale(sb);
6812 	return ret;
6813 
6814 out_short_read:
6815 	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
6816 			len, cur_offset);
6817 	clear_extent_buffer_uptodate(sb);
6818 	free_extent_buffer_stale(sb);
6819 	return -EIO;
6820 }
6821 
6822 void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, u64 devid,
6823 				 u8 *uuid)
6824 {
6825 	btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", devid, uuid);
6826 }
6827 
6828 /*
6829  * Check if all chunks in the fs are OK for read-write degraded mount
6830  *
6831  * Return true if all chunks meet the minimal RW mount requirements.
6832  * Return false if any chunk doesn't meet the minimal RW mount requirements.
6833  */
6834 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info)
6835 {
6836 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
6837 	struct extent_map *em;
6838 	u64 next_start = 0;
6839 	bool ret = true;
6840 
6841 	read_lock(&map_tree->map_tree.lock);
6842 	em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1);
6843 	read_unlock(&map_tree->map_tree.lock);
6844 	/* No chunk at all? Return false anyway */
6845 	if (!em) {
6846 		ret = false;
6847 		goto out;
6848 	}
6849 	while (em) {
6850 		struct map_lookup *map;
6851 		int missing = 0;
6852 		int max_tolerated;
6853 		int i;
6854 
6855 		map = em->map_lookup;
6856 		max_tolerated =
6857 			btrfs_get_num_tolerated_disk_barrier_failures(
6858 					map->type);
6859 		for (i = 0; i < map->num_stripes; i++) {
6860 			struct btrfs_device *dev = map->stripes[i].dev;
6861 
6862 			if (!dev || !dev->bdev || dev->missing ||
6863 			    dev->last_flush_error)
6864 				missing++;
6865 		}
6866 		if (missing > max_tolerated) {
6867 			btrfs_warn(fs_info,
6868 	"chunk %llu missing %d devices, max tolerance is %d for writeable mount",
6869 				   em->start, missing, max_tolerated);
6870 			free_extent_map(em);
6871 			ret = false;
6872 			goto out;
6873 		}
6874 		next_start = extent_map_end(em);
6875 		free_extent_map(em);
6876 
6877 		read_lock(&map_tree->map_tree.lock);
6878 		em = lookup_extent_mapping(&map_tree->map_tree, next_start,
6879 					   (u64)(-1) - next_start);
6880 		read_unlock(&map_tree->map_tree.lock);
6881 	}
6882 out:
6883 	return ret;
6884 }
6885 
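/*
 * Read all device items and chunk items from the chunk tree, then
 * validate the device count and size totals against the superblock.
 */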
6886 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
6887 {
6888 	struct btrfs_root *root = fs_info->chunk_root;
6889 	struct btrfs_path *path;
6890 	struct extent_buffer *leaf;
6891 	struct btrfs_key key;
6892 	struct btrfs_key found_key;
6893 	int ret;
6894 	int slot;
6895 	u64 total_dev = 0;
6896 
6897 	path = btrfs_alloc_path();
6898 	if (!path)
6899 		return -ENOMEM;
6900 
6901 	mutex_lock(&uuid_mutex);
6902 	mutex_lock(&fs_info->chunk_mutex);
6903 
6904 	/*
6905 	 * Read all device items, and then all the chunk items. All
6906 	 * device items are found before any chunk item (their object id
6907 	 * is smaller than the lowest possible object id for a chunk
6908 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6909 	 */
6910 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6911 	key.offset = 0;
6912 	key.type = 0;
6913 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6914 	if (ret < 0)
6915 		goto error;
6916 	while (1) {
6917 		leaf = path->nodes[0];
6918 		slot = path->slots[0];
6919 		if (slot >= btrfs_header_nritems(leaf)) {
6920 			ret = btrfs_next_leaf(root, path);
6921 			if (ret == 0)
6922 				continue;
6923 			if (ret < 0)
6924 				goto error;
6925 			break;
6926 		}
6927 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6928 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6929 			struct btrfs_dev_item *dev_item;
6930 			dev_item = btrfs_item_ptr(leaf, slot,
6931 						  struct btrfs_dev_item);
6932 			ret = read_one_dev(fs_info, leaf, dev_item);
6933 			if (ret)
6934 				goto error;
6935 			total_dev++;
6936 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6937 			struct btrfs_chunk *chunk;
6938 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6939 			ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
6940 			if (ret)
6941 				goto error;
6942 		}
6943 		path->slots[0]++;
6944 	}
6945 
6946 	/*
6947 	 * After loading chunk tree, we've got all device information,
6948 	 * do another round of validation checks.
6949 	 */
6950 	if (total_dev != fs_info->fs_devices->total_devices) {
6951 		btrfs_err(fs_info,
6952 	   "super_num_devices %llu mismatch with num_devices %llu found here",
6953 			  btrfs_super_num_devices(fs_info->super_copy),
6954 			  total_dev);
6955 		ret = -EINVAL;
6956 		goto error;
6957 	}
6958 	if (btrfs_super_total_bytes(fs_info->super_copy) <
6959 	    fs_info->fs_devices->total_rw_bytes) {
6960 		btrfs_err(fs_info,
6961 	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
6962 			  btrfs_super_total_bytes(fs_info->super_copy),
6963 			  fs_info->fs_devices->total_rw_bytes);
6964 		ret = -EINVAL;
6965 		goto error;
6966 	}
6967 	ret = 0;
6968 error:
6969 	mutex_unlock(&fs_info->chunk_mutex);
6970 	mutex_unlock(&uuid_mutex);
6971 
6972 	btrfs_free_path(path);
6973 	return ret;
6974 }
6975 
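/*
 * Make every device, including the devices of all seed filesystems,
 * point back at the fs_info it now belongs to.
 */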
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->fs_info = fs_info;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}

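/* Zero all in-memory error counters of @dev. */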
static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}

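/*
 * Read the persistent error counters of every device from the device
 * tree at mount time. A device without a dev_stats item simply starts
 * with zeroed counters; in both cases the in-memory stats become valid.
 */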
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

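/*
 * Persist the in-memory error counters of one device in its dev_stats
 * item. An existing item that is too small is deleted and re-created
 * with the current size.
 */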
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				struct btrfs_device *device)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
			      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values, which
		 * requires reading the in-memory counters. Such control
		 * dependencies require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, fs_info, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

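/* Account one more error of the given type and log the updated counters. */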
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

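/*
 * Rate-limited dump of all error counters, skipped until the on-disk
 * stats have been read.
 */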
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

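/* Log the counters once at mount time, unless they are all zero. */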
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

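/*
 * Copy the error counters of one device into @stats for the get dev_stats
 * ioctl, optionally resetting them when BTRFS_DEV_STATS_RESET is set in
 * stats->flags.
 */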
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

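/*
 * Wipe the magic of every superblock copy on @bdev so the device is no
 * longer detected as a btrfs member, then notify udev and libblkid so
 * user space picks up the change.
 */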
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}

/*
 * Update the size of all devices, which is used for writing out the
 * super blocks.
 */
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *curr, *next;

	if (list_empty(&fs_devices->resized_devices))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
				 resized_list) {
		list_del_init(&curr->resized_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
	}
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);
}

/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
					struct btrfs_transaction *transaction)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	int i;

	if (list_empty(&transaction->pending_chunks))
		return;

	/* In order to kick the device replace finish process */
	mutex_lock(&fs_info->chunk_mutex);
	list_for_each_entry(em, &transaction->pending_chunks, list) {
		map = em->map_lookup;

		for (i = 0; i < map->num_stripes; i++) {
			dev = map->stripes[i].dev;
			dev->commit_bytes_used = dev->bytes_used;
			dev->has_pending_chunks = false;
		}
	}
	mutex_unlock(&fs_info->chunk_mutex);
}

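/* Link all fs_devices, including seed devices, to the given fs_info. */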
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

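/* Clear the fs_info pointers set by btrfs_set_fs_info_ptr(). */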
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}