// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0,	/* 0 == as many as possible */
		.devs_min = 4,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid10",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid1",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 3,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 3,
		.ncopies = 3,
		.nparity = 0,
		.raid_name = "raid1c3",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 4,
		.devs_min = 4,
		.tolerated_failures = 3,
		.devs_increment = 4,
		.ncopies = 4,
		.nparity = 0,
		.raid_name = "raid1c4",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "dup",
		.bg_flag = BTRFS_BLOCK_GROUP_DUP,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "raid0",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "single",
		.bg_flag = 0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 1,
		.raid_name = "raid5",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 2,
		.raid_name = "raid6",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
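
/*
 * Illustrative sketch, not part of the driver: the table above already
 * carries enough information to derive a profile's usable-capacity ratio.
 * With ncopies copies of each byte and nparity parity stripes per full
 * stripe, N devices contributing per_dev bytes each yield roughly
 * per_dev * (N - nparity) / ncopies usable bytes (raid6 on 6 devices: 4/6
 * of the raw space; raid1 on 2 devices: 1/2; dup on 1 device: 1/2). The
 * hypothetical helper below shows that arithmetic only and is compiled out.
 */
#if 0 /* example only */
static u64 raid_usable_bytes(enum btrfs_raid_types type, u64 per_dev, int ndevs)
{
	const struct btrfs_raid_attr *attr = &btrfs_raid_array[type];

	return div_u64(per_dev * (ndevs - attr->nparity), attr->ncopies);
}
#endif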

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
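
/*
 * Usage sketch (illustrative): for bg_flags of DATA|RAID1 the helper above
 * produces "data|raid1", with the trailing '|' trimmed; flags it cannot name
 * are emitted as a raw "0x%llx" value.
 */
#if 0 /* example only */
	char buf[64];

	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
	pr_info("profile: %s\n", buf);	/* prints "profile: data|raid1" */
#endif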

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
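
/*
 * A minimal sketch of the nesting order documented above (illustrative only,
 * not a real code path): outer locks are taken first and dropped last.
 */
#if 0 /* example only */
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	/* ... manipulate devices and chunks ... */
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);
#endif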

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid: if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}
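
/*
 * Usage sketch (illustrative): callers must check the result with IS_ERR(),
 * and an allocation that was never linked onto fs_uuids can be dropped with
 * kfree() as the comment above notes. "fsid" is a caller-provided u8 array.
 */
#if 0 /* example only */
	struct btrfs_fs_devices *fs_devs;

	fs_devs = alloc_fs_devices(fsid, NULL);
	if (IS_ERR(fs_devs))
		return ERR_CAST(fs_devs);
#endif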

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(fs_info, &dev->alloc_state,
			    IO_TREE_DEVICE_ALLOC_STATE, NULL);

	return dev;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}
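
/*
 * Usage sketch (illustrative, error handling elided): on success the caller
 * owns both the block device and the super block page and must release them
 * again via btrfs_release_disk_super() and blkdev_put().
 */
#if 0 /* example only */
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	int ret;

	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, holder, 1, &bdev,
				    &disk_super);
	if (ret)
		return ret;
	/* ... inspect disk_super ... */
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, FMODE_READ);
#endif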

/*
 * Check if the device in the path matches the device in the given struct
 * btrfs_device.
 *
 * Returns:
 *   true  If it is the same device.
 *   false If it is not the same device or on error.
 */
static bool device_matched(const struct btrfs_device *device, const char *path)
{
	char *device_name;
	struct block_device *bdev_old;
	struct block_device *bdev_new;

	/*
	 * If we are looking for a device with the matching dev_t, then skip
	 * device without a name (a missing device).
	 */
	if (!device->name)
		return false;

	device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
	if (!device_name)
		return false;

	rcu_read_lock();
	scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
	rcu_read_unlock();

	bdev_old = lookup_bdev(device_name);
	kfree(device_name);
	if (IS_ERR(bdev_old))
		return false;

	bdev_new = lookup_bdev(path);
	if (IS_ERR(bdev_new))
		return false;

	if (bdev_old == bdev_new)
		return true;

	return false;
}

/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * @path:	 Optional. When provided, it will release all unmounted
 *		 devices matching this path only.
 * @skip_device: Optional. Will skip this device when searching for the
 *		 stale devices.
 *
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device_matched(device, path))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
				 struct btrfs_device *device, fmode_t flags,
				 void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently the device didn't
	 * observe it. Meaning our fsid will be different than theirs. We need
	 * to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 *     are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time, fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      int step, struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, step, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, step, &latest_dev);

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like bd_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}
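
/*
 * For reference: btrfs keeps up to BTRFS_SUPER_MIRROR_MAX copies of the
 * super block, with btrfs_sb_offset(0) at 64KiB and the mirror copies at
 * 64MiB and 256GiB. The scan path (btrfs_scan_one_device() below) reads
 * only copy 0 via the helper above.
 */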

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);

	/*
	 * Avoid an exclusive open here (flags |= FMODE_EXCL), as systemd-udev
	 * may initiate the device scan which may race with the user's mount
	 * or mkfs command, resulting in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for FMODE_EXCL. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	disk_super = btrfs_read_disk_super(bdev, bytenr);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}
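
/*
 * Usage sketch (illustrative): the caller must hold uuid_mutex, and the
 * returned device is only a registration handle; the block device itself
 * has already been closed again by the time this returns.
 */
#if 0 /* example only */
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	device = btrfs_scan_one_device("/dev/sdb", FMODE_READ, holder);
	mutex_unlock(&uuid_mutex);
	if (IS_ERR(device))
		return PTR_ERR(device);
#endif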

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
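
/*
 * Example of the overlap test above: with CHUNK_ALLOCATED bits covering
 * [16MiB, 32MiB - 1] and a caller asking about *start == 20MiB, the second
 * in_range() check matches, *start is advanced past the range to 32MiB, and
 * true is returned so the caller can retry the search from there.
 */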

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	default:
		BUG();
	}
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	/*
	 * Check before we set max_hole_start, otherwise we could end up
	 * sending back this offset anyway.
	 */
	if (contains_pending_extent(device, hole_start, *hole_size)) {
		if (hole_end >= *hole_start)
			*hole_size = hole_end - *hole_start;
		else
			*hole_size = 0;
		changed = true;
	}

	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/* No extra check */
		break;
	default:
		BUG();
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start,
				      u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
1816
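/*
 * Return the logical address right after the last mapped chunk, i.e. the
 * start offset for the next chunk to be allocated.
 */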
1817 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1818 {
1819 struct extent_map_tree *em_tree;
1820 struct extent_map *em;
1821 struct rb_node *n;
1822 u64 ret = 0;
1823
1824 em_tree = &fs_info->mapping_tree;
1825 read_lock(&em_tree->lock);
1826 n = rb_last(&em_tree->map.rb_root);
1827 if (n) {
1828 em = rb_entry(n, struct extent_map, rb_node);
1829 ret = em->start + em->len;
1830 }
1831 read_unlock(&em_tree->lock);
1832
1833 return ret;
1834 }
1835
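/*
 * Find the highest devid stored in the chunk tree and return that value
 * plus one in @devid_ret, starting from 1 if no dev item exists yet.
 */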
1836 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1837 u64 *devid_ret)
1838 {
1839 int ret;
1840 struct btrfs_key key;
1841 struct btrfs_key found_key;
1842 struct btrfs_path *path;
1843
1844 path = btrfs_alloc_path();
1845 if (!path)
1846 return -ENOMEM;
1847
1848 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1849 key.type = BTRFS_DEV_ITEM_KEY;
1850 key.offset = (u64)-1;
1851
1852 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1853 if (ret < 0)
1854 goto error;
1855
1856 if (ret == 0) {
1857 /* Corruption */
1858 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1859 ret = -EUCLEAN;
1860 goto error;
1861 }
1862
1863 ret = btrfs_previous_item(fs_info->chunk_root, path,
1864 BTRFS_DEV_ITEMS_OBJECTID,
1865 BTRFS_DEV_ITEM_KEY);
1866 if (ret) {
1867 *devid_ret = 1;
1868 } else {
1869 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1870 path->slots[0]);
1871 *devid_ret = found_key.offset + 1;
1872 }
1873 ret = 0;
1874 error:
1875 btrfs_free_path(path);
1876 return ret;
1877 }
1878
1879 /*
1880 * The device information is stored in the chunk root.
1881 * The btrfs_device struct should be fully filled in.
1882 */
1883 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1884 struct btrfs_device *device)
1885 {
1886 int ret;
1887 struct btrfs_path *path;
1888 struct btrfs_dev_item *dev_item;
1889 struct extent_buffer *leaf;
1890 struct btrfs_key key;
1891 unsigned long ptr;
1892
1893 path = btrfs_alloc_path();
1894 if (!path)
1895 return -ENOMEM;
1896
1897 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1898 key.type = BTRFS_DEV_ITEM_KEY;
1899 key.offset = device->devid;
1900
1901 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1902 &key, sizeof(*dev_item));
1903 if (ret)
1904 goto out;
1905
1906 leaf = path->nodes[0];
1907 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1908
1909 btrfs_set_device_id(leaf, dev_item, device->devid);
1910 btrfs_set_device_generation(leaf, dev_item, 0);
1911 btrfs_set_device_type(leaf, dev_item, device->type);
1912 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1913 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1914 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1915 btrfs_set_device_total_bytes(leaf, dev_item,
1916 btrfs_device_get_disk_total_bytes(device));
1917 btrfs_set_device_bytes_used(leaf, dev_item,
1918 btrfs_device_get_bytes_used(device));
1919 btrfs_set_device_group(leaf, dev_item, 0);
1920 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1921 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1922 btrfs_set_device_start_offset(leaf, dev_item, 0);
1923
1924 ptr = btrfs_device_uuid(dev_item);
1925 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1926 ptr = btrfs_device_fsid(dev_item);
1927 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1928 ptr, BTRFS_FSID_SIZE);
1929 btrfs_mark_buffer_dirty(leaf);
1930
1931 ret = 0;
1932 out:
1933 btrfs_free_path(path);
1934 return ret;
1935 }
1936
1937 /*
1938 * Function to update ctime/mtime for a given device path.
1939 * Mainly used for ctime/mtime based probe like libblkid.
1940 *
1941 * We don't care about errors here, this is just to be kind to userspace.
1942 */
1943 static void update_dev_time(const char *device_path)
1944 {
1945 struct path path;
1946 struct timespec64 now;
1947 int ret;
1948
1949 ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1950 if (ret)
1951 return;
1952
1953 now = current_time(d_inode(path.dentry));
1954 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1955 path_put(&path);
1956 }
1957
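/*
 * Delete the dev item for @device from the chunk tree in its own
 * transaction, committing it on success.
 */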
1958 static int btrfs_rm_dev_item(struct btrfs_device *device)
1959 {
1960 struct btrfs_root *root = device->fs_info->chunk_root;
1961 int ret;
1962 struct btrfs_path *path;
1963 struct btrfs_key key;
1964 struct btrfs_trans_handle *trans;
1965
1966 path = btrfs_alloc_path();
1967 if (!path)
1968 return -ENOMEM;
1969
1970 trans = btrfs_start_transaction(root, 0);
1971 if (IS_ERR(trans)) {
1972 btrfs_free_path(path);
1973 return PTR_ERR(trans);
1974 }
1975 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1976 key.type = BTRFS_DEV_ITEM_KEY;
1977 key.offset = device->devid;
1978
1979 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1980 if (ret) {
1981 if (ret > 0)
1982 ret = -ENOENT;
1983 btrfs_abort_transaction(trans, ret);
1984 btrfs_end_transaction(trans);
1985 goto out;
1986 }
1987
1988 ret = btrfs_del_item(trans, root, path);
1989 if (ret) {
1990 btrfs_abort_transaction(trans, ret);
1991 btrfs_end_transaction(trans);
1992 }
1993
1994 out:
1995 btrfs_free_path(path);
1996 if (!ret)
1997 ret = btrfs_commit_transaction(trans);
1998 return ret;
1999 }
2000
2001 /*
2002 * Verify that @num_devices satisfies the RAID profile constraints in the whole
2003 * filesystem. It's up to the caller to adjust that number regarding eg. device
2004 * replace.
2005 */
2006 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
2007 u64 num_devices)
2008 {
2009 u64 all_avail;
2010 unsigned seq;
2011 int i;
2012
2013 do {
2014 seq = read_seqbegin(&fs_info->profiles_lock);
2015
2016 all_avail = fs_info->avail_data_alloc_bits |
2017 fs_info->avail_system_alloc_bits |
2018 fs_info->avail_metadata_alloc_bits;
2019 } while (read_seqretry(&fs_info->profiles_lock, seq));
2020
2021 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2022 if (!(all_avail & btrfs_raid_array[i].bg_flag))
2023 continue;
2024
2025 if (num_devices < btrfs_raid_array[i].devs_min) {
2026 int ret = btrfs_raid_array[i].mindev_error;
2027
2028 if (ret)
2029 return ret;
2030 }
2031 }
2032
2033 return 0;
2034 }
2035
2036 static struct btrfs_device * btrfs_find_next_active_device(
2037 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2038 {
2039 struct btrfs_device *next_device;
2040
2041 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2042 if (next_device != device &&
2043 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2044 && next_device->bdev)
2045 return next_device;
2046 }
2047
2048 return NULL;
2049 }
2050
2051 /*
2052 * Helper function to check if the given device is part of s_bdev / latest_bdev
2053 * and replace it with the provided or the next active device. In the
2054 * context where this function is called, there should always be another
2055 * active device (or next_device) available.
2056 */
2057 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2058 struct btrfs_device *next_device)
2059 {
2060 struct btrfs_fs_info *fs_info = device->fs_info;
2061
2062 if (!next_device)
2063 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2064 device);
2065 ASSERT(next_device);
2066
2067 if (fs_info->sb->s_bdev &&
2068 (fs_info->sb->s_bdev == device->bdev))
2069 fs_info->sb->s_bdev = next_device->bdev;
2070
2071 if (fs_info->fs_devices->latest_bdev == device->bdev)
2072 fs_info->fs_devices->latest_bdev = next_device->bdev;
2073 }
2074
2075 /*
2076 * Return btrfs_fs_devices::num_devices excluding the device that's being
2077 * currently replaced.
2078 */
2079 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2080 {
2081 u64 num_devices = fs_info->fs_devices->num_devices;
2082
2083 down_read(&fs_info->dev_replace.rwsem);
2084 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2085 ASSERT(num_devices > 1);
2086 num_devices--;
2087 }
2088 up_read(&fs_info->dev_replace.rwsem);
2089
2090 return num_devices;
2091 }
2092
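/*
 * Zero out the magic of every superblock copy on @bdev so the device is
 * no longer recognized as a btrfs member, then notify udev and touch the
 * device path so libblkid notices the change.
 */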
2093 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2094 struct block_device *bdev,
2095 const char *device_path)
2096 {
2097 struct btrfs_super_block *disk_super;
2098 int copy_num;
2099
2100 if (!bdev)
2101 return;
2102
2103 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2104 struct page *page;
2105 int ret;
2106
2107 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2108 if (IS_ERR(disk_super))
2109 continue;
2110
2111 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2112
2113 page = virt_to_page(disk_super);
2114 set_page_dirty(page);
2115 lock_page(page);
2116 /* write_one_page() unlocks the page */
2117 ret = write_one_page(page);
2118 if (ret)
2119 btrfs_warn(fs_info,
2120 "error clearing superblock number %d (%d)",
2121 copy_num, ret);
2122 btrfs_release_disk_super(disk_super);
2123
2124 }
2125
2126 /* Notify udev that device has changed */
2127 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2128
2129 /* Update ctime/mtime for device path for libblkid */
2130 update_dev_time(device_path);
2131 }
2132
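/*
 * Remove a device from a mounted filesystem: shrink it to zero, delete
 * its dev item and dev extents, drop it from the device lists and the
 * superblock counters, and finally wipe its superblocks.
 */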
2133 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2134 u64 devid)
2135 {
2136 struct btrfs_device *device;
2137 struct btrfs_fs_devices *cur_devices;
2138 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2139 u64 num_devices;
2140 int ret = 0;
2141
2142 /*
2143 * The device list in fs_devices is accessed without locks (neither
2144 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2145 * filesystem and another device rm cannot run.
2146 */
2147 num_devices = btrfs_num_devices(fs_info);
2148
2149 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2150 if (ret)
2151 goto out;
2152
2153 device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2154
2155 if (IS_ERR(device)) {
2156 if (PTR_ERR(device) == -ENOENT &&
2157 device_path && strcmp(device_path, "missing") == 0)
2158 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2159 else
2160 ret = PTR_ERR(device);
2161 goto out;
2162 }
2163
2164 if (btrfs_pinned_by_swapfile(fs_info, device)) {
2165 btrfs_warn_in_rcu(fs_info,
2166 "cannot remove device %s (devid %llu) due to active swapfile",
2167 rcu_str_deref(device->name), device->devid);
2168 ret = -ETXTBSY;
2169 goto out;
2170 }
2171
2172 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2173 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2174 goto out;
2175 }
2176
2177 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2178 fs_info->fs_devices->rw_devices == 1) {
2179 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2180 goto out;
2181 }
2182
2183 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2184 mutex_lock(&fs_info->chunk_mutex);
2185 list_del_init(&device->dev_alloc_list);
2186 device->fs_devices->rw_devices--;
2187 mutex_unlock(&fs_info->chunk_mutex);
2188 }
2189
2190 ret = btrfs_shrink_device(device, 0);
2191 if (!ret)
2192 btrfs_reada_remove_dev(device);
2193 if (ret)
2194 goto error_undo;
2195
2196 /*
2197 * TODO: the superblock still includes this device in its num_devices
2198 * counter although write_all_supers() is not locked out. This
2199 * could give a filesystem state which requires a degraded mount.
2200 */
2201 ret = btrfs_rm_dev_item(device);
2202 if (ret)
2203 goto error_undo;
2204
2205 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2206 btrfs_scrub_cancel_dev(device);
2207
2208 /*
2209 * the device list mutex makes sure that we don't change
2210 * the device list while someone else is writing out all
2211 * the device supers. Whoever is writing all supers should
2212 * lock the device list mutex before getting the number of
2213 * devices in the super block (super_copy). Conversely,
2214 * whoever updates the number of devices in the super block
2215 * (super_copy) should hold the device list mutex.
2216 */
2217
2218 /*
2219 * In normal cases cur_devices == fs_devices. But when deleting a
2220 * seed device, cur_devices points to the seed's own fs_devices,
2221 * listed on fs_devices->seed_list.
2222 */
2223 cur_devices = device->fs_devices;
2224 mutex_lock(&fs_devices->device_list_mutex);
2225 list_del_rcu(&device->dev_list);
2226
2227 cur_devices->num_devices--;
2228 cur_devices->total_devices--;
2229 /* Update total_devices of the parent fs_devices if it's seed */
2230 if (cur_devices != fs_devices)
2231 fs_devices->total_devices--;
2232
2233 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2234 cur_devices->missing_devices--;
2235
2236 btrfs_assign_next_active_device(device, NULL);
2237
2238 if (device->bdev) {
2239 cur_devices->open_devices--;
2240 /* remove sysfs entry */
2241 btrfs_sysfs_remove_device(device);
2242 }
2243
2244 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2245 btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2246 mutex_unlock(&fs_devices->device_list_mutex);
2247
2248 /*
2249 * at this point, the device is zero sized and detached from
2250 * the devices list. All that's left is to zero out the old
2251 * supers and free the device.
2252 */
2253 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2254 btrfs_scratch_superblocks(fs_info, device->bdev,
2255 device->name->str);
2256
2257 btrfs_close_bdev(device);
2258 synchronize_rcu();
2259 btrfs_free_device(device);
2260
2261 if (cur_devices->open_devices == 0) {
2262 list_del_init(&cur_devices->seed_list);
2263 close_fs_devices(cur_devices);
2264 free_fs_devices(cur_devices);
2265 }
2266
2267 out:
2268 return ret;
2269
2270 error_undo:
2271 btrfs_reada_undo_remove_dev(device);
2272 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2273 mutex_lock(&fs_info->chunk_mutex);
2274 list_add(&device->dev_alloc_list,
2275 &fs_devices->alloc_list);
2276 device->fs_devices->rw_devices++;
2277 mutex_unlock(&fs_info->chunk_mutex);
2278 }
2279 goto out;
2280 }
2281
2282 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2283 {
2284 struct btrfs_fs_devices *fs_devices;
2285
2286 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2287
2288 /*
2289 * In case of a filesystem with no seed, srcdev->fs_devices points to
2290 * the fs_devices of fs_info. However, when the device being replaced
2291 * is a seed device, it points to the seed's local fs_devices. In short,
2292 * srcdev has the correct fs_devices in both cases.
2293 */
2294 fs_devices = srcdev->fs_devices;
2295
2296 list_del_rcu(&srcdev->dev_list);
2297 list_del(&srcdev->dev_alloc_list);
2298 fs_devices->num_devices--;
2299 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2300 fs_devices->missing_devices--;
2301
2302 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2303 fs_devices->rw_devices--;
2304
2305 if (srcdev->bdev)
2306 fs_devices->open_devices--;
2307 }
2308
2309 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2310 {
2311 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2312
2313 mutex_lock(&uuid_mutex);
2314
2315 btrfs_close_bdev(srcdev);
2316 synchronize_rcu();
2317 btrfs_free_device(srcdev);
2318
2319 /* If there are no more devices, delete the fs_devices as well */
2320 if (!fs_devices->num_devices) {
2321 /*
2322 * On a mounted FS, num_devices can't be zero unless it's a
2323 * seed. In case of a seed device being replaced, the replace
2324 * target is added to the sprout FS, so there will be no more
2325 * devices left under the seed FS.
2326 */
2327 ASSERT(fs_devices->seeding);
2328
2329 list_del_init(&fs_devices->seed_list);
2330 close_fs_devices(fs_devices);
2331 free_fs_devices(fs_devices);
2332 }
2333 mutex_unlock(&uuid_mutex);
2334 }
2335
2336 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2337 {
2338 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2339
2340 mutex_lock(&fs_devices->device_list_mutex);
2341
2342 btrfs_sysfs_remove_device(tgtdev);
2343
2344 if (tgtdev->bdev)
2345 fs_devices->open_devices--;
2346
2347 fs_devices->num_devices--;
2348
2349 btrfs_assign_next_active_device(tgtdev, NULL);
2350
2351 list_del_rcu(&tgtdev->dev_list);
2352
2353 mutex_unlock(&fs_devices->device_list_mutex);
2354
2355 /*
2356 * The update_dev_time() within btrfs_scratch_superblocks()
2357 * may lead to a call to btrfs_show_devname() which will try
2358 * to hold device_list_mutex. Here this device is already
2359 * out of the device list, so we don't have to hold
2360 * the device_list_mutex lock.
2361 */
2362 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2363 tgtdev->name->str);
2364
2365 btrfs_close_bdev(tgtdev);
2366 synchronize_rcu();
2367 btrfs_free_device(tgtdev);
2368 }
2369
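/*
 * Open the block device at @device_path read-only, read its superblock
 * and look up the matching btrfs_device by devid and uuid.
 */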
2370 static struct btrfs_device *btrfs_find_device_by_path(
2371 struct btrfs_fs_info *fs_info, const char *device_path)
2372 {
2373 int ret = 0;
2374 struct btrfs_super_block *disk_super;
2375 u64 devid;
2376 u8 *dev_uuid;
2377 struct block_device *bdev;
2378 struct btrfs_device *device;
2379
2380 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2381 fs_info->bdev_holder, 0, &bdev, &disk_super);
2382 if (ret)
2383 return ERR_PTR(ret);
2384
2385 devid = btrfs_stack_device_id(&disk_super->dev_item);
2386 dev_uuid = disk_super->dev_item.uuid;
2387 if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2388 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2389 disk_super->metadata_uuid, true);
2390 else
2391 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2392 disk_super->fsid, true);
2393
2394 btrfs_release_disk_super(disk_super);
2395 if (!device)
2396 device = ERR_PTR(-ENOENT);
2397 blkdev_put(bdev, FMODE_READ);
2398 return device;
2399 }
2400
2401 /*
2402 * Look up a device given by device id, or the path if the id is 0.
2403 */
2404 struct btrfs_device *btrfs_find_device_by_devspec(
2405 struct btrfs_fs_info *fs_info, u64 devid,
2406 const char *device_path)
2407 {
2408 struct btrfs_device *device;
2409
2410 if (devid) {
2411 device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2412 NULL, true);
2413 if (!device)
2414 return ERR_PTR(-ENOENT);
2415 return device;
2416 }
2417
2418 if (!device_path || !device_path[0])
2419 return ERR_PTR(-EINVAL);
2420
2421 if (strcmp(device_path, "missing") == 0) {
2422 /* Find first missing device */
2423 list_for_each_entry(device, &fs_info->fs_devices->devices,
2424 dev_list) {
2425 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2426 &device->dev_state) && !device->bdev)
2427 return device;
2428 }
2429 return ERR_PTR(-ENOENT);
2430 }
2431
2432 return btrfs_find_device_by_path(fs_info, device_path);
2433 }
2434
2435 /*
2436 * Does all the dirty work required for changing the filesystem's UUID.
2437 */
2438 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2439 {
2440 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2441 struct btrfs_fs_devices *old_devices;
2442 struct btrfs_fs_devices *seed_devices;
2443 struct btrfs_super_block *disk_super = fs_info->super_copy;
2444 struct btrfs_device *device;
2445 u64 super_flags;
2446
2447 lockdep_assert_held(&uuid_mutex);
2448 if (!fs_devices->seeding)
2449 return -EINVAL;
2450
2451 /*
2452 * Private copy of the seed devices, anchored at
2453 * fs_info->fs_devices->seed_list
2454 */
2455 seed_devices = alloc_fs_devices(NULL, NULL);
2456 if (IS_ERR(seed_devices))
2457 return PTR_ERR(seed_devices);
2458
2459 /*
2460 * It's necessary to retain a copy of the original seed fs_devices in
2461 * fs_uuids so that filesystems which have been seeded can successfully
2462 * reference the seed device from open_seed_devices. This also supports
2463 * multiple fs seeds.
2464 */
2465 old_devices = clone_fs_devices(fs_devices);
2466 if (IS_ERR(old_devices)) {
2467 kfree(seed_devices);
2468 return PTR_ERR(old_devices);
2469 }
2470
2471 list_add(&old_devices->fs_list, &fs_uuids);
2472
2473 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2474 seed_devices->opened = 1;
2475 INIT_LIST_HEAD(&seed_devices->devices);
2476 INIT_LIST_HEAD(&seed_devices->alloc_list);
2477 mutex_init(&seed_devices->device_list_mutex);
2478
2479 mutex_lock(&fs_devices->device_list_mutex);
2480 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2481 synchronize_rcu);
2482 list_for_each_entry(device, &seed_devices->devices, dev_list)
2483 device->fs_devices = seed_devices;
2484
2485 fs_devices->seeding = false;
2486 fs_devices->num_devices = 0;
2487 fs_devices->open_devices = 0;
2488 fs_devices->missing_devices = 0;
2489 fs_devices->rotating = false;
2490 list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2491
2492 generate_random_uuid(fs_devices->fsid);
2493 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2494 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2495 mutex_unlock(&fs_devices->device_list_mutex);
2496
2497 super_flags = btrfs_super_flags(disk_super) &
2498 ~BTRFS_SUPER_FLAG_SEEDING;
2499 btrfs_set_super_flags(disk_super, super_flags);
2500
2501 return 0;
2502 }
2503
2504 /*
2505 * Store the expected generation for seed devices in device items.
2506 */
2507 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2508 {
2509 struct btrfs_fs_info *fs_info = trans->fs_info;
2510 struct btrfs_root *root = fs_info->chunk_root;
2511 struct btrfs_path *path;
2512 struct extent_buffer *leaf;
2513 struct btrfs_dev_item *dev_item;
2514 struct btrfs_device *device;
2515 struct btrfs_key key;
2516 u8 fs_uuid[BTRFS_FSID_SIZE];
2517 u8 dev_uuid[BTRFS_UUID_SIZE];
2518 u64 devid;
2519 int ret;
2520
2521 path = btrfs_alloc_path();
2522 if (!path)
2523 return -ENOMEM;
2524
2525 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2526 key.offset = 0;
2527 key.type = BTRFS_DEV_ITEM_KEY;
2528
2529 while (1) {
2530 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2531 if (ret < 0)
2532 goto error;
2533
2534 leaf = path->nodes[0];
2535 next_slot:
2536 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2537 ret = btrfs_next_leaf(root, path);
2538 if (ret > 0)
2539 break;
2540 if (ret < 0)
2541 goto error;
2542 leaf = path->nodes[0];
2543 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2544 btrfs_release_path(path);
2545 continue;
2546 }
2547
2548 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2549 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2550 key.type != BTRFS_DEV_ITEM_KEY)
2551 break;
2552
2553 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2554 struct btrfs_dev_item);
2555 devid = btrfs_device_id(leaf, dev_item);
2556 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2557 BTRFS_UUID_SIZE);
2558 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2559 BTRFS_FSID_SIZE);
2560 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2561 fs_uuid, true);
2562 BUG_ON(!device); /* Logic error */
2563
2564 if (device->fs_devices->seeding) {
2565 btrfs_set_device_generation(leaf, dev_item,
2566 device->generation);
2567 btrfs_mark_buffer_dirty(leaf);
2568 }
2569
2570 path->slots[0]++;
2571 goto next_slot;
2572 }
2573 ret = 0;
2574 error:
2575 btrfs_free_path(path);
2576 return ret;
2577 }
2578
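/*
 * Add the block device at @device_path as a new member of the mounted
 * filesystem. If the filesystem is a seed, this sprouts a new writable
 * filesystem on top of it first.
 */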
2579 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2580 {
2581 struct btrfs_root *root = fs_info->dev_root;
2582 struct request_queue *q;
2583 struct btrfs_trans_handle *trans;
2584 struct btrfs_device *device;
2585 struct block_device *bdev;
2586 struct super_block *sb = fs_info->sb;
2587 struct rcu_string *name;
2588 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2589 u64 orig_super_total_bytes;
2590 u64 orig_super_num_devices;
2591 int seeding_dev = 0;
2592 int ret = 0;
2593 bool locked = false;
2594
2595 if (sb_rdonly(sb) && !fs_devices->seeding)
2596 return -EROFS;
2597
2598 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2599 fs_info->bdev_holder);
2600 if (IS_ERR(bdev))
2601 return PTR_ERR(bdev);
2602
2603 if (fs_devices->seeding) {
2604 seeding_dev = 1;
2605 down_write(&sb->s_umount);
2606 mutex_lock(&uuid_mutex);
2607 locked = true;
2608 }
2609
2610 sync_blockdev(bdev);
2611
2612 rcu_read_lock();
2613 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2614 if (device->bdev == bdev) {
2615 ret = -EEXIST;
2616 rcu_read_unlock();
2617 goto error;
2618 }
2619 }
2620 rcu_read_unlock();
2621
2622 device = btrfs_alloc_device(fs_info, NULL, NULL);
2623 if (IS_ERR(device)) {
2624 /* we can safely leave the fs_devices entry around */
2625 ret = PTR_ERR(device);
2626 goto error;
2627 }
2628
2629 name = rcu_string_strdup(device_path, GFP_KERNEL);
2630 if (!name) {
2631 ret = -ENOMEM;
2632 goto error_free_device;
2633 }
2634 rcu_assign_pointer(device->name, name);
2635
2636 trans = btrfs_start_transaction(root, 0);
2637 if (IS_ERR(trans)) {
2638 ret = PTR_ERR(trans);
2639 goto error_free_device;
2640 }
2641
2642 q = bdev_get_queue(bdev);
2643 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2644 device->generation = trans->transid;
2645 device->io_width = fs_info->sectorsize;
2646 device->io_align = fs_info->sectorsize;
2647 device->sector_size = fs_info->sectorsize;
2648 device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2649 fs_info->sectorsize);
2650 device->disk_total_bytes = device->total_bytes;
2651 device->commit_total_bytes = device->total_bytes;
2652 device->fs_info = fs_info;
2653 device->bdev = bdev;
2654 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2655 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2656 device->mode = FMODE_EXCL;
2657 device->dev_stats_valid = 1;
2658 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2659
2660 if (seeding_dev) {
2661 sb->s_flags &= ~SB_RDONLY;
2662 ret = btrfs_prepare_sprout(fs_info);
2663 if (ret) {
2664 btrfs_abort_transaction(trans, ret);
2665 goto error_trans;
2666 }
2667 }
2668
2669 device->fs_devices = fs_devices;
2670
2671 mutex_lock(&fs_devices->device_list_mutex);
2672 mutex_lock(&fs_info->chunk_mutex);
2673 list_add_rcu(&device->dev_list, &fs_devices->devices);
2674 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2675 fs_devices->num_devices++;
2676 fs_devices->open_devices++;
2677 fs_devices->rw_devices++;
2678 fs_devices->total_devices++;
2679 fs_devices->total_rw_bytes += device->total_bytes;
2680
2681 atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2682
2683 if (!blk_queue_nonrot(q))
2684 fs_devices->rotating = true;
2685
2686 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2687 btrfs_set_super_total_bytes(fs_info->super_copy,
2688 round_down(orig_super_total_bytes + device->total_bytes,
2689 fs_info->sectorsize));
2690
2691 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2692 btrfs_set_super_num_devices(fs_info->super_copy,
2693 orig_super_num_devices + 1);
2694
2695 /*
2696 * we've got more storage, clear any full flags on the space
2697 * infos
2698 */
2699 btrfs_clear_space_info_full(fs_info);
2700
2701 mutex_unlock(&fs_info->chunk_mutex);
2702
2703 /* Add sysfs device entry */
2704 btrfs_sysfs_add_device(device);
2705
2706 mutex_unlock(&fs_devices->device_list_mutex);
2707
2708 if (seeding_dev) {
2709 mutex_lock(&fs_info->chunk_mutex);
2710 ret = init_first_rw_device(trans);
2711 mutex_unlock(&fs_info->chunk_mutex);
2712 if (ret) {
2713 btrfs_abort_transaction(trans, ret);
2714 goto error_sysfs;
2715 }
2716 }
2717
2718 ret = btrfs_add_dev_item(trans, device);
2719 if (ret) {
2720 btrfs_abort_transaction(trans, ret);
2721 goto error_sysfs;
2722 }
2723
2724 if (seeding_dev) {
2725 ret = btrfs_finish_sprout(trans);
2726 if (ret) {
2727 btrfs_abort_transaction(trans, ret);
2728 goto error_sysfs;
2729 }
2730
2731 /*
2732 * fs_devices now represents the newly sprouted filesystem and
2733 * its fsid has been changed by btrfs_prepare_sprout
2734 */
2735 btrfs_sysfs_update_sprout_fsid(fs_devices);
2736 }
2737
2738 ret = btrfs_commit_transaction(trans);
2739
2740 if (seeding_dev) {
2741 mutex_unlock(&uuid_mutex);
2742 up_write(&sb->s_umount);
2743 locked = false;
2744
2745 if (ret) /* transaction commit */
2746 return ret;
2747
2748 ret = btrfs_relocate_sys_chunks(fs_info);
2749 if (ret < 0)
2750 btrfs_handle_fs_error(fs_info, ret,
2751 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2752 trans = btrfs_attach_transaction(root);
2753 if (IS_ERR(trans)) {
2754 if (PTR_ERR(trans) == -ENOENT)
2755 return 0;
2756 ret = PTR_ERR(trans);
2757 trans = NULL;
2758 goto error_sysfs;
2759 }
2760 ret = btrfs_commit_transaction(trans);
2761 }
2762
2763 /*
2764 * Now that we have written a new super block to this device, check all
2765 * other fs_devices lists to see whether device_path alienates any
2766 * other scanned device.
2767 * We can ignore the return value as it typically returns -EINVAL and
2768 * only succeeds if the device was an alien.
2769 */
2770 btrfs_forget_devices(device_path);
2771
2772 /* Update ctime/mtime for blkid or udev */
2773 update_dev_time(device_path);
2774
2775 return ret;
2776
2777 error_sysfs:
2778 btrfs_sysfs_remove_device(device);
2779 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2780 mutex_lock(&fs_info->chunk_mutex);
2781 list_del_rcu(&device->dev_list);
2782 list_del(&device->dev_alloc_list);
2783 fs_info->fs_devices->num_devices--;
2784 fs_info->fs_devices->open_devices--;
2785 fs_info->fs_devices->rw_devices--;
2786 fs_info->fs_devices->total_devices--;
2787 fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2788 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2789 btrfs_set_super_total_bytes(fs_info->super_copy,
2790 orig_super_total_bytes);
2791 btrfs_set_super_num_devices(fs_info->super_copy,
2792 orig_super_num_devices);
2793 mutex_unlock(&fs_info->chunk_mutex);
2794 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2795 error_trans:
2796 if (seeding_dev)
2797 sb->s_flags |= SB_RDONLY;
2798 if (trans)
2799 btrfs_end_transaction(trans);
2800 error_free_device:
2801 btrfs_free_device(device);
2802 error:
2803 blkdev_put(bdev, FMODE_EXCL);
2804 if (locked) {
2805 mutex_unlock(&uuid_mutex);
2806 up_write(&sb->s_umount);
2807 }
2808 return ret;
2809 }
2810
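/*
 * Write the in-memory state of @device (sizes, alignment, type) back to
 * its dev item in the chunk tree.
 */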
2811 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2812 struct btrfs_device *device)
2813 {
2814 int ret;
2815 struct btrfs_path *path;
2816 struct btrfs_root *root = device->fs_info->chunk_root;
2817 struct btrfs_dev_item *dev_item;
2818 struct extent_buffer *leaf;
2819 struct btrfs_key key;
2820
2821 path = btrfs_alloc_path();
2822 if (!path)
2823 return -ENOMEM;
2824
2825 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2826 key.type = BTRFS_DEV_ITEM_KEY;
2827 key.offset = device->devid;
2828
2829 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2830 if (ret < 0)
2831 goto out;
2832
2833 if (ret > 0) {
2834 ret = -ENOENT;
2835 goto out;
2836 }
2837
2838 leaf = path->nodes[0];
2839 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2840
2841 btrfs_set_device_id(leaf, dev_item, device->devid);
2842 btrfs_set_device_type(leaf, dev_item, device->type);
2843 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2844 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2845 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2846 btrfs_set_device_total_bytes(leaf, dev_item,
2847 btrfs_device_get_disk_total_bytes(device));
2848 btrfs_set_device_bytes_used(leaf, dev_item,
2849 btrfs_device_get_bytes_used(device));
2850 btrfs_mark_buffer_dirty(leaf);
2851
2852 out:
2853 btrfs_free_path(path);
2854 return ret;
2855 }
2856
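/*
 * Grow @device to @new_size (rounded down to a sector boundary), update
 * the superblock's total_bytes and write the new size back to the
 * device's dev item.
 */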
2857 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2858 struct btrfs_device *device, u64 new_size)
2859 {
2860 struct btrfs_fs_info *fs_info = device->fs_info;
2861 struct btrfs_super_block *super_copy = fs_info->super_copy;
2862 u64 old_total;
2863 u64 diff;
2864
2865 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2866 return -EACCES;
2867
2868 new_size = round_down(new_size, fs_info->sectorsize);
2869
2870 mutex_lock(&fs_info->chunk_mutex);
2871 old_total = btrfs_super_total_bytes(super_copy);
2872 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2873
2874 if (new_size <= device->total_bytes ||
2875 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2876 mutex_unlock(&fs_info->chunk_mutex);
2877 return -EINVAL;
2878 }
2879
2880 btrfs_set_super_total_bytes(super_copy,
2881 round_down(old_total + diff, fs_info->sectorsize));
2882 device->fs_devices->total_rw_bytes += diff;
2883
2884 btrfs_device_set_total_bytes(device, new_size);
2885 btrfs_device_set_disk_total_bytes(device, new_size);
2886 btrfs_clear_space_info_full(device->fs_info);
2887 if (list_empty(&device->post_commit_list))
2888 list_add_tail(&device->post_commit_list,
2889 &trans->transaction->dev_update_list);
2890 mutex_unlock(&fs_info->chunk_mutex);
2891
2892 return btrfs_update_device(trans, device);
2893 }
2894
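/*
 * Delete the chunk item at @chunk_offset from the chunk tree.
 */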
2895 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2896 {
2897 struct btrfs_fs_info *fs_info = trans->fs_info;
2898 struct btrfs_root *root = fs_info->chunk_root;
2899 int ret;
2900 struct btrfs_path *path;
2901 struct btrfs_key key;
2902
2903 path = btrfs_alloc_path();
2904 if (!path)
2905 return -ENOMEM;
2906
2907 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2908 key.offset = chunk_offset;
2909 key.type = BTRFS_CHUNK_ITEM_KEY;
2910
2911 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2912 if (ret < 0)
2913 goto out;
2914 else if (ret > 0) { /* Logic error or corruption */
2915 btrfs_handle_fs_error(fs_info, -ENOENT,
2916 "Failed lookup while freeing chunk.");
2917 ret = -ENOENT;
2918 goto out;
2919 }
2920
2921 ret = btrfs_del_item(trans, root, path);
2922 if (ret < 0)
2923 btrfs_handle_fs_error(fs_info, ret,
2924 "Failed to delete chunk item.");
2925 out:
2926 btrfs_free_path(path);
2927 return ret;
2928 }
2929
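/*
 * Remove the entry for the chunk at @chunk_offset from the superblock's
 * sys_chunk_array by shifting the remaining entries over it.
 */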
2930 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2931 {
2932 struct btrfs_super_block *super_copy = fs_info->super_copy;
2933 struct btrfs_disk_key *disk_key;
2934 struct btrfs_chunk *chunk;
2935 u8 *ptr;
2936 int ret = 0;
2937 u32 num_stripes;
2938 u32 array_size;
2939 u32 len = 0;
2940 u32 cur;
2941 struct btrfs_key key;
2942
2943 mutex_lock(&fs_info->chunk_mutex);
2944 array_size = btrfs_super_sys_array_size(super_copy);
2945
2946 ptr = super_copy->sys_chunk_array;
2947 cur = 0;
2948
2949 while (cur < array_size) {
2950 disk_key = (struct btrfs_disk_key *)ptr;
2951 btrfs_disk_key_to_cpu(&key, disk_key);
2952
2953 len = sizeof(*disk_key);
2954
2955 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2956 chunk = (struct btrfs_chunk *)(ptr + len);
2957 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2958 len += btrfs_chunk_item_size(num_stripes);
2959 } else {
2960 ret = -EIO;
2961 break;
2962 }
2963 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2964 key.offset == chunk_offset) {
2965 memmove(ptr, ptr + len, array_size - (cur + len));
2966 array_size -= len;
2967 btrfs_set_super_sys_array_size(super_copy, array_size);
2968 } else {
2969 ptr += len;
2970 cur += len;
2971 }
2972 }
2973 mutex_unlock(&fs_info->chunk_mutex);
2974 return ret;
2975 }
2976
2977 /*
2978 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2979 * @logical: Logical block offset in bytes.
2980 * @length: Length of extent in bytes.
2981 *
2982 * Return: Chunk mapping or ERR_PTR.
2983 */
2984 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2985 u64 logical, u64 length)
2986 {
2987 struct extent_map_tree *em_tree;
2988 struct extent_map *em;
2989
2990 em_tree = &fs_info->mapping_tree;
2991 read_lock(&em_tree->lock);
2992 em = lookup_extent_mapping(em_tree, logical, length);
2993 read_unlock(&em_tree->lock);
2994
2995 if (!em) {
2996 btrfs_crit(fs_info,
2997 "unable to find chunk map for logical %llu length %llu",
2998 logical, length);
2999 return ERR_PTR(-EINVAL);
3000 }
3001
3002 if (em->start > logical || em->start + em->len <= logical) {
3003 btrfs_crit(fs_info,
3004 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
3005 logical, logical + length, em->start, em->start + em->len);
3006 free_extent_map(em);
3007 return ERR_PTR(-EINVAL);
3008 }
3009
3010 /* callers are responsible for dropping em's ref. */
3011 return em;
3012 }
3013
3014 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3015 {
3016 struct btrfs_fs_info *fs_info = trans->fs_info;
3017 struct extent_map *em;
3018 struct map_lookup *map;
3019 u64 dev_extent_len = 0;
3020 int i, ret = 0;
3021 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3022
3023 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3024 if (IS_ERR(em)) {
3025 /*
3026 * This is a logic error, but we don't want to just rely on the
3027 * user having built with ASSERT enabled, so if ASSERT doesn't
3028 * do anything we still error out.
3029 */
3030 ASSERT(0);
3031 return PTR_ERR(em);
3032 }
3033 map = em->map_lookup;
3034 mutex_lock(&fs_info->chunk_mutex);
3035 check_system_chunk(trans, map->type);
3036 mutex_unlock(&fs_info->chunk_mutex);
3037
3038 /*
3039 * Take the device list mutex to prevent races with the final phase of
3040 * a device replace operation that replaces the device object associated
3041 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
3042 */
3043 mutex_lock(&fs_devices->device_list_mutex);
3044 for (i = 0; i < map->num_stripes; i++) {
3045 struct btrfs_device *device = map->stripes[i].dev;
3046 ret = btrfs_free_dev_extent(trans, device,
3047 map->stripes[i].physical,
3048 &dev_extent_len);
3049 if (ret) {
3050 mutex_unlock(&fs_devices->device_list_mutex);
3051 btrfs_abort_transaction(trans, ret);
3052 goto out;
3053 }
3054
3055 if (device->bytes_used > 0) {
3056 mutex_lock(&fs_info->chunk_mutex);
3057 btrfs_device_set_bytes_used(device,
3058 device->bytes_used - dev_extent_len);
3059 atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3060 btrfs_clear_space_info_full(fs_info);
3061 mutex_unlock(&fs_info->chunk_mutex);
3062 }
3063
3064 ret = btrfs_update_device(trans, device);
3065 if (ret) {
3066 mutex_unlock(&fs_devices->device_list_mutex);
3067 btrfs_abort_transaction(trans, ret);
3068 goto out;
3069 }
3070 }
3071 mutex_unlock(&fs_devices->device_list_mutex);
3072
3073 ret = btrfs_free_chunk(trans, chunk_offset);
3074 if (ret) {
3075 btrfs_abort_transaction(trans, ret);
3076 goto out;
3077 }
3078
3079 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3080
3081 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3082 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3083 if (ret) {
3084 btrfs_abort_transaction(trans, ret);
3085 goto out;
3086 }
3087 }
3088
3089 ret = btrfs_remove_block_group(trans, chunk_offset, em);
3090 if (ret) {
3091 btrfs_abort_transaction(trans, ret);
3092 goto out;
3093 }
3094
3095 out:
3096 /* once for us */
3097 free_extent_map(em);
3098 return ret;
3099 }
3100
3101 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3102 {
3103 struct btrfs_root *root = fs_info->chunk_root;
3104 struct btrfs_trans_handle *trans;
3105 struct btrfs_block_group *block_group;
3106 int ret;
3107
3108 /*
3109 * Prevent races with automatic removal of unused block groups.
3110 * After we relocate and before we remove the chunk with offset
3111 * chunk_offset, automatic removal of the block group can kick in,
3112 * resulting in a failure when calling btrfs_remove_chunk() below.
3113 *
3114 * Make sure to acquire this mutex before doing a tree search (dev
3115 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3116 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3117 * we release the path used to search the chunk/dev tree and before
3118 * the current task acquires this mutex and calls us.
3119 */
3120 lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3121
3122 /* step one, relocate all the extents inside this chunk */
3123 btrfs_scrub_pause(fs_info);
3124 ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3125 btrfs_scrub_continue(fs_info);
3126 if (ret)
3127 return ret;
3128
3129 block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3130 if (!block_group)
3131 return -ENOENT;
3132 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3133 btrfs_put_block_group(block_group);
3134
3135 trans = btrfs_start_trans_remove_block_group(root->fs_info,
3136 chunk_offset);
3137 if (IS_ERR(trans)) {
3138 ret = PTR_ERR(trans);
3139 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3140 return ret;
3141 }
3142
3143 /*
3144 * step two, delete the device extents and the
3145 * chunk tree entries
3146 */
3147 ret = btrfs_remove_chunk(trans, chunk_offset);
3148 btrfs_end_transaction(trans);
3149 return ret;
3150 }
3151
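/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk. Chunks
 * that fail with -ENOSPC are retried once after the rest have moved,
 * since relocation may have freed the space they need.
 */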
3152 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3153 {
3154 struct btrfs_root *chunk_root = fs_info->chunk_root;
3155 struct btrfs_path *path;
3156 struct extent_buffer *leaf;
3157 struct btrfs_chunk *chunk;
3158 struct btrfs_key key;
3159 struct btrfs_key found_key;
3160 u64 chunk_type;
3161 bool retried = false;
3162 int failed = 0;
3163 int ret;
3164
3165 path = btrfs_alloc_path();
3166 if (!path)
3167 return -ENOMEM;
3168
3169 again:
3170 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3171 key.offset = (u64)-1;
3172 key.type = BTRFS_CHUNK_ITEM_KEY;
3173
3174 while (1) {
3175 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3176 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3177 if (ret < 0) {
3178 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3179 goto error;
3180 }
3181 BUG_ON(ret == 0); /* Corruption */
3182
3183 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3184 key.type);
3185 if (ret)
3186 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3187 if (ret < 0)
3188 goto error;
3189 if (ret > 0)
3190 break;
3191
3192 leaf = path->nodes[0];
3193 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3194
3195 chunk = btrfs_item_ptr(leaf, path->slots[0],
3196 struct btrfs_chunk);
3197 chunk_type = btrfs_chunk_type(leaf, chunk);
3198 btrfs_release_path(path);
3199
3200 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3201 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3202 if (ret == -ENOSPC)
3203 failed++;
3204 else
3205 BUG_ON(ret);
3206 }
3207 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3208
3209 if (found_key.offset == 0)
3210 break;
3211 key.offset = found_key.offset - 1;
3212 }
3213 ret = 0;
3214 if (failed && !retried) {
3215 failed = 0;
3216 retried = true;
3217 goto again;
3218 } else if (WARN_ON(failed && retried)) {
3219 ret = -ENOSPC;
3220 }
3221 error:
3222 btrfs_free_path(path);
3223 return ret;
3224 }
3225
3226 /*
3227 * return 1 : a data chunk was allocated successfully,
3228 * return <0: error while allocating a data chunk,
3229 * return 0 : no need to allocate a data chunk.
3230 */
3231 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3232 u64 chunk_offset)
3233 {
3234 struct btrfs_block_group *cache;
3235 u64 bytes_used;
3236 u64 chunk_type;
3237
3238 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3239 ASSERT(cache);
3240 chunk_type = cache->flags;
3241 btrfs_put_block_group(cache);
3242
3243 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3244 return 0;
3245
3246 spin_lock(&fs_info->data_sinfo->lock);
3247 bytes_used = fs_info->data_sinfo->bytes_used;
3248 spin_unlock(&fs_info->data_sinfo->lock);
3249
3250 if (!bytes_used) {
3251 struct btrfs_trans_handle *trans;
3252 int ret;
3253
3254 trans = btrfs_join_transaction(fs_info->tree_root);
3255 if (IS_ERR(trans))
3256 return PTR_ERR(trans);
3257
3258 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3259 btrfs_end_transaction(trans);
3260 if (ret < 0)
3261 return ret;
3262 return 1;
3263 }
3264
3265 return 0;
3266 }
3267
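/*
 * Persist the balance arguments as a balance item in the tree root so
 * that an interrupted balance can be resumed after a remount.
 */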
3268 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3269 struct btrfs_balance_control *bctl)
3270 {
3271 struct btrfs_root *root = fs_info->tree_root;
3272 struct btrfs_trans_handle *trans;
3273 struct btrfs_balance_item *item;
3274 struct btrfs_disk_balance_args disk_bargs;
3275 struct btrfs_path *path;
3276 struct extent_buffer *leaf;
3277 struct btrfs_key key;
3278 int ret, err;
3279
3280 path = btrfs_alloc_path();
3281 if (!path)
3282 return -ENOMEM;
3283
3284 trans = btrfs_start_transaction(root, 0);
3285 if (IS_ERR(trans)) {
3286 btrfs_free_path(path);
3287 return PTR_ERR(trans);
3288 }
3289
3290 key.objectid = BTRFS_BALANCE_OBJECTID;
3291 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3292 key.offset = 0;
3293
3294 ret = btrfs_insert_empty_item(trans, root, path, &key,
3295 sizeof(*item));
3296 if (ret)
3297 goto out;
3298
3299 leaf = path->nodes[0];
3300 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3301
3302 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3303
3304 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3305 btrfs_set_balance_data(leaf, item, &disk_bargs);
3306 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3307 btrfs_set_balance_meta(leaf, item, &disk_bargs);
3308 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3309 btrfs_set_balance_sys(leaf, item, &disk_bargs);
3310
3311 btrfs_set_balance_flags(leaf, item, bctl->flags);
3312
3313 btrfs_mark_buffer_dirty(leaf);
3314 out:
3315 btrfs_free_path(path);
3316 err = btrfs_commit_transaction(trans);
3317 if (err && !ret)
3318 ret = err;
3319 return ret;
3320 }
3321
3322 static int del_balance_item(struct btrfs_fs_info *fs_info)
3323 {
3324 struct btrfs_root *root = fs_info->tree_root;
3325 struct btrfs_trans_handle *trans;
3326 struct btrfs_path *path;
3327 struct btrfs_key key;
3328 int ret, err;
3329
3330 path = btrfs_alloc_path();
3331 if (!path)
3332 return -ENOMEM;
3333
3334 trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3335 if (IS_ERR(trans)) {
3336 btrfs_free_path(path);
3337 return PTR_ERR(trans);
3338 }
3339
3340 key.objectid = BTRFS_BALANCE_OBJECTID;
3341 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3342 key.offset = 0;
3343
3344 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3345 if (ret < 0)
3346 goto out;
3347 if (ret > 0) {
3348 ret = -ENOENT;
3349 goto out;
3350 }
3351
3352 ret = btrfs_del_item(trans, root, path);
3353 out:
3354 btrfs_free_path(path);
3355 err = btrfs_commit_transaction(trans);
3356 if (err && !ret)
3357 ret = err;
3358 return ret;
3359 }
3360
3361 /*
3362 * This is a heuristic used to reduce the number of chunks balanced on
3363 * resume after balance was interrupted.
3364 */
3365 static void update_balance_args(struct btrfs_balance_control *bctl)
3366 {
3367 /*
3368 * Turn on soft mode for chunk types that were being converted.
3369 */
3370 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3371 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3372 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3373 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3374 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3375 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3376
3377 /*
3378 * Turn on the usage filter if it is not already in use. The idea is
3379 * that chunks that we have already balanced should be
3380 * reasonably full. Don't do it for chunks that are being
3381 * converted - that will keep us from relocating unconverted
3382 * (albeit full) chunks.
3383 */
3384 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3385 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3386 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3387 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3388 bctl->data.usage = 90;
3389 }
3390 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3391 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3392 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3393 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3394 bctl->sys.usage = 90;
3395 }
3396 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3397 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3398 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3399 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3400 bctl->meta.usage = 90;
3401 }
3402 }
3403
3404 /*
3405 * Clear the balance status in fs_info and delete the balance item from disk.
3406 */
3407 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3408 {
3409 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3410 int ret;
3411
3412 BUG_ON(!fs_info->balance_ctl);
3413
3414 spin_lock(&fs_info->balance_lock);
3415 fs_info->balance_ctl = NULL;
3416 spin_unlock(&fs_info->balance_lock);
3417
3418 kfree(bctl);
3419 ret = del_balance_item(fs_info);
3420 if (ret)
3421 btrfs_handle_fs_error(fs_info, ret, NULL);
3422 }
3423
3424 /*
3425 * Balance filters. Return 1 if chunk should be filtered out
3426 * (should not be balanced).
3427 */
3428 static int chunk_profiles_filter(u64 chunk_type,
3429 struct btrfs_balance_args *bargs)
3430 {
3431 chunk_type = chunk_to_extended(chunk_type) &
3432 BTRFS_EXTENDED_PROFILE_MASK;
3433
3434 if (bargs->profiles & chunk_type)
3435 return 0;
3436
3437 return 1;
3438 }
3439
3440 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3441 struct btrfs_balance_args *bargs)
3442 {
3443 struct btrfs_block_group *cache;
3444 u64 chunk_used;
3445 u64 user_thresh_min;
3446 u64 user_thresh_max;
3447 int ret = 1;
3448
3449 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3450 chunk_used = cache->used;
3451
3452 if (bargs->usage_min == 0)
3453 user_thresh_min = 0;
3454 else
3455 user_thresh_min = div_factor_fine(cache->length,
3456 bargs->usage_min);
3457
3458 if (bargs->usage_max == 0)
3459 user_thresh_max = 1;
3460 else if (bargs->usage_max > 100)
3461 user_thresh_max = cache->length;
3462 else
3463 user_thresh_max = div_factor_fine(cache->length,
3464 bargs->usage_max);
3465
3466 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3467 ret = 0;
3468
3469 btrfs_put_block_group(cache);
3470 return ret;
3471 }
3472
3473 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3474 u64 chunk_offset, struct btrfs_balance_args *bargs)
3475 {
3476 struct btrfs_block_group *cache;
3477 u64 chunk_used, user_thresh;
3478 int ret = 1;
3479
3480 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3481 chunk_used = cache->used;
3482
3483 if (bargs->usage_min == 0)
3484 user_thresh = 1;
3485 else if (bargs->usage > 100)
3486 user_thresh = cache->length;
3487 else
3488 user_thresh = div_factor_fine(cache->length, bargs->usage);
3489
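/*
 * The chunk is kept for balancing only while its used bytes are below
 * the threshold, e.g. usage=50 on a 1GiB chunk relocates it only while
 * less than 512MiB is in use.
 */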
3490 if (chunk_used < user_thresh)
3491 ret = 0;
3492
3493 btrfs_put_block_group(cache);
3494 return ret;
3495 }
3496
3497 static int chunk_devid_filter(struct extent_buffer *leaf,
3498 struct btrfs_chunk *chunk,
3499 struct btrfs_balance_args *bargs)
3500 {
3501 struct btrfs_stripe *stripe;
3502 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3503 int i;
3504
3505 for (i = 0; i < num_stripes; i++) {
3506 stripe = btrfs_stripe_nr(chunk, i);
3507 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3508 return 0;
3509 }
3510
3511 return 1;
3512 }
3513
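/*
 * Number of stripes carrying distinct data in a chunk: parity profiles
 * subtract the parity stripes (e.g. a 6-stripe RAID6 chunk has 4 data
 * stripes), mirrored profiles divide by the copy count (e.g. a 4-stripe
 * RAID10 chunk with 2 copies has 2 data stripes).
 */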
3514 static u64 calc_data_stripes(u64 type, int num_stripes)
3515 {
3516 const int index = btrfs_bg_flags_to_raid_index(type);
3517 const int ncopies = btrfs_raid_array[index].ncopies;
3518 const int nparity = btrfs_raid_array[index].nparity;
3519
3520 if (nparity)
3521 return num_stripes - nparity;
3522 else
3523 return num_stripes / ncopies;
3524 }
3525
3526 /* [pstart, pend) */
3527 static int chunk_drange_filter(struct extent_buffer *leaf,
3528 struct btrfs_chunk *chunk,
3529 struct btrfs_balance_args *bargs)
3530 {
3531 struct btrfs_stripe *stripe;
3532 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3533 u64 stripe_offset;
3534 u64 stripe_length;
3535 u64 type;
3536 int factor;
3537 int i;
3538
3539 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3540 return 0;
3541
3542 type = btrfs_chunk_type(leaf, chunk);
3543 factor = calc_data_stripes(type, num_stripes);
3544
3545 for (i = 0; i < num_stripes; i++) {
3546 stripe = btrfs_stripe_nr(chunk, i);
3547 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3548 continue;
3549
3550 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3551 stripe_length = btrfs_chunk_length(leaf, chunk);
3552 stripe_length = div_u64(stripe_length, factor);
3553
3554 if (stripe_offset < bargs->pend &&
3555 stripe_offset + stripe_length > bargs->pstart)
3556 return 0;
3557 }
3558
3559 return 1;
3560 }
3561
3562 /* [vstart, vend) */
3563 static int chunk_vrange_filter(struct extent_buffer *leaf,
3564 struct btrfs_chunk *chunk,
3565 u64 chunk_offset,
3566 struct btrfs_balance_args *bargs)
3567 {
3568 if (chunk_offset < bargs->vend &&
3569 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3570 /* at least part of the chunk is inside this vrange */
3571 return 0;
3572
3573 return 1;
3574 }
3575
3576 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3577 struct btrfs_chunk *chunk,
3578 struct btrfs_balance_args *bargs)
3579 {
3580 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3581
3582 if (bargs->stripes_min <= num_stripes
3583 && num_stripes <= bargs->stripes_max)
3584 return 0;
3585
3586 return 1;
3587 }
3588
3589 static int chunk_soft_convert_filter(u64 chunk_type,
3590 struct btrfs_balance_args *bargs)
3591 {
3592 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3593 return 0;
3594
3595 chunk_type = chunk_to_extended(chunk_type) &
3596 BTRFS_EXTENDED_PROFILE_MASK;
3597
3598 if (bargs->target == chunk_type)
3599 return 1;
3600
3601 return 0;
3602 }
3603
3604 static int should_balance_chunk(struct extent_buffer *leaf,
3605 struct btrfs_chunk *chunk, u64 chunk_offset)
3606 {
3607 struct btrfs_fs_info *fs_info = leaf->fs_info;
3608 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3609 struct btrfs_balance_args *bargs = NULL;
3610 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3611
3612 /* type filter */
3613 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3614 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3615 return 0;
3616 }
3617
3618 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3619 bargs = &bctl->data;
3620 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3621 bargs = &bctl->sys;
3622 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3623 bargs = &bctl->meta;
3624
3625 /* profiles filter */
3626 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3627 chunk_profiles_filter(chunk_type, bargs)) {
3628 return 0;
3629 }
3630
3631 /* usage filter */
3632 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3633 chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3634 return 0;
3635 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3636 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3637 return 0;
3638 }
3639
3640 /* devid filter */
3641 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3642 chunk_devid_filter(leaf, chunk, bargs)) {
3643 return 0;
3644 }
3645
3646 /* drange filter, makes sense only with devid filter */
3647 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3648 chunk_drange_filter(leaf, chunk, bargs)) {
3649 return 0;
3650 }
3651
3652 /* vrange filter */
3653 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3654 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3655 return 0;
3656 }
3657
3658 /* stripes filter */
3659 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3660 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3661 return 0;
3662 }
3663
3664 /* soft profile changing mode */
3665 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3666 chunk_soft_convert_filter(chunk_type, bargs)) {
3667 return 0;
3668 }
3669
3670 /*
3671 * limited by count, must be the last filter
3672 */
3673 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3674 if (bargs->limit == 0)
3675 return 0;
3676 else
3677 bargs->limit--;
3678 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3679 /*
3680 * Same logic as the 'limit' filter; the minimum cannot be
3681 * determined here because we do not have the global information
3682 * about the count of all chunks that satisfy the filters.
3683 */
3684 if (bargs->limit_max == 0)
3685 return 0;
3686 else
3687 bargs->limit_max--;
3688 }
3689
3690 return 1;
3691 }
3692
3693 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3694 {
3695 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3696 struct btrfs_root *chunk_root = fs_info->chunk_root;
3697 u64 chunk_type;
3698 struct btrfs_chunk *chunk;
3699 struct btrfs_path *path = NULL;
3700 struct btrfs_key key;
3701 struct btrfs_key found_key;
3702 struct extent_buffer *leaf;
3703 int slot;
3704 int ret;
3705 int enospc_errors = 0;
3706 bool counting = true;
3707 /* The single value limit and min/max limits use the same bytes in the balance args union. */
3708 u64 limit_data = bctl->data.limit;
3709 u64 limit_meta = bctl->meta.limit;
3710 u64 limit_sys = bctl->sys.limit;
3711 u32 count_data = 0;
3712 u32 count_meta = 0;
3713 u32 count_sys = 0;
3714 int chunk_reserved = 0;
3715
3716 path = btrfs_alloc_path();
3717 if (!path) {
3718 ret = -ENOMEM;
3719 goto error;
3720 }
3721
3722 /* zero out stat counters */
3723 spin_lock(&fs_info->balance_lock);
3724 memset(&bctl->stat, 0, sizeof(bctl->stat));
3725 spin_unlock(&fs_info->balance_lock);
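	/*
	 * Two passes over the chunk tree: the first (counting == true) only
	 * counts the chunks that match the filters so that limit_min and the
	 * 'expected' statistic can be computed; the second pass does the
	 * actual relocation.
	 */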
3726 again:
3727 if (!counting) {
3728 /*
3729 * The single value limit and min/max limits use the same bytes in
3730 * the balance args union; restore the limits the counting pass used.
3731 */
3732 bctl->data.limit = limit_data;
3733 bctl->meta.limit = limit_meta;
3734 bctl->sys.limit = limit_sys;
3735 }
3736 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3737 key.offset = (u64)-1;
3738 key.type = BTRFS_CHUNK_ITEM_KEY;
3739
3740 while (1) {
3741 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3742 atomic_read(&fs_info->balance_cancel_req)) {
3743 ret = -ECANCELED;
3744 goto error;
3745 }
3746
3747 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3748 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3749 if (ret < 0) {
3750 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3751 goto error;
3752 }
3753
3754 /*
3755 * This shouldn't happen; it means the last chunk relocation
3756 * failed
3757 */
3758 if (ret == 0)
3759 BUG(); /* FIXME break ? */
3760
3761 ret = btrfs_previous_item(chunk_root, path, 0,
3762 BTRFS_CHUNK_ITEM_KEY);
3763 if (ret) {
3764 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3765 ret = 0;
3766 break;
3767 }
3768
3769 leaf = path->nodes[0];
3770 slot = path->slots[0];
3771 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3772
3773 if (found_key.objectid != key.objectid) {
3774 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3775 break;
3776 }
3777
3778 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3779 chunk_type = btrfs_chunk_type(leaf, chunk);
3780
3781 if (!counting) {
3782 spin_lock(&fs_info->balance_lock);
3783 bctl->stat.considered++;
3784 spin_unlock(&fs_info->balance_lock);
3785 }
3786
3787 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3788
3789 btrfs_release_path(path);
3790 if (!ret) {
3791 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3792 goto loop;
3793 }
3794
3795 if (counting) {
3796 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3797 spin_lock(&fs_info->balance_lock);
3798 bctl->stat.expected++;
3799 spin_unlock(&fs_info->balance_lock);
3800
3801 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3802 count_data++;
3803 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3804 count_sys++;
3805 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3806 count_meta++;
3807
3808 goto loop;
3809 }
3810
3811 /*
3812 * Apply the limit_min filter; there is no need to check whether the
3813 * LIMITS filter is used, since limit_min is 0 by default.
3814 */
3815 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3816 count_data < bctl->data.limit_min)
3817 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3818 count_meta < bctl->meta.limit_min)
3819 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3820 count_sys < bctl->sys.limit_min)) {
3821 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3822 goto loop;
3823 }
3824
3825 if (!chunk_reserved) {
3826 /*
3827 * We may be relocating the only data chunk we have,
3828 * which could potentially end up with losing data's
3829 * raid profile, so let's allocate an empty one in
3830 * advance.
3831 */
3832 ret = btrfs_may_alloc_data_chunk(fs_info,
3833 found_key.offset);
3834 if (ret < 0) {
3835 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3836 goto error;
3837 } else if (ret == 1) {
3838 chunk_reserved = 1;
3839 }
3840 }
3841
3842 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3843 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3844 if (ret == -ENOSPC) {
3845 enospc_errors++;
3846 } else if (ret == -ETXTBSY) {
3847 btrfs_info(fs_info,
3848 "skipping relocation of block group %llu due to active swapfile",
3849 found_key.offset);
3850 ret = 0;
3851 } else if (ret) {
3852 goto error;
3853 } else {
3854 spin_lock(&fs_info->balance_lock);
3855 bctl->stat.completed++;
3856 spin_unlock(&fs_info->balance_lock);
3857 }
3858 loop:
3859 if (found_key.offset == 0)
3860 break;
3861 key.offset = found_key.offset - 1;
3862 }
3863
3864 if (counting) {
3865 btrfs_release_path(path);
3866 counting = false;
3867 goto again;
3868 }
3869 error:
3870 btrfs_free_path(path);
3871 if (enospc_errors) {
3872 btrfs_info(fs_info, "%d enospc errors during balance",
3873 enospc_errors);
3874 if (!ret)
3875 ret = -ENOSPC;
3876 }
3877
3878 return ret;
3879 }
3880
3881 /**
3882 * alloc_profile_is_valid - see if a given profile is valid and reduced
3883 * @flags: profile to validate
3884 * @extended: if true @flags is treated as an extended profile
3885 */
3886 static int alloc_profile_is_valid(u64 flags, int extended)
3887 {
3888 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3889 BTRFS_BLOCK_GROUP_PROFILE_MASK);
3890
3891 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3892
3893 /* 1) check that all other bits are zeroed */
3894 if (flags & ~mask)
3895 return 0;
3896
3897 /* 2) see if profile is reduced */
3898 if (flags == 0)
3899 return !extended; /* "0" is valid for usual profiles */
3900
3901 return has_single_bit_set(flags);
3902 }
3903
3904 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3905 {
3906 /* cancel requested || normal exit path */
3907 return atomic_read(&fs_info->balance_cancel_req) ||
3908 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3909 atomic_read(&fs_info->balance_cancel_req) == 0);
3910 }
3911
3912 /*
3913 * Validate target profile against allowed profiles and return true if it's OK.
3914 * Otherwise print the error message and return false.
3915 */
3916 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
3917 const struct btrfs_balance_args *bargs,
3918 u64 allowed, const char *type)
3919 {
3920 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3921 return true;
3922
3923 /* Profile is valid and does not have bits outside of the allowed set */
3924 if (alloc_profile_is_valid(bargs->target, 1) &&
3925 (bargs->target & ~allowed) == 0)
3926 return true;
3927
3928 btrfs_err(fs_info, "balance: invalid convert %s profile %s",
3929 type, btrfs_bg_type_to_raid_name(bargs->target));
3930 return false;
3931 }
3932
3933 /*
3934 * Fill @buf with textual description of balance filter flags @bargs, up to
3935 * @size_buf including the terminating null. The output may be trimmed if it
3936 * does not fit into the provided buffer.
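 * An illustrative example: a balance with the usage and limit filters
 * enabled might be rendered as "usage=50,limit=10".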
3937 */
3938 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3939 u32 size_buf)
3940 {
3941 int ret;
3942 u32 size_bp = size_buf;
3943 char *bp = buf;
3944 u64 flags = bargs->flags;
3945 char tmp_buf[128] = {'\0'};
3946
3947 if (!flags)
3948 return;
3949
3950 #define CHECK_APPEND_NOARG(a) \
3951 do { \
3952 ret = snprintf(bp, size_bp, (a)); \
3953 if (ret < 0 || ret >= size_bp) \
3954 goto out_overflow; \
3955 size_bp -= ret; \
3956 bp += ret; \
3957 } while (0)
3958
3959 #define CHECK_APPEND_1ARG(a, v1) \
3960 do { \
3961 ret = snprintf(bp, size_bp, (a), (v1)); \
3962 if (ret < 0 || ret >= size_bp) \
3963 goto out_overflow; \
3964 size_bp -= ret; \
3965 bp += ret; \
3966 } while (0)
3967
3968 #define CHECK_APPEND_2ARG(a, v1, v2) \
3969 do { \
3970 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
3971 if (ret < 0 || ret >= size_bp) \
3972 goto out_overflow; \
3973 size_bp -= ret; \
3974 bp += ret; \
3975 } while (0)
3976
3977 if (flags & BTRFS_BALANCE_ARGS_CONVERT)
3978 CHECK_APPEND_1ARG("convert=%s,",
3979 btrfs_bg_type_to_raid_name(bargs->target));
3980
3981 if (flags & BTRFS_BALANCE_ARGS_SOFT)
3982 CHECK_APPEND_NOARG("soft,");
3983
3984 if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
3985 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
3986 sizeof(tmp_buf));
3987 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
3988 }
3989
3990 if (flags & BTRFS_BALANCE_ARGS_USAGE)
3991 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
3992
3993 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
3994 CHECK_APPEND_2ARG("usage=%u..%u,",
3995 bargs->usage_min, bargs->usage_max);
3996
3997 if (flags & BTRFS_BALANCE_ARGS_DEVID)
3998 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
3999
4000 if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4001 CHECK_APPEND_2ARG("drange=%llu..%llu,",
4002 bargs->pstart, bargs->pend);
4003
4004 if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4005 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4006 bargs->vstart, bargs->vend);
4007
4008 if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4009 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4010
4011 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4012 CHECK_APPEND_2ARG("limit=%u..%u,",
4013 bargs->limit_min, bargs->limit_max);
4014
4015 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4016 CHECK_APPEND_2ARG("stripes=%u..%u,",
4017 bargs->stripes_min, bargs->stripes_max);
4018
4019 #undef CHECK_APPEND_2ARG
4020 #undef CHECK_APPEND_1ARG
4021 #undef CHECK_APPEND_NOARG
4022
4023 out_overflow:
4024
4025 if (size_bp < size_buf)
4026 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4027 else
4028 buf[0] = '\0';
4029 }
4030
4031 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4032 {
4033 u32 size_buf = 1024;
4034 char tmp_buf[192] = {'\0'};
4035 char *buf;
4036 char *bp;
4037 u32 size_bp = size_buf;
4038 int ret;
4039 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4040
4041 buf = kzalloc(size_buf, GFP_KERNEL);
4042 if (!buf)
4043 return;
4044
4045 bp = buf;
4046
4047 #define CHECK_APPEND_1ARG(a, v1) \
4048 do { \
4049 ret = snprintf(bp, size_bp, (a), (v1)); \
4050 if (ret < 0 || ret >= size_bp) \
4051 goto out_overflow; \
4052 size_bp -= ret; \
4053 bp += ret; \
4054 } while (0)
4055
4056 if (bctl->flags & BTRFS_BALANCE_FORCE)
4057 CHECK_APPEND_1ARG("%s", "-f ");
4058
4059 if (bctl->flags & BTRFS_BALANCE_DATA) {
4060 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4061 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4062 }
4063
4064 if (bctl->flags & BTRFS_BALANCE_METADATA) {
4065 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4066 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4067 }
4068
4069 if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4070 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4071 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4072 }
4073
4074 #undef CHECK_APPEND_1ARG
4075
4076 out_overflow:
4077
4078 if (size_bp < size_buf)
4079 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4080 btrfs_info(fs_info, "balance: %s %s",
4081 (bctl->flags & BTRFS_BALANCE_RESUME) ?
4082 "resume" : "start", buf);
4083
4084 kfree(buf);
4085 }
4086
4087 /*
4088 * Should be called with the balance mutex held
4089 */
4090 int btrfs_balance(struct btrfs_fs_info *fs_info,
4091 struct btrfs_balance_control *bctl,
4092 struct btrfs_ioctl_balance_args *bargs)
4093 {
4094 u64 meta_target, data_target;
4095 u64 allowed;
4096 int mixed = 0;
4097 int ret;
4098 u64 num_devices;
4099 unsigned seq;
4100 bool reducing_redundancy;
4101 int i;
4102
4103 if (btrfs_fs_closing(fs_info) ||
4104 atomic_read(&fs_info->balance_pause_req) ||
4105 btrfs_should_cancel_balance(fs_info)) {
4106 ret = -EINVAL;
4107 goto out;
4108 }
4109
4110 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4111 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4112 mixed = 1;
4113
4114 /*
4115 * In case of mixed groups both data and meta should be picked,
4116 * and identical options should be given for both of them.
4117 */
4118 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4119 if (mixed && (bctl->flags & allowed)) {
4120 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4121 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4122 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4123 btrfs_err(fs_info,
4124 "balance: mixed groups data and metadata options must be the same");
4125 ret = -EINVAL;
4126 goto out;
4127 }
4128 }
4129
4130 /*
4131 * rw_devices will not change at the moment, since device add, delete
4132 * and replace are exclusive operations.
4133 */
4134 num_devices = fs_info->fs_devices->rw_devices;
4135
4136 /*
4137 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4138 * special bit for it, to make it easier to distinguish. Thus we need
4139 * to set it manually, or balance would refuse the profile.
4140 */
4141 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4142 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4143 if (num_devices >= btrfs_raid_array[i].devs_min)
4144 allowed |= btrfs_raid_array[i].bg_flag;
4145
4146 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4147 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4148 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
4149 ret = -EINVAL;
4150 goto out;
4151 }
4152
4153 /*
4154 * Allow to reduce metadata or system integrity only if force set for
4155 * profiles with redundancy (copies, parity)
4156 */
4157 allowed = 0;
4158 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4159 if (btrfs_raid_array[i].ncopies >= 2 ||
4160 btrfs_raid_array[i].tolerated_failures >= 1)
4161 allowed |= btrfs_raid_array[i].bg_flag;
4162 }
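	/*
	 * Sample the available alloc bits under the profiles seqlock;
	 * concurrent chunk allocation can change them, hence the retry loop.
	 */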
4163 do {
4164 seq = read_seqbegin(&fs_info->profiles_lock);
4165
4166 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4167 (fs_info->avail_system_alloc_bits & allowed) &&
4168 !(bctl->sys.target & allowed)) ||
4169 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4170 (fs_info->avail_metadata_alloc_bits & allowed) &&
4171 !(bctl->meta.target & allowed)))
4172 reducing_redundancy = true;
4173 else
4174 reducing_redundancy = false;
4175
4176 /* if we're not converting, the target field is uninitialized */
4177 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4178 bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4179 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4180 bctl->data.target : fs_info->avail_data_alloc_bits;
4181 } while (read_seqretry(&fs_info->profiles_lock, seq));
4182
4183 if (reducing_redundancy) {
4184 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4185 btrfs_info(fs_info,
4186 "balance: force reducing metadata redundancy");
4187 } else {
4188 btrfs_err(fs_info,
4189 "balance: reduces metadata redundancy, use --force if you want this");
4190 ret = -EINVAL;
4191 goto out;
4192 }
4193 }
4194
4195 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4196 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4197 btrfs_warn(fs_info,
4198 "balance: metadata profile %s has lower redundancy than data profile %s",
4199 btrfs_bg_type_to_raid_name(meta_target),
4200 btrfs_bg_type_to_raid_name(data_target));
4201 }
4202
4203 if (fs_info->send_in_progress) {
4204 btrfs_warn_rl(fs_info,
4205 "cannot run balance while send operations are in progress (%d in progress)",
4206 fs_info->send_in_progress);
4207 ret = -EAGAIN;
4208 goto out;
4209 }
4210
4211 ret = insert_balance_item(fs_info, bctl);
4212 if (ret && ret != -EEXIST)
4213 goto out;
4214
4215 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4216 BUG_ON(ret == -EEXIST);
4217 BUG_ON(fs_info->balance_ctl);
4218 spin_lock(&fs_info->balance_lock);
4219 fs_info->balance_ctl = bctl;
4220 spin_unlock(&fs_info->balance_lock);
4221 } else {
4222 BUG_ON(ret != -EEXIST);
4223 spin_lock(&fs_info->balance_lock);
4224 update_balance_args(bctl);
4225 spin_unlock(&fs_info->balance_lock);
4226 }
4227
4228 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4229 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4230 describe_balance_start_or_resume(fs_info);
4231 mutex_unlock(&fs_info->balance_mutex);
4232
4233 ret = __btrfs_balance(fs_info);
4234
4235 mutex_lock(&fs_info->balance_mutex);
4236 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4237 btrfs_info(fs_info, "balance: paused");
4238 /*
4239 * Balance can be canceled by:
4240 *
4241 * - Regular cancel request
4242 * Then ret == -ECANCELED and balance_cancel_req > 0
4243 *
4244 * - Fatal signal to "btrfs" process
4245 * Either the signal caught by wait_reserve_ticket() and callers
4246 * got -EINTR, or caught by btrfs_should_cancel_balance() and
4247 * got -ECANCELED.
4248 * Either way, in this case balance_cancel_req = 0, and
4249 * ret == -EINTR or ret == -ECANCELED.
4250 *
4251 * So here we only check the return value to catch canceled balance.
4252 */
4253 else if (ret == -ECANCELED || ret == -EINTR)
4254 btrfs_info(fs_info, "balance: canceled");
4255 else
4256 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4257
4258 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4259
4260 if (bargs) {
4261 memset(bargs, 0, sizeof(*bargs));
4262 btrfs_update_ioctl_balance_args(fs_info, bargs);
4263 }
4264
4265 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4266 balance_need_close(fs_info)) {
4267 reset_balance_state(fs_info);
4268 btrfs_exclop_finish(fs_info);
4269 }
4270
4271 wake_up(&fs_info->balance_wait_q);
4272
4273 return ret;
4274 out:
4275 if (bctl->flags & BTRFS_BALANCE_RESUME)
4276 reset_balance_state(fs_info);
4277 else
4278 kfree(bctl);
4279 btrfs_exclop_finish(fs_info);
4280
4281 return ret;
4282 }
4283
4284 static int balance_kthread(void *data)
4285 {
4286 struct btrfs_fs_info *fs_info = data;
4287 int ret = 0;
4288
4289 sb_start_write(fs_info->sb);
4290 mutex_lock(&fs_info->balance_mutex);
4291 if (fs_info->balance_ctl)
4292 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4293 mutex_unlock(&fs_info->balance_mutex);
4294 sb_end_write(fs_info->sb);
4295
4296 return ret;
4297 }
4298
4299 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4300 {
4301 struct task_struct *tsk;
4302
4303 mutex_lock(&fs_info->balance_mutex);
4304 if (!fs_info->balance_ctl) {
4305 mutex_unlock(&fs_info->balance_mutex);
4306 return 0;
4307 }
4308 mutex_unlock(&fs_info->balance_mutex);
4309
4310 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4311 btrfs_info(fs_info, "balance: resume skipped");
4312 return 0;
4313 }
4314
4315 /*
4316 * A ro->rw remount sequence should continue with the paused balance
4317 * regardless of who pauses it, system or the user as of now, so set
4318 * the resume flag.
4319 */
4320 spin_lock(&fs_info->balance_lock);
4321 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4322 spin_unlock(&fs_info->balance_lock);
4323
4324 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4325 return PTR_ERR_OR_ZERO(tsk);
4326 }
4327
4328 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4329 {
4330 struct btrfs_balance_control *bctl;
4331 struct btrfs_balance_item *item;
4332 struct btrfs_disk_balance_args disk_bargs;
4333 struct btrfs_path *path;
4334 struct extent_buffer *leaf;
4335 struct btrfs_key key;
4336 int ret;
4337
4338 path = btrfs_alloc_path();
4339 if (!path)
4340 return -ENOMEM;
4341
4342 key.objectid = BTRFS_BALANCE_OBJECTID;
4343 key.type = BTRFS_TEMPORARY_ITEM_KEY;
4344 key.offset = 0;
4345
4346 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4347 if (ret < 0)
4348 goto out;
4349 if (ret > 0) { /* ret = -ENOENT; */
4350 ret = 0;
4351 goto out;
4352 }
4353
4354 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4355 if (!bctl) {
4356 ret = -ENOMEM;
4357 goto out;
4358 }
4359
4360 leaf = path->nodes[0];
4361 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4362
4363 bctl->flags = btrfs_balance_flags(leaf, item);
4364 bctl->flags |= BTRFS_BALANCE_RESUME;
4365
4366 btrfs_balance_data(leaf, item, &disk_bargs);
4367 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4368 btrfs_balance_meta(leaf, item, &disk_bargs);
4369 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4370 btrfs_balance_sys(leaf, item, &disk_bargs);
4371 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4372
4373 /*
4374 * This should never happen, as the paused balance state is recovered
4375 * during mount without any chance for other exclusive ops to collide.
4376 *
4377 * This gives the exclusive op status to balance and keeps it in paused
4378 * state until user intervention (cancel or umount). If the ownership
4379 * cannot be assigned, show a message but do not fail. The balance
4380 * is in a paused state and must have fs_info::balance_ctl properly
4381 * set up.
4382 */
4383 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4384 btrfs_warn(fs_info,
4385 "balance: cannot set exclusive op status, resume manually");
4386
4387 btrfs_release_path(path);
4388
4389 mutex_lock(&fs_info->balance_mutex);
4390 BUG_ON(fs_info->balance_ctl);
4391 spin_lock(&fs_info->balance_lock);
4392 fs_info->balance_ctl = bctl;
4393 spin_unlock(&fs_info->balance_lock);
4394 mutex_unlock(&fs_info->balance_mutex);
4395 out:
4396 btrfs_free_path(path);
4397 return ret;
4398 }
4399
4400 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4401 {
4402 int ret = 0;
4403
4404 mutex_lock(&fs_info->balance_mutex);
4405 if (!fs_info->balance_ctl) {
4406 mutex_unlock(&fs_info->balance_mutex);
4407 return -ENOTCONN;
4408 }
4409
4410 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4411 atomic_inc(&fs_info->balance_pause_req);
4412 mutex_unlock(&fs_info->balance_mutex);
4413
4414 wait_event(fs_info->balance_wait_q,
4415 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4416
4417 mutex_lock(&fs_info->balance_mutex);
4418 /* we are good with balance_ctl ripped off from under us */
4419 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4420 atomic_dec(&fs_info->balance_pause_req);
4421 } else {
4422 ret = -ENOTCONN;
4423 }
4424
4425 mutex_unlock(&fs_info->balance_mutex);
4426 return ret;
4427 }
4428
4429 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4430 {
4431 mutex_lock(&fs_info->balance_mutex);
4432 if (!fs_info->balance_ctl) {
4433 mutex_unlock(&fs_info->balance_mutex);
4434 return -ENOTCONN;
4435 }
4436
4437 /*
4438 * A paused balance with the item stored on disk can be resumed at
4439 * mount time if the mount is read-write. Otherwise it's still paused
4440 * and we must not allow cancelling as it deletes the item.
4441 */
4442 if (sb_rdonly(fs_info->sb)) {
4443 mutex_unlock(&fs_info->balance_mutex);
4444 return -EROFS;
4445 }
4446
4447 atomic_inc(&fs_info->balance_cancel_req);
4448 /*
4449 * If a balance is running, just wait for it to finish; the balance
4450 * item is deleted in btrfs_balance() in that case.
4451 */
4452 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4453 mutex_unlock(&fs_info->balance_mutex);
4454 wait_event(fs_info->balance_wait_q,
4455 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4456 mutex_lock(&fs_info->balance_mutex);
4457 } else {
4458 mutex_unlock(&fs_info->balance_mutex);
4459 /*
4460 * Lock released to allow other waiters to continue; we'll
4461 * reexamine the status afterwards.
4462 */
4463 mutex_lock(&fs_info->balance_mutex);
4464
4465 if (fs_info->balance_ctl) {
4466 reset_balance_state(fs_info);
4467 btrfs_exclop_finish(fs_info);
4468 btrfs_info(fs_info, "balance: canceled");
4469 }
4470 }
4471
4472 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4473 atomic_dec(&fs_info->balance_cancel_req);
4474 mutex_unlock(&fs_info->balance_mutex);
4475 return 0;
4476 }
4477
4478 int btrfs_uuid_scan_kthread(void *data)
4479 {
4480 struct btrfs_fs_info *fs_info = data;
4481 struct btrfs_root *root = fs_info->tree_root;
4482 struct btrfs_key key;
4483 struct btrfs_path *path = NULL;
4484 int ret = 0;
4485 struct extent_buffer *eb;
4486 int slot;
4487 struct btrfs_root_item root_item;
4488 u32 item_size;
4489 struct btrfs_trans_handle *trans = NULL;
4490 bool closing = false;
4491
4492 path = btrfs_alloc_path();
4493 if (!path) {
4494 ret = -ENOMEM;
4495 goto out;
4496 }
4497
4498 key.objectid = 0;
4499 key.type = BTRFS_ROOT_ITEM_KEY;
4500 key.offset = 0;
4501
4502 while (1) {
4503 if (btrfs_fs_closing(fs_info)) {
4504 closing = true;
4505 break;
4506 }
4507 ret = btrfs_search_forward(root, &key, path,
4508 BTRFS_OLDEST_GENERATION);
4509 if (ret) {
4510 if (ret > 0)
4511 ret = 0;
4512 break;
4513 }
4514
4515 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4516 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4517 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4518 key.objectid > BTRFS_LAST_FREE_OBJECTID)
4519 goto skip;
4520
4521 eb = path->nodes[0];
4522 slot = path->slots[0];
4523 item_size = btrfs_item_size_nr(eb, slot);
4524 if (item_size < sizeof(root_item))
4525 goto skip;
4526
4527 read_extent_buffer(eb, &root_item,
4528 btrfs_item_ptr_offset(eb, slot),
4529 (int)sizeof(root_item));
4530 if (btrfs_root_refs(&root_item) == 0)
4531 goto skip;
4532
4533 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4534 !btrfs_is_empty_uuid(root_item.received_uuid)) {
4535 if (trans)
4536 goto update_tree;
4537
4538 btrfs_release_path(path);
4539 /*
4540 * 1 - subvol uuid item
4541 * 1 - received_subvol uuid item
4542 */
4543 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4544 if (IS_ERR(trans)) {
4545 ret = PTR_ERR(trans);
4546 break;
4547 }
4548 continue;
4549 } else {
4550 goto skip;
4551 }
4552 update_tree:
4553 btrfs_release_path(path);
4554 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4555 ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4556 BTRFS_UUID_KEY_SUBVOL,
4557 key.objectid);
4558 if (ret < 0) {
4559 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4560 ret);
4561 break;
4562 }
4563 }
4564
4565 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4566 ret = btrfs_uuid_tree_add(trans,
4567 root_item.received_uuid,
4568 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4569 key.objectid);
4570 if (ret < 0) {
4571 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4572 ret);
4573 break;
4574 }
4575 }
4576
4577 skip:
4578 btrfs_release_path(path);
4579 if (trans) {
4580 ret = btrfs_end_transaction(trans);
4581 trans = NULL;
4582 if (ret)
4583 break;
4584 }
4585
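		/*
		 * Advance the search key in (objectid, type, offset) order:
		 * bump offset first, then wrap around to the next type and
		 * objectid.
		 */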
4586 if (key.offset < (u64)-1) {
4587 key.offset++;
4588 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4589 key.offset = 0;
4590 key.type = BTRFS_ROOT_ITEM_KEY;
4591 } else if (key.objectid < (u64)-1) {
4592 key.offset = 0;
4593 key.type = BTRFS_ROOT_ITEM_KEY;
4594 key.objectid++;
4595 } else {
4596 break;
4597 }
4598 cond_resched();
4599 }
4600
4601 out:
4602 btrfs_free_path(path);
4603 if (trans && !IS_ERR(trans))
4604 btrfs_end_transaction(trans);
4605 if (ret)
4606 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4607 else if (!closing)
4608 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4609 up(&fs_info->uuid_tree_rescan_sem);
4610 return 0;
4611 }
4612
4613 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4614 {
4615 struct btrfs_trans_handle *trans;
4616 struct btrfs_root *tree_root = fs_info->tree_root;
4617 struct btrfs_root *uuid_root;
4618 struct task_struct *task;
4619 int ret;
4620
4621 /*
4622 * 1 - root node
4623 * 1 - root item
4624 */
4625 trans = btrfs_start_transaction(tree_root, 2);
4626 if (IS_ERR(trans))
4627 return PTR_ERR(trans);
4628
4629 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4630 if (IS_ERR(uuid_root)) {
4631 ret = PTR_ERR(uuid_root);
4632 btrfs_abort_transaction(trans, ret);
4633 btrfs_end_transaction(trans);
4634 return ret;
4635 }
4636
4637 fs_info->uuid_root = uuid_root;
4638
4639 ret = btrfs_commit_transaction(trans);
4640 if (ret)
4641 return ret;
4642
4643 down(&fs_info->uuid_tree_rescan_sem);
4644 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4645 if (IS_ERR(task)) {
4646 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4647 btrfs_warn(fs_info, "failed to start uuid_scan task");
4648 up(&fs_info->uuid_tree_rescan_sem);
4649 return PTR_ERR(task);
4650 }
4651
4652 return 0;
4653 }
4654
4655 /*
4656 * Shrinking a device means finding all of the device extents past
4657 * the new size, and then following the back refs to the chunks.
4658 * The chunk relocation code actually frees the device extents.
4659 */
4660 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4661 {
4662 struct btrfs_fs_info *fs_info = device->fs_info;
4663 struct btrfs_root *root = fs_info->dev_root;
4664 struct btrfs_trans_handle *trans;
4665 struct btrfs_dev_extent *dev_extent = NULL;
4666 struct btrfs_path *path;
4667 u64 length;
4668 u64 chunk_offset;
4669 int ret;
4670 int slot;
4671 int failed = 0;
4672 bool retried = false;
4673 struct extent_buffer *l;
4674 struct btrfs_key key;
4675 struct btrfs_super_block *super_copy = fs_info->super_copy;
4676 u64 old_total = btrfs_super_total_bytes(super_copy);
4677 u64 old_size = btrfs_device_get_total_bytes(device);
4678 u64 diff;
4679 u64 start;
4680
4681 new_size = round_down(new_size, fs_info->sectorsize);
4682 start = new_size;
4683 diff = round_down(old_size - new_size, fs_info->sectorsize);
4684
4685 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4686 return -EINVAL;
4687
4688 path = btrfs_alloc_path();
4689 if (!path)
4690 return -ENOMEM;
4691
4692 path->reada = READA_BACK;
4693
4694 trans = btrfs_start_transaction(root, 0);
4695 if (IS_ERR(trans)) {
4696 btrfs_free_path(path);
4697 return PTR_ERR(trans);
4698 }
4699
4700 mutex_lock(&fs_info->chunk_mutex);
4701
4702 btrfs_device_set_total_bytes(device, new_size);
4703 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4704 device->fs_devices->total_rw_bytes -= diff;
4705 atomic64_sub(diff, &fs_info->free_chunk_space);
4706 }
4707
4708 /*
4709 * Once the device's size has been set to the new size, ensure all
4710 * in-memory chunks are synced to disk so that the loop below sees them
4711 * and relocates them accordingly.
4712 */
4713 if (contains_pending_extent(device, &start, diff)) {
4714 mutex_unlock(&fs_info->chunk_mutex);
4715 ret = btrfs_commit_transaction(trans);
4716 if (ret)
4717 goto done;
4718 } else {
4719 mutex_unlock(&fs_info->chunk_mutex);
4720 btrfs_end_transaction(trans);
4721 }
4722
4723 again:
4724 key.objectid = device->devid;
4725 key.offset = (u64)-1;
4726 key.type = BTRFS_DEV_EXTENT_KEY;
4727
4728 do {
4729 mutex_lock(&fs_info->delete_unused_bgs_mutex);
4730 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4731 if (ret < 0) {
4732 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4733 goto done;
4734 }
4735
4736 ret = btrfs_previous_item(root, path, 0, key.type);
4737 if (ret)
4738 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4739 if (ret < 0)
4740 goto done;
4741 if (ret) {
4742 ret = 0;
4743 btrfs_release_path(path);
4744 break;
4745 }
4746
4747 l = path->nodes[0];
4748 slot = path->slots[0];
4749 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4750
4751 if (key.objectid != device->devid) {
4752 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4753 btrfs_release_path(path);
4754 break;
4755 }
4756
4757 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4758 length = btrfs_dev_extent_length(l, dev_extent);
4759
4760 if (key.offset + length <= new_size) {
4761 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4762 btrfs_release_path(path);
4763 break;
4764 }
4765
4766 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4767 btrfs_release_path(path);
4768
4769 /*
4770 * We may be relocating the only data chunk we have,
4771 * which could potentially end up with losing data's
4772 * raid profile, so let's allocate an empty one in
4773 * advance.
4774 */
4775 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4776 if (ret < 0) {
4777 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4778 goto done;
4779 }
4780
4781 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4782 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4783 if (ret == -ENOSPC) {
4784 failed++;
4785 } else if (ret) {
4786 if (ret == -ETXTBSY) {
4787 btrfs_warn(fs_info,
4788 "could not shrink block group %llu due to active swapfile",
4789 chunk_offset);
4790 }
4791 goto done;
4792 }
4793 } while (key.offset-- > 0);
4794
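	/*
	 * Relocating one chunk can free space that lets a previously failed
	 * relocation succeed, so retry the whole scan once before giving up
	 * with -ENOSPC.
	 */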
4795 if (failed && !retried) {
4796 failed = 0;
4797 retried = true;
4798 goto again;
4799 } else if (failed && retried) {
4800 ret = -ENOSPC;
4801 goto done;
4802 }
4803
4804 /* Shrinking succeeded, else we would be at "done". */
4805 trans = btrfs_start_transaction(root, 0);
4806 if (IS_ERR(trans)) {
4807 ret = PTR_ERR(trans);
4808 goto done;
4809 }
4810
4811 mutex_lock(&fs_info->chunk_mutex);
4812 /* Clear all state bits beyond the shrunk device size */
4813 clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4814 CHUNK_STATE_MASK);
4815
4816 btrfs_device_set_disk_total_bytes(device, new_size);
4817 if (list_empty(&device->post_commit_list))
4818 list_add_tail(&device->post_commit_list,
4819 &trans->transaction->dev_update_list);
4820
4821 WARN_ON(diff > old_total);
4822 btrfs_set_super_total_bytes(super_copy,
4823 round_down(old_total - diff, fs_info->sectorsize));
4824 mutex_unlock(&fs_info->chunk_mutex);
4825
4826 /* Now btrfs_update_device() will change the on-disk size. */
4827 ret = btrfs_update_device(trans, device);
4828 if (ret < 0) {
4829 btrfs_abort_transaction(trans, ret);
4830 btrfs_end_transaction(trans);
4831 } else {
4832 ret = btrfs_commit_transaction(trans);
4833 }
4834 done:
4835 btrfs_free_path(path);
4836 if (ret) {
4837 mutex_lock(&fs_info->chunk_mutex);
4838 btrfs_device_set_total_bytes(device, old_size);
4839 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4840 device->fs_devices->total_rw_bytes += diff;
4841 atomic64_add(diff, &fs_info->free_chunk_space);
4842 mutex_unlock(&fs_info->chunk_mutex);
4843 }
4844 return ret;
4845 }
4846
4847 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4848 struct btrfs_key *key,
4849 struct btrfs_chunk *chunk, int item_size)
4850 {
4851 struct btrfs_super_block *super_copy = fs_info->super_copy;
4852 struct btrfs_disk_key disk_key;
4853 u32 array_size;
4854 u8 *ptr;
4855
4856 mutex_lock(&fs_info->chunk_mutex);
4857 array_size = btrfs_super_sys_array_size(super_copy);
4858 if (array_size + item_size + sizeof(disk_key)
4859 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4860 mutex_unlock(&fs_info->chunk_mutex);
4861 return -EFBIG;
4862 }
4863
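	/* sys_chunk_array stores back-to-back (disk key, chunk item) pairs. */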
4864 ptr = super_copy->sys_chunk_array + array_size;
4865 btrfs_cpu_key_to_disk(&disk_key, key);
4866 memcpy(ptr, &disk_key, sizeof(disk_key));
4867 ptr += sizeof(disk_key);
4868 memcpy(ptr, chunk, item_size);
4869 item_size += sizeof(disk_key);
4870 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4871 mutex_unlock(&fs_info->chunk_mutex);
4872
4873 return 0;
4874 }
4875
4876 /*
4877 * sort the devices in descending order by max_avail, total_avail
4878 */
4879 static int btrfs_cmp_device_info(const void *a, const void *b)
4880 {
4881 const struct btrfs_device_info *di_a = a;
4882 const struct btrfs_device_info *di_b = b;
4883
4884 if (di_a->max_avail > di_b->max_avail)
4885 return -1;
4886 if (di_a->max_avail < di_b->max_avail)
4887 return 1;
4888 if (di_a->total_avail > di_b->total_avail)
4889 return -1;
4890 if (di_a->total_avail < di_b->total_avail)
4891 return 1;
4892 return 0;
4893 }
4894
4895 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4896 {
4897 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4898 return;
4899
4900 btrfs_set_fs_incompat(info, RAID56);
4901 }
4902
4903 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
4904 {
4905 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
4906 return;
4907
4908 btrfs_set_fs_incompat(info, RAID1C34);
4909 }
4910
4911 /*
4912 * Structure used internally by btrfs_alloc_chunk().
4913 * Wraps the needed parameters.
4914 */
4915 struct alloc_chunk_ctl {
4916 u64 start;
4917 u64 type;
4918 /* Total number of stripes to allocate */
4919 int num_stripes;
4920 /* sub_stripes info for map */
4921 int sub_stripes;
4922 /* Stripes per device */
4923 int dev_stripes;
4924 /* Maximum number of devices to use */
4925 int devs_max;
4926 /* Minimum number of devices to use */
4927 int devs_min;
4928 /* ndevs has to be a multiple of this */
4929 int devs_increment;
4930 /* Number of copies */
4931 int ncopies;
4932 /* Number of stripes worth of bytes to store parity information */
4933 int nparity;
4934 u64 max_stripe_size;
4935 u64 max_chunk_size;
4936 u64 dev_extent_min;
4937 u64 stripe_size;
4938 u64 chunk_size;
4939 int ndevs;
4940 };
4941
4942 static void init_alloc_chunk_ctl_policy_regular(
4943 struct btrfs_fs_devices *fs_devices,
4944 struct alloc_chunk_ctl *ctl)
4945 {
4946 u64 type = ctl->type;
4947
4948 if (type & BTRFS_BLOCK_GROUP_DATA) {
4949 ctl->max_stripe_size = SZ_1G;
4950 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4951 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4952 /* For larger filesystems, use larger metadata chunks */
4953 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4954 ctl->max_stripe_size = SZ_1G;
4955 else
4956 ctl->max_stripe_size = SZ_256M;
4957 ctl->max_chunk_size = ctl->max_stripe_size;
4958 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4959 ctl->max_stripe_size = SZ_32M;
4960 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
4961 ctl->devs_max = min_t(int, ctl->devs_max,
4962 BTRFS_MAX_DEVS_SYS_CHUNK);
4963 } else {
4964 BUG();
4965 }
4966
4967 /* We don't want a chunk larger than 10% of writable space */
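	/* (div_factor(x, 1) is x * 1 / 10, i.e. 10% of total_rw_bytes) */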
4968 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4969 ctl->max_chunk_size);
4970 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
4971 }
4972
4973 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
4974 struct alloc_chunk_ctl *ctl)
4975 {
4976 int index = btrfs_bg_flags_to_raid_index(ctl->type);
4977
4978 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
4979 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
4980 ctl->devs_max = btrfs_raid_array[index].devs_max;
4981 if (!ctl->devs_max)
4982 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
4983 ctl->devs_min = btrfs_raid_array[index].devs_min;
4984 ctl->devs_increment = btrfs_raid_array[index].devs_increment;
4985 ctl->ncopies = btrfs_raid_array[index].ncopies;
4986 ctl->nparity = btrfs_raid_array[index].nparity;
4987 ctl->ndevs = 0;
4988
4989 switch (fs_devices->chunk_alloc_policy) {
4990 case BTRFS_CHUNK_ALLOC_REGULAR:
4991 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
4992 break;
4993 default:
4994 BUG();
4995 }
4996 }
4997
4998 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
4999 struct alloc_chunk_ctl *ctl,
5000 struct btrfs_device_info *devices_info)
5001 {
5002 struct btrfs_fs_info *info = fs_devices->fs_info;
5003 struct btrfs_device *device;
5004 u64 total_avail;
5005 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5006 int ret;
5007 int ndevs = 0;
5008 u64 max_avail;
5009 u64 dev_offset;
5010
5011 /*
5012 * in the first pass through the devices list, we gather information
5013 * about the available holes on each device.
5014 */
5015 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5016 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5017 WARN(1, KERN_ERR
5018 "BTRFS: read-only device in alloc_list\n");
5019 continue;
5020 }
5021
5022 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5023 &device->dev_state) ||
5024 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5025 continue;
5026
5027 if (device->total_bytes > device->bytes_used)
5028 total_avail = device->total_bytes - device->bytes_used;
5029 else
5030 total_avail = 0;
5031
5032 /* If there is no space on this device, skip it. */
5033 if (total_avail < ctl->dev_extent_min)
5034 continue;
5035
5036 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5037 &max_avail);
5038 if (ret && ret != -ENOSPC)
5039 return ret;
5040
5041 if (ret == 0)
5042 max_avail = dev_extent_want;
5043
5044 if (max_avail < ctl->dev_extent_min) {
5045 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5046 btrfs_debug(info,
5047 "%s: devid %llu has no free space, have=%llu want=%llu",
5048 __func__, device->devid, max_avail,
5049 ctl->dev_extent_min);
5050 continue;
5051 }
5052
5053 if (ndevs == fs_devices->rw_devices) {
5054 WARN(1, "%s: found more than %llu devices\n",
5055 __func__, fs_devices->rw_devices);
5056 break;
5057 }
5058 devices_info[ndevs].dev_offset = dev_offset;
5059 devices_info[ndevs].max_avail = max_avail;
5060 devices_info[ndevs].total_avail = total_avail;
5061 devices_info[ndevs].dev = device;
5062 ++ndevs;
5063 }
5064 ctl->ndevs = ndevs;
5065
5066 /*
5067 * now sort the devices by hole size / available space
5068 */
5069 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5070 btrfs_cmp_device_info, NULL);
5071
5072 return 0;
5073 }
5074
5075 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5076 struct btrfs_device_info *devices_info)
5077 {
5078 /* Number of stripes that count for block group size */
5079 int data_stripes;
5080
5081 /*
5082 * The primary goal is to maximize the number of stripes, so use as
5083 * many devices as possible, even if the stripes are not maximum sized.
5084 *
5085 * The DUP profile stores more than one stripe per device, the
5086 * max_avail is the total size so we have to adjust.
5087 */
5088 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5089 ctl->dev_stripes);
5090 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5091
5092 /* This will have to be fixed for RAID1 and RAID10 over more drives */
5093 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
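	/*
	 * For example, RAID10 with 8 stripes gives (8 - 0) / 2 == 4 data
	 * stripes; RAID6 with 6 stripes gives (6 - 2) / 1 == 4.
	 */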
5094
5095 /*
5096 * Use the number of data stripes to figure out how big this chunk is
5097 * really going to be in terms of logical address space, and compare
5098 * that answer with the max chunk size. If it's higher, we try to
5099 * reduce stripe_size.
5100 */
5101 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5102 /*
5103 * Reduce stripe_size, round it up to a 16MB boundary again and
5104 * then use it, unless it ends up being even bigger than the
5105 * previous value we had already.
5106 */
5107 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5108 data_stripes), SZ_16M),
5109 ctl->stripe_size);
5110 }
5111
5112 /* Align to BTRFS_STRIPE_LEN */
5113 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5114 ctl->chunk_size = ctl->stripe_size * data_stripes;
5115
5116 return 0;
5117 }
5118
5119 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5120 struct alloc_chunk_ctl *ctl,
5121 struct btrfs_device_info *devices_info)
5122 {
5123 struct btrfs_fs_info *info = fs_devices->fs_info;
5124
5125 /*
5126 * Round down to a usable number of stripes. devs_increment can be any
5127 * number, so we can't use round_down(), which requires a power of 2;
5128 * rounddown() is safe.
5129 */
5130 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5131
5132 if (ctl->ndevs < ctl->devs_min) {
5133 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5134 btrfs_debug(info,
5135 "%s: not enough devices with free space: have=%d minimum required=%d",
5136 __func__, ctl->ndevs, ctl->devs_min);
5137 }
5138 return -ENOSPC;
5139 }
5140
5141 ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5142
5143 switch (fs_devices->chunk_alloc_policy) {
5144 case BTRFS_CHUNK_ALLOC_REGULAR:
5145 return decide_stripe_size_regular(ctl, devices_info);
5146 default:
5147 BUG();
5148 }
5149 }
5150
5151 static int create_chunk(struct btrfs_trans_handle *trans,
5152 struct alloc_chunk_ctl *ctl,
5153 struct btrfs_device_info *devices_info)
5154 {
5155 struct btrfs_fs_info *info = trans->fs_info;
5156 struct map_lookup *map = NULL;
5157 struct extent_map_tree *em_tree;
5158 struct extent_map *em;
5159 u64 start = ctl->start;
5160 u64 type = ctl->type;
5161 int ret;
5162 int i;
5163 int j;
5164
5165 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5166 if (!map)
5167 return -ENOMEM;
5168 map->num_stripes = ctl->num_stripes;
5169
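	/*
	 * Device-major stripe layout: stripe s = i * dev_stripes + j lands on
	 * device i, so DUP (dev_stripes == 2) places both copies on the same
	 * device at consecutive offsets.
	 */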
5170 for (i = 0; i < ctl->ndevs; ++i) {
5171 for (j = 0; j < ctl->dev_stripes; ++j) {
5172 int s = i * ctl->dev_stripes + j;
5173 map->stripes[s].dev = devices_info[i].dev;
5174 map->stripes[s].physical = devices_info[i].dev_offset +
5175 j * ctl->stripe_size;
5176 }
5177 }
5178 map->stripe_len = BTRFS_STRIPE_LEN;
5179 map->io_align = BTRFS_STRIPE_LEN;
5180 map->io_width = BTRFS_STRIPE_LEN;
5181 map->type = type;
5182 map->sub_stripes = ctl->sub_stripes;
5183
5184 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5185
5186 em = alloc_extent_map();
5187 if (!em) {
5188 kfree(map);
5189 return -ENOMEM;
5190 }
5191 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5192 em->map_lookup = map;
5193 em->start = start;
5194 em->len = ctl->chunk_size;
5195 em->block_start = 0;
5196 em->block_len = em->len;
5197 em->orig_block_len = ctl->stripe_size;
5198
5199 em_tree = &info->mapping_tree;
5200 write_lock(&em_tree->lock);
5201 ret = add_extent_mapping(em_tree, em, 0);
5202 if (ret) {
5203 write_unlock(&em_tree->lock);
5204 free_extent_map(em);
5205 return ret;
5206 }
5207 write_unlock(&em_tree->lock);
5208
5209 ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5210 if (ret)
5211 goto error_del_extent;
5212
5213 for (i = 0; i < map->num_stripes; i++) {
5214 struct btrfs_device *dev = map->stripes[i].dev;
5215
5216 btrfs_device_set_bytes_used(dev,
5217 dev->bytes_used + ctl->stripe_size);
5218 if (list_empty(&dev->post_commit_list))
5219 list_add_tail(&dev->post_commit_list,
5220 &trans->transaction->dev_update_list);
5221 }
5222
5223 atomic64_sub(ctl->stripe_size * map->num_stripes,
5224 &info->free_chunk_space);
5225
5226 free_extent_map(em);
5227 check_raid56_incompat_flag(info, type);
5228 check_raid1c34_incompat_flag(info, type);
5229
5230 return 0;
5231
5232 error_del_extent:
5233 write_lock(&em_tree->lock);
5234 remove_extent_mapping(em_tree, em);
5235 write_unlock(&em_tree->lock);
5236
5237 /* One for our allocation */
5238 free_extent_map(em);
5239 /* One for the tree reference */
5240 free_extent_map(em);
5241
5242 return ret;
5243 }
5244
5245 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5246 {
5247 struct btrfs_fs_info *info = trans->fs_info;
5248 struct btrfs_fs_devices *fs_devices = info->fs_devices;
5249 struct btrfs_device_info *devices_info = NULL;
5250 struct alloc_chunk_ctl ctl;
5251 int ret;
5252
5253 lockdep_assert_held(&info->chunk_mutex);
5254
5255 if (!alloc_profile_is_valid(type, 0)) {
5256 ASSERT(0);
5257 return -EINVAL;
5258 }
5259
5260 if (list_empty(&fs_devices->alloc_list)) {
5261 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5262 btrfs_debug(info, "%s: no writable device", __func__);
5263 return -ENOSPC;
5264 }
5265
5266 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5267 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5268 ASSERT(0);
5269 return -EINVAL;
5270 }
5271
5272 ctl.start = find_next_chunk(info);
5273 ctl.type = type;
5274 init_alloc_chunk_ctl(fs_devices, &ctl);
5275
5276 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5277 GFP_NOFS);
5278 if (!devices_info)
5279 return -ENOMEM;
5280
5281 ret = gather_device_info(fs_devices, &ctl, devices_info);
5282 if (ret < 0)
5283 goto out;
5284
5285 ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5286 if (ret < 0)
5287 goto out;
5288
5289 ret = create_chunk(trans, &ctl, devices_info);
5290
5291 out:
5292 kfree(devices_info);
5293 return ret;
5294 }
5295
5296 /*
5297 * Chunk allocation falls into two parts. The first part does work
5298 * that makes the newly allocated chunk usable, but does not do any operation
5299 * that modifies the chunk tree. The second part does the work that
5300 * requires modifying the chunk tree. This division is important for the
5301 * bootstrap process of adding storage to a seed btrfs.
5302 */
5303 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5304 u64 chunk_offset, u64 chunk_size)
5305 {
5306 struct btrfs_fs_info *fs_info = trans->fs_info;
5307 struct btrfs_root *extent_root = fs_info->extent_root;
5308 struct btrfs_root *chunk_root = fs_info->chunk_root;
5309 struct btrfs_key key;
5310 struct btrfs_device *device;
5311 struct btrfs_chunk *chunk;
5312 struct btrfs_stripe *stripe;
5313 struct extent_map *em;
5314 struct map_lookup *map;
5315 size_t item_size;
5316 u64 dev_offset;
5317 u64 stripe_size;
5318 int i = 0;
5319 int ret = 0;
5320
5321 em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
5322 if (IS_ERR(em))
5323 return PTR_ERR(em);
5324
5325 map = em->map_lookup;
5326 item_size = btrfs_chunk_item_size(map->num_stripes);
5327 stripe_size = em->orig_block_len;
5328
5329 chunk = kzalloc(item_size, GFP_NOFS);
5330 if (!chunk) {
5331 ret = -ENOMEM;
5332 goto out;
5333 }
5334
5335 /*
5336 * Take the device list mutex to prevent races with the final phase of
5337 * a device replace operation that replaces the device object associated
5338 * with the map's stripes, because the device object's id can change
5339 * at any time during that final phase of the device replace operation
5340 * (dev-replace.c:btrfs_dev_replace_finishing()).
5341 */
5342 mutex_lock(&fs_info->fs_devices->device_list_mutex);
5343 for (i = 0; i < map->num_stripes; i++) {
5344 device = map->stripes[i].dev;
5345 dev_offset = map->stripes[i].physical;
5346
5347 ret = btrfs_update_device(trans, device);
5348 if (ret)
5349 break;
5350 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5351 dev_offset, stripe_size);
5352 if (ret)
5353 break;
5354 }
5355 if (ret) {
5356 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5357 goto out;
5358 }
5359
5360 stripe = &chunk->stripe;
5361 for (i = 0; i < map->num_stripes; i++) {
5362 device = map->stripes[i].dev;
5363 dev_offset = map->stripes[i].physical;
5364
5365 btrfs_set_stack_stripe_devid(stripe, device->devid);
5366 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5367 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5368 stripe++;
5369 }
5370 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5371
5372 btrfs_set_stack_chunk_length(chunk, chunk_size);
5373 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5374 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5375 btrfs_set_stack_chunk_type(chunk, map->type);
5376 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5377 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5378 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5379 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5380 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5381
5382 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5383 key.type = BTRFS_CHUNK_ITEM_KEY;
5384 key.offset = chunk_offset;
5385
5386 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5387 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5388 /*
5389 * TODO: Cleanup of inserted chunk root in case of
5390 * failure.
5391 */
5392 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5393 }
5394
5395 out:
5396 kfree(chunk);
5397 free_extent_map(em);
5398 return ret;
5399 }
5400
5401 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5402 {
5403 struct btrfs_fs_info *fs_info = trans->fs_info;
5404 u64 alloc_profile;
5405 int ret;
5406
5407 alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5408 ret = btrfs_alloc_chunk(trans, alloc_profile);
5409 if (ret)
5410 return ret;
5411
5412 alloc_profile = btrfs_system_alloc_profile(fs_info);
5413 ret = btrfs_alloc_chunk(trans, alloc_profile);
5414 return ret;
5415 }
5416
5417 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5418 {
5419 const int index = btrfs_bg_flags_to_raid_index(map->type);
5420
5421 return btrfs_raid_array[index].tolerated_failures;
5422 }
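/*
 * Worked example (editorial, values taken from btrfs_raid_array): a RAID1
 * chunk tolerates 1 failed device and a RAID6 chunk tolerates 2, so writes
 * to a RAID6 chunk can still succeed with up to two bad stripes.
 */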
5423
5424 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5425 {
5426 struct extent_map *em;
5427 struct map_lookup *map;
5428 int readonly = 0;
5429 int miss_ndevs = 0;
5430 int i;
5431
5432 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5433 if (IS_ERR(em))
5434 return 1;
5435
5436 map = em->map_lookup;
5437 for (i = 0; i < map->num_stripes; i++) {
5438 if (test_bit(BTRFS_DEV_STATE_MISSING,
5439 &map->stripes[i].dev->dev_state)) {
5440 miss_ndevs++;
5441 continue;
5442 }
5443 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5444 &map->stripes[i].dev->dev_state)) {
5445 readonly = 1;
5446 goto end;
5447 }
5448 }
5449
5450 /*
5451 * If the number of missing devices is larger than max errors,
5452 * we cannot write the data into that chunk successfully, so
5453 * set it readonly.
5454 */
5455 if (miss_ndevs > btrfs_chunk_max_errors(map))
5456 readonly = 1;
5457 end:
5458 free_extent_map(em);
5459 return readonly;
5460 }
5461
5462 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5463 {
5464 struct extent_map *em;
5465
5466 while (1) {
5467 write_lock(&tree->lock);
5468 em = lookup_extent_mapping(tree, 0, (u64)-1);
5469 if (em)
5470 remove_extent_mapping(tree, em);
5471 write_unlock(&tree->lock);
5472 if (!em)
5473 break;
5474 /* once for us */
5475 free_extent_map(em);
5476 /* once for the tree */
5477 free_extent_map(em);
5478 }
5479 }
5480
5481 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5482 {
5483 struct extent_map *em;
5484 struct map_lookup *map;
5485 int ret;
5486
5487 em = btrfs_get_chunk_map(fs_info, logical, len);
5488 if (IS_ERR(em))
5489 /*
5490 * We could return errors for these cases, but the callers could
5491 * not do anything useful with an error other than stop and exit,
5492 * which is what happens anyway. So return 1 so the callers don't
5493 * try to use other copies.
5494 */
5495 return 1;
5496
5497 map = em->map_lookup;
5498 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5499 ret = map->num_stripes;
5500 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5501 ret = map->sub_stripes;
5502 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5503 ret = 2;
5504 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5505 /*
5506 * There could be two corrupted data stripes, so we need
5507 * to retry in a loop in order to rebuild the correct data.
5508 *
5509 * Fail a stripe at a time on every retry except the
5510 * stripe under reconstruction.
5511 */
5512 ret = map->num_stripes;
5513 else
5514 ret = 1;
5515 free_extent_map(em);
5516
5517 down_read(&fs_info->dev_replace.rwsem);
5518 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5519 fs_info->dev_replace.tgtdev)
5520 ret++;
5521 up_read(&fs_info->dev_replace.rwsem);
5522
5523 return ret;
5524 }
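/*
 * Example (editorial): a RAID10 chunk reports map->sub_stripes == 2 copies
 * and a DUP or RAID1 chunk reports map->num_stripes copies; if a device
 * replace with an attached target device is running, one extra copy is
 * reported on top of that.
 */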
5525
5526 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5527 u64 logical)
5528 {
5529 struct extent_map *em;
5530 struct map_lookup *map;
5531 unsigned long len = fs_info->sectorsize;
5532
5533 em = btrfs_get_chunk_map(fs_info, logical, len);
5534
5535 if (!WARN_ON(IS_ERR(em))) {
5536 map = em->map_lookup;
5537 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5538 len = map->stripe_len * nr_data_stripes(map);
5539 free_extent_map(em);
5540 }
5541 return len;
5542 }
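/*
 * Worked example (editorial, assuming a 64K stripe_len): a 3-device RAID5
 * chunk has 2 data stripes, so the full stripe length is 2 * 64K == 128K;
 * for non-RAID56 profiles the sectorsize is returned.
 */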
5543
5544 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5545 {
5546 struct extent_map *em;
5547 struct map_lookup *map;
5548 int ret = 0;
5549
5550 em = btrfs_get_chunk_map(fs_info, logical, len);
5551
5552 if (!WARN_ON(IS_ERR(em))) {
5553 map = em->map_lookup;
5554 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5555 ret = 1;
5556 free_extent_map(em);
5557 }
5558 return ret;
5559 }
5560
5561 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5562 struct map_lookup *map, int first,
5563 int dev_replace_is_ongoing)
5564 {
5565 int i;
5566 int num_stripes;
5567 int preferred_mirror;
5568 int tolerance;
5569 struct btrfs_device *srcdev;
5570
5571 ASSERT((map->type &
5572 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5573
5574 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5575 num_stripes = map->sub_stripes;
5576 else
5577 num_stripes = map->num_stripes;
5578
5579 preferred_mirror = first + current->pid % num_stripes;
5580
5581 if (dev_replace_is_ongoing &&
5582 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5583 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5584 srcdev = fs_info->dev_replace.srcdev;
5585 else
5586 srcdev = NULL;
5587
5588 /*
5589 * try to avoid the drive that is the source drive for a
5590 * dev-replace procedure, only choose it if no other non-missing
5591 * mirror is available
5592 */
5593 for (tolerance = 0; tolerance < 2; tolerance++) {
5594 if (map->stripes[preferred_mirror].dev->bdev &&
5595 (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5596 return preferred_mirror;
5597 for (i = first; i < first + num_stripes; i++) {
5598 if (map->stripes[i].dev->bdev &&
5599 (tolerance || map->stripes[i].dev != srcdev))
5600 return i;
5601 }
5602 }
5603
5604 /* We couldn't find a mirror that doesn't fail. Just return something
5605 * and the I/O error handling code will clean up eventually.
5606 */
5607 return preferred_mirror;
5608 }
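/*
 * Editorial note: the current->pid modulo above is a cheap way to spread
 * reads across mirrors. E.g. on a two-way RAID1 chunk (num_stripes == 2,
 * first == 0), tasks with even pids prefer mirror 0 and tasks with odd
 * pids prefer mirror 1, before falling back to any non-missing mirror.
 */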
5609
5610 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5611 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5612 {
5613 int i;
5614 int again = 1;
5615
5616 while (again) {
5617 again = 0;
5618 for (i = 0; i < num_stripes - 1; i++) {
5619 /* Swap if parity is on a smaller index */
5620 if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
5621 swap(bbio->stripes[i], bbio->stripes[i + 1]);
5622 swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
5623 again = 1;
5624 }
5625 }
5626 }
5627 }
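/*
 * Example (editorial, assuming a 64K stripe_len): for a 3-disk RAID5 full
 * stripe the raid_map can arrive as
 * { RAID5_P_STRIPE, logical, logical + 64K }; since RAID5_P_STRIPE is
 * (u64)-2, the sort above moves the parity stripe to the end:
 * { logical, logical + 64K, RAID5_P_STRIPE }.
 */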
5628
5629 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5630 {
5631 struct btrfs_bio *bbio = kzalloc(
5632 /* the size of the btrfs_bio */
5633 sizeof(struct btrfs_bio) +
5634 /* plus the variable array for the stripes */
5635 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5636 /* plus the variable array for the tgt dev */
5637 sizeof(int) * (real_stripes) +
5638 /*
5639 * plus the raid_map, which includes both the tgt dev
5640 * and the stripes
5641 */
5642 sizeof(u64) * (total_stripes),
5643 GFP_NOFS|__GFP_NOFAIL);
5644
5645 atomic_set(&bbio->error, 0);
5646 refcount_set(&bbio->refs, 1);
5647
5648 bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
5649 bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
5650
5651 return bbio;
5652 }
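/*
 * Editorial note: the single allocation above yields this layout (a sketch,
 * actual sizes depend on the stripe counts):
 *
 *   [struct btrfs_bio][stripes[total_stripes]]
 *   [tgtdev_map[real_stripes]][raid_map[total_stripes]]
 *
 * which is why tgtdev_map and raid_map are derived by pointer arithmetic
 * rather than allocated separately.
 */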
5653
5654 void btrfs_get_bbio(struct btrfs_bio *bbio)
5655 {
5656 WARN_ON(!refcount_read(&bbio->refs));
5657 refcount_inc(&bbio->refs);
5658 }
5659
5660 void btrfs_put_bbio(struct btrfs_bio *bbio)
5661 {
5662 if (!bbio)
5663 return;
5664 if (refcount_dec_and_test(&bbio->refs))
5665 kfree(bbio);
5666 }
5667
5668 /* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5669 /*
5670 * Please note that a discard won't be sent to the target device of a
5671 * device replace.
5672 */
5673 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5674 u64 logical, u64 *length_ret,
5675 struct btrfs_bio **bbio_ret)
5676 {
5677 struct extent_map *em;
5678 struct map_lookup *map;
5679 struct btrfs_bio *bbio;
5680 u64 length = *length_ret;
5681 u64 offset;
5682 u64 stripe_nr;
5683 u64 stripe_nr_end;
5684 u64 stripe_end_offset;
5685 u64 stripe_cnt;
5686 u64 stripe_len;
5687 u64 stripe_offset;
5688 u64 num_stripes;
5689 u32 stripe_index;
5690 u32 factor = 0;
5691 u32 sub_stripes = 0;
5692 u64 stripes_per_dev = 0;
5693 u32 remaining_stripes = 0;
5694 u32 last_stripe = 0;
5695 int ret = 0;
5696 int i;
5697
5698 /* discard always returns a bbio */
5699 ASSERT(bbio_ret);
5700
5701 em = btrfs_get_chunk_map(fs_info, logical, length);
5702 if (IS_ERR(em))
5703 return PTR_ERR(em);
5704
5705 map = em->map_lookup;
5706 /* we don't discard raid56 yet */
5707 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5708 ret = -EOPNOTSUPP;
5709 goto out;
5710 }
5711
5712 offset = logical - em->start;
5713 length = min_t(u64, em->start + em->len - logical, length);
5714 *length_ret = length;
5715
5716 stripe_len = map->stripe_len;
5717 /*
5718 * stripe_nr counts the total number of stripes we have to stride
5719 * to get to this block
5720 */
5721 stripe_nr = div64_u64(offset, stripe_len);
5722
5723 /* stripe_offset is the offset of this block in its stripe */
5724 stripe_offset = offset - stripe_nr * stripe_len;
5725
5726 stripe_nr_end = round_up(offset + length, map->stripe_len);
5727 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5728 stripe_cnt = stripe_nr_end - stripe_nr;
5729 stripe_end_offset = stripe_nr_end * map->stripe_len -
5730 (offset + length);
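/*
 * Worked example (editorial, assuming a 64K stripe_len): a discard at
 * chunk offset 96K with length 192K gives stripe_nr = 1,
 * stripe_offset = 32K, stripe_nr_end = round_up(288K, 64K) / 64K = 5,
 * stripe_cnt = 4 and stripe_end_offset = 5 * 64K - 288K = 32K.
 */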
5731 /*
5732 * after this, stripe_nr is the number of stripes on this
5733 * device we have to walk to find the data, and stripe_index is
5734 * the number of our device in the stripe array
5735 */
5736 num_stripes = 1;
5737 stripe_index = 0;
5738 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5739 BTRFS_BLOCK_GROUP_RAID10)) {
5740 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5741 sub_stripes = 1;
5742 else
5743 sub_stripes = map->sub_stripes;
5744
5745 factor = map->num_stripes / sub_stripes;
5746 num_stripes = min_t(u64, map->num_stripes,
5747 sub_stripes * stripe_cnt);
5748 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5749 stripe_index *= sub_stripes;
5750 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5751 &remaining_stripes);
5752 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5753 last_stripe *= sub_stripes;
5754 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5755 BTRFS_BLOCK_GROUP_DUP)) {
5756 num_stripes = map->num_stripes;
5757 } else {
5758 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5759 &stripe_index);
5760 }
5761
5762 bbio = alloc_btrfs_bio(num_stripes, 0);
5763 if (!bbio) {
5764 ret = -ENOMEM;
5765 goto out;
5766 }
5767
5768 for (i = 0; i < num_stripes; i++) {
5769 bbio->stripes[i].physical =
5770 map->stripes[stripe_index].physical +
5771 stripe_offset + stripe_nr * map->stripe_len;
5772 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5773
5774 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5775 BTRFS_BLOCK_GROUP_RAID10)) {
5776 bbio->stripes[i].length = stripes_per_dev *
5777 map->stripe_len;
5778
5779 if (i / sub_stripes < remaining_stripes)
5780 bbio->stripes[i].length +=
5781 map->stripe_len;
5782
5783 /*
5784 * Special for the first stripe and
5785 * the last stripe:
5786 *
5787 * |-------|...|-------|
5788 * |----------|
5789 * off end_off
5790 */
5791 if (i < sub_stripes)
5792 bbio->stripes[i].length -=
5793 stripe_offset;
5794
5795 if (stripe_index >= last_stripe &&
5796 stripe_index <= (last_stripe +
5797 sub_stripes - 1))
5798 bbio->stripes[i].length -=
5799 stripe_end_offset;
5800
5801 if (i == sub_stripes - 1)
5802 stripe_offset = 0;
5803 } else {
5804 bbio->stripes[i].length = length;
5805 }
5806
5807 stripe_index++;
5808 if (stripe_index == map->num_stripes) {
5809 stripe_index = 0;
5810 stripe_nr++;
5811 }
5812 }
5813
5814 *bbio_ret = bbio;
5815 bbio->map_type = map->type;
5816 bbio->num_stripes = num_stripes;
5817 out:
5818 free_extent_map(em);
5819 return ret;
5820 }
5821
5822 /*
5823 * In dev-replace case, for repair case (that's the only case where the mirror
5824 * is selected explicitly when calling btrfs_map_block), blocks left of the
5825 * left cursor can also be read from the target drive.
5826 *
5827 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5828 * array of stripes.
5829 * For READ, it also needs to be supported using the same mirror number.
5830 *
5831 * If the requested block is not left of the left cursor, EIO is returned. This
5832 * can happen because btrfs_num_copies() returns one more in the dev-replace
5833 * case.
5834 */
5835 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5836 u64 logical, u64 length,
5837 u64 srcdev_devid, int *mirror_num,
5838 u64 *physical)
5839 {
5840 struct btrfs_bio *bbio = NULL;
5841 int num_stripes;
5842 int index_srcdev = 0;
5843 int found = 0;
5844 u64 physical_of_found = 0;
5845 int i;
5846 int ret = 0;
5847
5848 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5849 logical, &length, &bbio, 0, 0);
5850 if (ret) {
5851 ASSERT(bbio == NULL);
5852 return ret;
5853 }
5854
5855 num_stripes = bbio->num_stripes;
5856 if (*mirror_num > num_stripes) {
5857 /*
5858 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5859 * which means that the requested area is not left of the left
5860 * cursor.
5861 */
5862 btrfs_put_bbio(bbio);
5863 return -EIO;
5864 }
5865
5866 /*
5867 * Process the rest of the function using the mirror_num of the source
5868 * drive. Therefore look it up first. At the end, patch the device
5869 * pointer to that of the target drive.
5870 */
5871 for (i = 0; i < num_stripes; i++) {
5872 if (bbio->stripes[i].dev->devid != srcdev_devid)
5873 continue;
5874
5875 /*
5876 * In case of DUP, in order to keep it simple, only add the
5877 * mirror with the lowest physical address
5878 */
5879 if (found &&
5880 physical_of_found <= bbio->stripes[i].physical)
5881 continue;
5882
5883 index_srcdev = i;
5884 found = 1;
5885 physical_of_found = bbio->stripes[i].physical;
5886 }
5887
5888 btrfs_put_bbio(bbio);
5889
5890 ASSERT(found);
5891 if (!found)
5892 return -EIO;
5893
5894 *mirror_num = index_srcdev + 1;
5895 *physical = physical_of_found;
5896 return ret;
5897 }
5898
5899 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5900 struct btrfs_bio **bbio_ret,
5901 struct btrfs_dev_replace *dev_replace,
5902 int *num_stripes_ret, int *max_errors_ret)
5903 {
5904 struct btrfs_bio *bbio = *bbio_ret;
5905 u64 srcdev_devid = dev_replace->srcdev->devid;
5906 int tgtdev_indexes = 0;
5907 int num_stripes = *num_stripes_ret;
5908 int max_errors = *max_errors_ret;
5909 int i;
5910
5911 if (op == BTRFS_MAP_WRITE) {
5912 int index_where_to_add;
5913
5914 /*
5915 * duplicate the write operations while the dev replace
5916 * procedure is running. Since the copying of the old disk to
5917 * the new disk takes place at run time while the filesystem is
5918 * mounted writable, the regular write operations to the old
5919 * disk have to be duplicated to go to the new disk as well.
5920 *
5921 * Note that device->missing is handled by the caller, and that
5922 * the write to the old disk is already set up in the stripes
5923 * array.
5924 */
5925 index_where_to_add = num_stripes;
5926 for (i = 0; i < num_stripes; i++) {
5927 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5928 /* write to new disk, too */
5929 struct btrfs_bio_stripe *new =
5930 bbio->stripes + index_where_to_add;
5931 struct btrfs_bio_stripe *old =
5932 bbio->stripes + i;
5933
5934 new->physical = old->physical;
5935 new->length = old->length;
5936 new->dev = dev_replace->tgtdev;
5937 bbio->tgtdev_map[i] = index_where_to_add;
5938 index_where_to_add++;
5939 max_errors++;
5940 tgtdev_indexes++;
5941 }
5942 }
5943 num_stripes = index_where_to_add;
5944 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5945 int index_srcdev = 0;
5946 int found = 0;
5947 u64 physical_of_found = 0;
5948
5949 /*
5950 * During the dev-replace procedure, the target drive can also
5951 * be used to read data in case it is needed to repair a corrupt
5952 * block elsewhere. This is possible if the requested area is
5953 * left of the left cursor. In this area, the target drive is a
5954 * full copy of the source drive.
5955 */
5956 for (i = 0; i < num_stripes; i++) {
5957 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5958 /*
5959 * In case of DUP, in order to keep it simple,
5960 * only add the mirror with the lowest physical
5961 * address
5962 */
5963 if (found &&
5964 physical_of_found <=
5965 bbio->stripes[i].physical)
5966 continue;
5967 index_srcdev = i;
5968 found = 1;
5969 physical_of_found = bbio->stripes[i].physical;
5970 }
5971 }
5972 if (found) {
5973 struct btrfs_bio_stripe *tgtdev_stripe =
5974 bbio->stripes + num_stripes;
5975
5976 tgtdev_stripe->physical = physical_of_found;
5977 tgtdev_stripe->length =
5978 bbio->stripes[index_srcdev].length;
5979 tgtdev_stripe->dev = dev_replace->tgtdev;
5980 bbio->tgtdev_map[index_srcdev] = num_stripes;
5981
5982 tgtdev_indexes++;
5983 num_stripes++;
5984 }
5985 }
5986
5987 *num_stripes_ret = num_stripes;
5988 *max_errors_ret = max_errors;
5989 bbio->num_tgtdevs = tgtdev_indexes;
5990 *bbio_ret = bbio;
5991 }
5992
5993 static bool need_full_stripe(enum btrfs_map_op op)
5994 {
5995 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5996 }
5997
5998 /*
5999 * btrfs_get_io_geometry - calculates the geometry of a particular
6000 * (address, len) tuple. This information is used to calculate how big
6001 * a particular bio can get before it straddles a stripe.
6002 *
6003 * @fs_info - the filesystem
6004 * @logical - address that we want to figure out the geometry of
6005 * @len - the length of IO we are going to perform, starting at @logical
6006 * @op - type of operation - write or read
6007 * @io_geom - pointer used to return values
6008 *
6009 * Returns < 0 in case a chunk for the given logical address cannot be found
6010 * (which usually shouldn't happen unless @logical is corrupted), 0 otherwise.
6011 */
6012 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6013 u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
6014 {
6015 struct extent_map *em;
6016 struct map_lookup *map;
6017 u64 offset;
6018 u64 stripe_offset;
6019 u64 stripe_nr;
6020 u64 stripe_len;
6021 u64 raid56_full_stripe_start = (u64)-1;
6022 int data_stripes;
6023 int ret = 0;
6024
6025 ASSERT(op != BTRFS_MAP_DISCARD);
6026
6027 em = btrfs_get_chunk_map(fs_info, logical, len);
6028 if (IS_ERR(em))
6029 return PTR_ERR(em);
6030
6031 map = em->map_lookup;
6032 /* Offset of this logical address in the chunk */
6033 offset = logical - em->start;
6034 /* Len of a stripe in a chunk */
6035 stripe_len = map->stripe_len;
6036 /* Stripe where this block falls in */
6037 stripe_nr = div64_u64(offset, stripe_len);
6038 /* Offset of stripe in the chunk */
6039 stripe_offset = stripe_nr * stripe_len;
6040 if (offset < stripe_offset) {
6041 btrfs_crit(fs_info,
6042 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6043 stripe_offset, offset, em->start, logical, stripe_len);
6044 ret = -EINVAL;
6045 goto out;
6046 }
6047
6048 /* stripe_offset is the offset of this block in its stripe */
6049 stripe_offset = offset - stripe_offset;
6050 data_stripes = nr_data_stripes(map);
6051
6052 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6053 u64 max_len = stripe_len - stripe_offset;
6054
6055 /*
6056 * In case of raid56, we need to know the stripe-aligned start
6057 */
6058 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6059 unsigned long full_stripe_len = stripe_len * data_stripes;
6060 raid56_full_stripe_start = offset;
6061
6062 /*
6063 * Allow a write of a full stripe, but make sure we
6064 * don't allow straddling of stripes
6065 */
6066 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6067 full_stripe_len);
6068 raid56_full_stripe_start *= full_stripe_len;
6069
6070 /*
6071 * For writes to RAID[56], allow a full stripeset across
6072 * all disks. For other RAID types and for RAID[56]
6073 * reads, just allow a single stripe (on a single disk).
6074 */
6075 if (op == BTRFS_MAP_WRITE) {
6076 max_len = stripe_len * data_stripes -
6077 (offset - raid56_full_stripe_start);
6078 }
6079 }
6080 len = min_t(u64, em->len - offset, max_len);
6081 } else {
6082 len = em->len - offset;
6083 }
6084
6085 io_geom->len = len;
6086 io_geom->offset = offset;
6087 io_geom->stripe_len = stripe_len;
6088 io_geom->stripe_nr = stripe_nr;
6089 io_geom->stripe_offset = stripe_offset;
6090 io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6091
6092 out:
6093 /* once for us */
6094 free_extent_map(em);
6095 return ret;
6096 }
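/*
 * Worked example (editorial, assuming a 64K stripe_len): on a 3-device
 * RAID5 chunk (2 data stripes, 128K full stripe), a BTRFS_MAP_WRITE at
 * chunk offset 160K gets raid56_full_stripe_start = 128K and
 * len = 128K - (160K - 128K) = 96K, so the bio cannot cross the full
 * stripe boundary at 256K.
 */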
6097
6098 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6099 enum btrfs_map_op op,
6100 u64 logical, u64 *length,
6101 struct btrfs_bio **bbio_ret,
6102 int mirror_num, int need_raid_map)
6103 {
6104 struct extent_map *em;
6105 struct map_lookup *map;
6106 u64 stripe_offset;
6107 u64 stripe_nr;
6108 u64 stripe_len;
6109 u32 stripe_index;
6110 int data_stripes;
6111 int i;
6112 int ret = 0;
6113 int num_stripes;
6114 int max_errors = 0;
6115 int tgtdev_indexes = 0;
6116 struct btrfs_bio *bbio = NULL;
6117 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6118 int dev_replace_is_ongoing = 0;
6119 int num_alloc_stripes;
6120 int patch_the_first_stripe_for_dev_replace = 0;
6121 u64 physical_to_patch_in_first_stripe = 0;
6122 u64 raid56_full_stripe_start = (u64)-1;
6123 struct btrfs_io_geometry geom;
6124
6125 ASSERT(bbio_ret);
6126 ASSERT(op != BTRFS_MAP_DISCARD);
6127
6128 ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
6129 if (ret < 0)
6130 return ret;
6131
6132 em = btrfs_get_chunk_map(fs_info, logical, *length);
6133 ASSERT(!IS_ERR(em));
6134 map = em->map_lookup;
6135
6136 *length = geom.len;
6137 stripe_len = geom.stripe_len;
6138 stripe_nr = geom.stripe_nr;
6139 stripe_offset = geom.stripe_offset;
6140 raid56_full_stripe_start = geom.raid56_stripe_offset;
6141 data_stripes = nr_data_stripes(map);
6142
6143 down_read(&dev_replace->rwsem);
6144 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6145 /*
6146 * Hold the semaphore for read during the whole operation; the write
6147 * lock is requested at commit time but must wait.
6148 */
6149 if (!dev_replace_is_ongoing)
6150 up_read(&dev_replace->rwsem);
6151
6152 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6153 !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6154 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6155 dev_replace->srcdev->devid,
6156 &mirror_num,
6157 &physical_to_patch_in_first_stripe);
6158 if (ret)
6159 goto out;
6160 else
6161 patch_the_first_stripe_for_dev_replace = 1;
6162 } else if (mirror_num > map->num_stripes) {
6163 mirror_num = 0;
6164 }
6165
6166 num_stripes = 1;
6167 stripe_index = 0;
6168 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6169 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6170 &stripe_index);
6171 if (!need_full_stripe(op))
6172 mirror_num = 1;
6173 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6174 if (need_full_stripe(op))
6175 num_stripes = map->num_stripes;
6176 else if (mirror_num)
6177 stripe_index = mirror_num - 1;
6178 else {
6179 stripe_index = find_live_mirror(fs_info, map, 0,
6180 dev_replace_is_ongoing);
6181 mirror_num = stripe_index + 1;
6182 }
6183
6184 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6185 if (need_full_stripe(op)) {
6186 num_stripes = map->num_stripes;
6187 } else if (mirror_num) {
6188 stripe_index = mirror_num - 1;
6189 } else {
6190 mirror_num = 1;
6191 }
6192
6193 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6194 u32 factor = map->num_stripes / map->sub_stripes;
6195
6196 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6197 stripe_index *= map->sub_stripes;
6198
6199 if (need_full_stripe(op))
6200 num_stripes = map->sub_stripes;
6201 else if (mirror_num)
6202 stripe_index += mirror_num - 1;
6203 else {
6204 int old_stripe_index = stripe_index;
6205 stripe_index = find_live_mirror(fs_info, map,
6206 stripe_index,
6207 dev_replace_is_ongoing);
6208 mirror_num = stripe_index - old_stripe_index + 1;
6209 }
6210
6211 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6212 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6213 /* push stripe_nr back to the start of the full stripe */
6214 stripe_nr = div64_u64(raid56_full_stripe_start,
6215 stripe_len * data_stripes);
6216
6217 /* RAID[56] write or recovery. Return all stripes */
6218 num_stripes = map->num_stripes;
6219 max_errors = nr_parity_stripes(map);
6220
6221 *length = map->stripe_len;
6222 stripe_index = 0;
6223 stripe_offset = 0;
6224 } else {
6225 /*
6226 * Mirror #0 or #1 means the original data block.
6227 * Mirror #2 is RAID5 parity block.
6228 * Mirror #3 is RAID6 Q block.
6229 */
6230 stripe_nr = div_u64_rem(stripe_nr,
6231 data_stripes, &stripe_index);
6232 if (mirror_num > 1)
6233 stripe_index = data_stripes + mirror_num - 2;
6234
6235 /* We distribute the parity blocks across stripes */
6236 div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6237 &stripe_index);
6238 if (!need_full_stripe(op) && mirror_num <= 1)
6239 mirror_num = 1;
6240 }
6241 } else {
6242 /*
6243 * after this, stripe_nr is the number of stripes on this
6244 * device we have to walk to find the data, and stripe_index is
6245 * the number of our device in the stripe array
6246 */
6247 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6248 &stripe_index);
6249 mirror_num = stripe_index + 1;
6250 }
6251 if (stripe_index >= map->num_stripes) {
6252 btrfs_crit(fs_info,
6253 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6254 stripe_index, map->num_stripes);
6255 ret = -EINVAL;
6256 goto out;
6257 }
6258
6259 num_alloc_stripes = num_stripes;
6260 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6261 if (op == BTRFS_MAP_WRITE)
6262 num_alloc_stripes <<= 1;
6263 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6264 num_alloc_stripes++;
6265 tgtdev_indexes = num_stripes;
6266 }
6267
6268 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6269 if (!bbio) {
6270 ret = -ENOMEM;
6271 goto out;
6272 }
6273
6274 for (i = 0; i < num_stripes; i++) {
6275 bbio->stripes[i].physical = map->stripes[stripe_index].physical +
6276 stripe_offset + stripe_nr * map->stripe_len;
6277 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6278 stripe_index++;
6279 }
6280
6281 /* build raid_map */
6282 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6283 (need_full_stripe(op) || mirror_num > 1)) {
6284 u64 tmp;
6285 unsigned rot;
6286
6287 /* Work out the disk rotation on this stripe-set */
6288 div_u64_rem(stripe_nr, num_stripes, &rot);
6289
6290 /* Fill in the logical address of each stripe */
6291 tmp = stripe_nr * data_stripes;
6292 for (i = 0; i < data_stripes; i++)
6293 bbio->raid_map[(i+rot) % num_stripes] =
6294 em->start + (tmp + i) * map->stripe_len;
6295
6296 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
6297 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6298 bbio->raid_map[(i+rot+1) % num_stripes] =
6299 RAID6_Q_STRIPE;
6300
6301 sort_parity_stripes(bbio, num_stripes);
6302 }
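/*
 * Example (editorial, assuming a 3-disk RAID5 chunk with a 64K stripe_len
 * and stripe_nr == 1): rot == 1 and the loop above fills raid_map as
 * { RAID5_P_STRIPE, start + 128K, start + 192K }, after which
 * sort_parity_stripes() moves the parity slot to the end.
 */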
6303
6304 if (need_full_stripe(op))
6305 max_errors = btrfs_chunk_max_errors(map);
6306
6307 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6308 need_full_stripe(op)) {
6309 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
6310 &max_errors);
6311 }
6312
6313 *bbio_ret = bbio;
6314 bbio->map_type = map->type;
6315 bbio->num_stripes = num_stripes;
6316 bbio->max_errors = max_errors;
6317 bbio->mirror_num = mirror_num;
6318
6319 /*
6320 * This is the case of a READ while dev_replace_is_ongoing &&
6321 * mirror_num == num_stripes + 1 && the dev_replace target drive is
6322 * available as a mirror.
6323 */
6324 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6325 WARN_ON(num_stripes > 1);
6326 bbio->stripes[0].dev = dev_replace->tgtdev;
6327 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6328 bbio->mirror_num = map->num_stripes + 1;
6329 }
6330 out:
6331 if (dev_replace_is_ongoing) {
6332 lockdep_assert_held(&dev_replace->rwsem);
6333 /* Unlock and let waiting writers proceed */
6334 up_read(&dev_replace->rwsem);
6335 }
6336 free_extent_map(em);
6337 return ret;
6338 }
6339
6340 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6341 u64 logical, u64 *length,
6342 struct btrfs_bio **bbio_ret, int mirror_num)
6343 {
6344 if (op == BTRFS_MAP_DISCARD)
6345 return __btrfs_map_block_for_discard(fs_info, logical,
6346 length, bbio_ret);
6347
6348 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6349 mirror_num, 0);
6350 }
6351
6352 /* For Scrub/replace */
6353 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6354 u64 logical, u64 *length,
6355 struct btrfs_bio **bbio_ret)
6356 {
6357 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6358 }
6359
6360 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6361 {
6362 bio->bi_private = bbio->private;
6363 bio->bi_end_io = bbio->end_io;
6364 bio_endio(bio);
6365
6366 btrfs_put_bbio(bbio);
6367 }
6368
6369 static void btrfs_end_bio(struct bio *bio)
6370 {
6371 struct btrfs_bio *bbio = bio->bi_private;
6372 int is_orig_bio = 0;
6373
6374 if (bio->bi_status) {
6375 atomic_inc(&bbio->error);
6376 if (bio->bi_status == BLK_STS_IOERR ||
6377 bio->bi_status == BLK_STS_TARGET) {
6378 struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6379
6380 ASSERT(dev->bdev);
6381 if (bio_op(bio) == REQ_OP_WRITE)
6382 btrfs_dev_stat_inc_and_print(dev,
6383 BTRFS_DEV_STAT_WRITE_ERRS);
6384 else if (!(bio->bi_opf & REQ_RAHEAD))
6385 btrfs_dev_stat_inc_and_print(dev,
6386 BTRFS_DEV_STAT_READ_ERRS);
6387 if (bio->bi_opf & REQ_PREFLUSH)
6388 btrfs_dev_stat_inc_and_print(dev,
6389 BTRFS_DEV_STAT_FLUSH_ERRS);
6390 }
6391 }
6392
6393 if (bio == bbio->orig_bio)
6394 is_orig_bio = 1;
6395
6396 btrfs_bio_counter_dec(bbio->fs_info);
6397
6398 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6399 if (!is_orig_bio) {
6400 bio_put(bio);
6401 bio = bbio->orig_bio;
6402 }
6403
6404 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6405 /* Only send an error to the higher layers if it is
6406 * beyond the tolerance of the btrfs bio.
6407 */
6408 if (atomic_read(&bbio->error) > bbio->max_errors) {
6409 bio->bi_status = BLK_STS_IOERR;
6410 } else {
6411 /*
6412 * This bio is actually up to date; we didn't
6413 * go over the max number of errors
6414 */
6415 bio->bi_status = BLK_STS_OK;
6416 }
6417
6418 btrfs_end_bbio(bbio, bio);
6419 } else if (!is_orig_bio) {
6420 bio_put(bio);
6421 }
6422 }
6423
6424 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6425 u64 physical, struct btrfs_device *dev)
6426 {
6427 struct btrfs_fs_info *fs_info = bbio->fs_info;
6428
6429 bio->bi_private = bbio;
6430 btrfs_io_bio(bio)->device = dev;
6431 bio->bi_end_io = btrfs_end_bio;
6432 bio->bi_iter.bi_sector = physical >> 9;
6433 btrfs_debug_in_rcu(fs_info,
6434 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6435 bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6436 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6437 dev->devid, bio->bi_iter.bi_size);
6438 bio_set_dev(bio, dev->bdev);
6439
6440 btrfs_bio_counter_inc_noblocked(fs_info);
6441
6442 btrfsic_submit_bio(bio);
6443 }
6444
6445 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6446 {
6447 atomic_inc(&bbio->error);
6448 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6449 /* Should be the original bio. */
6450 WARN_ON(bio != bbio->orig_bio);
6451
6452 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6453 bio->bi_iter.bi_sector = logical >> 9;
6454 if (atomic_read(&bbio->error) > bbio->max_errors)
6455 bio->bi_status = BLK_STS_IOERR;
6456 else
6457 bio->bi_status = BLK_STS_OK;
6458 btrfs_end_bbio(bbio, bio);
6459 }
6460 }
6461
6462 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6463 int mirror_num)
6464 {
6465 struct btrfs_device *dev;
6466 struct bio *first_bio = bio;
6467 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6468 u64 length = 0;
6469 u64 map_length;
6470 int ret;
6471 int dev_nr;
6472 int total_devs;
6473 struct btrfs_bio *bbio = NULL;
6474
6475 length = bio->bi_iter.bi_size;
6476 map_length = length;
6477
6478 btrfs_bio_counter_inc_blocked(fs_info);
6479 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6480 &map_length, &bbio, mirror_num, 1);
6481 if (ret) {
6482 btrfs_bio_counter_dec(fs_info);
6483 return errno_to_blk_status(ret);
6484 }
6485
6486 total_devs = bbio->num_stripes;
6487 bbio->orig_bio = first_bio;
6488 bbio->private = first_bio->bi_private;
6489 bbio->end_io = first_bio->bi_end_io;
6490 bbio->fs_info = fs_info;
6491 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6492
6493 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6494 ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6495 /* In this case, map_length has been set to the length of
6496 * a single stripe, not the whole write. */
6497 if (bio_op(bio) == REQ_OP_WRITE) {
6498 ret = raid56_parity_write(fs_info, bio, bbio,
6499 map_length);
6500 } else {
6501 ret = raid56_parity_recover(fs_info, bio, bbio,
6502 map_length, mirror_num, 1);
6503 }
6504
6505 btrfs_bio_counter_dec(fs_info);
6506 return errno_to_blk_status(ret);
6507 }
6508
6509 if (map_length < length) {
6510 btrfs_crit(fs_info,
6511 "mapping failed logical %llu bio len %llu len %llu",
6512 logical, length, map_length);
6513 BUG();
6514 }
6515
6516 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6517 dev = bbio->stripes[dev_nr].dev;
6518 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6519 &dev->dev_state) ||
6520 (bio_op(first_bio) == REQ_OP_WRITE &&
6521 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6522 bbio_error(bbio, first_bio, logical);
6523 continue;
6524 }
6525
6526 if (dev_nr < total_devs - 1)
6527 bio = btrfs_bio_clone(first_bio);
6528 else
6529 bio = first_bio;
6530
6531 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6532 }
6533 btrfs_bio_counter_dec(fs_info);
6534 return BLK_STS_OK;
6535 }
6536
6537 /*
6538 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6539 * return NULL.
6540 *
6541 * If devid and uuid are both specified, the match must be exact, otherwise
6542 * only devid is used.
6543 *
6544 * If @seed is true, traverse through the seed devices.
6545 */
6546 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6547 u64 devid, u8 *uuid, u8 *fsid,
6548 bool seed)
6549 {
6550 struct btrfs_device *device;
6551 struct btrfs_fs_devices *seed_devs;
6552
6553 if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6554 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6555 if (device->devid == devid &&
6556 (!uuid || memcmp(device->uuid, uuid,
6557 BTRFS_UUID_SIZE) == 0))
6558 return device;
6559 }
6560 }
6561
6562 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6563 if (!fsid ||
6564 !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6565 list_for_each_entry(device, &seed_devs->devices,
6566 dev_list) {
6567 if (device->devid == devid &&
6568 (!uuid || memcmp(device->uuid, uuid,
6569 BTRFS_UUID_SIZE) == 0))
6570 return device;
6571 }
6572 }
6573 }
6574
6575 return NULL;
6576 }
6577
6578 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6579 u64 devid, u8 *dev_uuid)
6580 {
6581 struct btrfs_device *device;
6582 unsigned int nofs_flag;
6583
6584 /*
6585 * We call this under the chunk_mutex, so we want to use NOFS for this
6586 * allocation, however we don't want to change btrfs_alloc_device() to
6587 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6588 * places.
6589 */
6590 nofs_flag = memalloc_nofs_save();
6591 device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6592 memalloc_nofs_restore(nofs_flag);
6593 if (IS_ERR(device))
6594 return device;
6595
6596 list_add(&device->dev_list, &fs_devices->devices);
6597 device->fs_devices = fs_devices;
6598 fs_devices->num_devices++;
6599
6600 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6601 fs_devices->missing_devices++;
6602
6603 return device;
6604 }
6605
6606 /**
6607 * btrfs_alloc_device - allocate struct btrfs_device
6608 * @fs_info: used only for generating a new devid, can be NULL if
6609 * devid is provided (i.e. @devid != NULL).
6610 * @devid: a pointer to devid for this device. If NULL a new devid
6611 * is generated.
6612 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6613 * is generated.
6614 *
6615 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6616 * on error. Returned struct is not linked onto any lists and must be
6617 * destroyed with btrfs_free_device.
6618 */
6619 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6620 const u64 *devid,
6621 const u8 *uuid)
6622 {
6623 struct btrfs_device *dev;
6624 u64 tmp;
6625
6626 if (WARN_ON(!devid && !fs_info))
6627 return ERR_PTR(-EINVAL);
6628
6629 dev = __alloc_device(fs_info);
6630 if (IS_ERR(dev))
6631 return dev;
6632
6633 if (devid)
6634 tmp = *devid;
6635 else {
6636 int ret;
6637
6638 ret = find_next_devid(fs_info, &tmp);
6639 if (ret) {
6640 btrfs_free_device(dev);
6641 return ERR_PTR(ret);
6642 }
6643 }
6644 dev->devid = tmp;
6645
6646 if (uuid)
6647 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6648 else
6649 generate_random_uuid(dev->uuid);
6650
6651 return dev;
6652 }
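/*
 * Usage sketch (editorial): callers typically do
 *
 *	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *
 * and must release the device with btrfs_free_device() if it never gets
 * linked into a btrfs_fs_devices list.
 */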
6653
6654 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6655 u64 devid, u8 *uuid, bool error)
6656 {
6657 if (error)
6658 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6659 devid, uuid);
6660 else
6661 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6662 devid, uuid);
6663 }
6664
6665 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6666 {
6667 int index = btrfs_bg_flags_to_raid_index(type);
6668 int ncopies = btrfs_raid_array[index].ncopies;
6669 const int nparity = btrfs_raid_array[index].nparity;
6670 int data_stripes;
6671
6672 if (nparity)
6673 data_stripes = num_stripes - nparity;
6674 else
6675 data_stripes = num_stripes / ncopies;
6676
6677 return div_u64(chunk_len, data_stripes);
6678 }
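/*
 * Worked examples (editorial): a 1G RAID1 chunk has num_stripes == 2 and
 * ncopies == 2, so data_stripes == 1 and each device stripe is 1G; a 1G
 * RAID5 chunk over 5 devices has nparity == 1, so data_stripes == 4 and
 * each device stripe is 256M.
 */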
6679
6680 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6681 struct btrfs_chunk *chunk)
6682 {
6683 struct btrfs_fs_info *fs_info = leaf->fs_info;
6684 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6685 struct map_lookup *map;
6686 struct extent_map *em;
6687 u64 logical;
6688 u64 length;
6689 u64 devid;
6690 u8 uuid[BTRFS_UUID_SIZE];
6691 int num_stripes;
6692 int ret;
6693 int i;
6694
6695 logical = key->offset;
6696 length = btrfs_chunk_length(leaf, chunk);
6697 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6698
6699 /*
6700 * We only need to verify the chunk item if we're reading from the sys chunk
6701 * array, as chunk items in tree blocks are already verified by the tree-checker.
6702 */
6703 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6704 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6705 if (ret)
6706 return ret;
6707 }
6708
6709 read_lock(&map_tree->lock);
6710 em = lookup_extent_mapping(map_tree, logical, 1);
6711 read_unlock(&map_tree->lock);
6712
6713 /* already mapped? */
6714 if (em && em->start <= logical && em->start + em->len > logical) {
6715 free_extent_map(em);
6716 return 0;
6717 } else if (em) {
6718 free_extent_map(em);
6719 }
6720
6721 em = alloc_extent_map();
6722 if (!em)
6723 return -ENOMEM;
6724 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6725 if (!map) {
6726 free_extent_map(em);
6727 return -ENOMEM;
6728 }
6729
6730 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6731 em->map_lookup = map;
6732 em->start = logical;
6733 em->len = length;
6734 em->orig_start = 0;
6735 em->block_start = 0;
6736 em->block_len = em->len;
6737
6738 map->num_stripes = num_stripes;
6739 map->io_width = btrfs_chunk_io_width(leaf, chunk);
6740 map->io_align = btrfs_chunk_io_align(leaf, chunk);
6741 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6742 map->type = btrfs_chunk_type(leaf, chunk);
6743 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6744 map->verified_stripes = 0;
6745 em->orig_block_len = calc_stripe_length(map->type, em->len,
6746 map->num_stripes);
6747 for (i = 0; i < num_stripes; i++) {
6748 map->stripes[i].physical =
6749 btrfs_stripe_offset_nr(leaf, chunk, i);
6750 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6751 read_extent_buffer(leaf, uuid, (unsigned long)
6752 btrfs_stripe_dev_uuid_nr(chunk, i),
6753 BTRFS_UUID_SIZE);
6754 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6755 devid, uuid, NULL, true);
6756 if (!map->stripes[i].dev &&
6757 !btrfs_test_opt(fs_info, DEGRADED)) {
6758 free_extent_map(em);
6759 btrfs_report_missing_device(fs_info, devid, uuid, true);
6760 return -ENOENT;
6761 }
6762 if (!map->stripes[i].dev) {
6763 map->stripes[i].dev =
6764 add_missing_dev(fs_info->fs_devices, devid,
6765 uuid);
6766 if (IS_ERR(map->stripes[i].dev)) {
6767 free_extent_map(em);
6768 btrfs_err(fs_info,
6769 "failed to init missing dev %llu: %ld",
6770 devid, PTR_ERR(map->stripes[i].dev));
6771 return PTR_ERR(map->stripes[i].dev);
6772 }
6773 btrfs_report_missing_device(fs_info, devid, uuid, false);
6774 }
6775 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6776 &(map->stripes[i].dev->dev_state));
6777
6778 }
6779
6780 write_lock(&map_tree->lock);
6781 ret = add_extent_mapping(map_tree, em, 0);
6782 write_unlock(&map_tree->lock);
6783 if (ret < 0) {
6784 btrfs_err(fs_info,
6785 "failed to add chunk map, start=%llu len=%llu: %d",
6786 em->start, em->len, ret);
6787 }
6788 free_extent_map(em);
6789
6790 return ret;
6791 }
6792
6793 static void fill_device_from_item(struct extent_buffer *leaf,
6794 struct btrfs_dev_item *dev_item,
6795 struct btrfs_device *device)
6796 {
6797 unsigned long ptr;
6798
6799 device->devid = btrfs_device_id(leaf, dev_item);
6800 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6801 device->total_bytes = device->disk_total_bytes;
6802 device->commit_total_bytes = device->disk_total_bytes;
6803 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6804 device->commit_bytes_used = device->bytes_used;
6805 device->type = btrfs_device_type(leaf, dev_item);
6806 device->io_align = btrfs_device_io_align(leaf, dev_item);
6807 device->io_width = btrfs_device_io_width(leaf, dev_item);
6808 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6809 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6810 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6811
6812 ptr = btrfs_device_uuid(dev_item);
6813 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6814 }
6815
6816 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6817 u8 *fsid)
6818 {
6819 struct btrfs_fs_devices *fs_devices;
6820 int ret;
6821
6822 lockdep_assert_held(&uuid_mutex);
6823 ASSERT(fsid);
6824
6825 /* This will match only for multi-device seed fs */
6826 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
6827 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6828 return fs_devices;
6829
6830
6831 fs_devices = find_fsid(fsid, NULL);
6832 if (!fs_devices) {
6833 if (!btrfs_test_opt(fs_info, DEGRADED))
6834 return ERR_PTR(-ENOENT);
6835
6836 fs_devices = alloc_fs_devices(fsid, NULL);
6837 if (IS_ERR(fs_devices))
6838 return fs_devices;
6839
6840 fs_devices->seeding = true;
6841 fs_devices->opened = 1;
6842 return fs_devices;
6843 }
6844
6845 /*
6846 * Upon first call for a seed fs fsid, just create a private copy of the
6847 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
6848 */
6849 fs_devices = clone_fs_devices(fs_devices);
6850 if (IS_ERR(fs_devices))
6851 return fs_devices;
6852
6853 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6854 if (ret) {
6855 free_fs_devices(fs_devices);
6856 return ERR_PTR(ret);
6857 }
6858
6859 if (!fs_devices->seeding) {
6860 close_fs_devices(fs_devices);
6861 free_fs_devices(fs_devices);
6862 return ERR_PTR(-EINVAL);
6863 }
6864
6865 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
6866
6867 return fs_devices;
6868 }
6869
6870 static int read_one_dev(struct extent_buffer *leaf,
6871 struct btrfs_dev_item *dev_item)
6872 {
6873 struct btrfs_fs_info *fs_info = leaf->fs_info;
6874 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6875 struct btrfs_device *device;
6876 u64 devid;
6877 int ret;
6878 u8 fs_uuid[BTRFS_FSID_SIZE];
6879 u8 dev_uuid[BTRFS_UUID_SIZE];
6880
6881 devid = btrfs_device_id(leaf, dev_item);
6882 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6883 BTRFS_UUID_SIZE);
6884 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6885 BTRFS_FSID_SIZE);
6886
6887 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
6888 fs_devices = open_seed_devices(fs_info, fs_uuid);
6889 if (IS_ERR(fs_devices))
6890 return PTR_ERR(fs_devices);
6891 }
6892
6893 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6894 fs_uuid, true);
6895 if (!device) {
6896 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6897 btrfs_report_missing_device(fs_info, devid,
6898 dev_uuid, true);
6899 return -ENOENT;
6900 }
6901
6902 device = add_missing_dev(fs_devices, devid, dev_uuid);
6903 if (IS_ERR(device)) {
6904 btrfs_err(fs_info,
6905 "failed to add missing dev %llu: %ld",
6906 devid, PTR_ERR(device));
6907 return PTR_ERR(device);
6908 }
6909 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6910 } else {
6911 if (!device->bdev) {
6912 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6913 btrfs_report_missing_device(fs_info,
6914 devid, dev_uuid, true);
6915 return -ENOENT;
6916 }
6917 btrfs_report_missing_device(fs_info, devid,
6918 dev_uuid, false);
6919 }
6920
6921 if (!device->bdev &&
6922 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6923 /*
6924 * This happens when a device that was properly set up
6925 * in the device info lists suddenly goes bad.
6926 * device->bdev is NULL, so we have to mark the device
6927 * as missing here.
6928 */
6929 device->fs_devices->missing_devices++;
6930 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6931 }
6932
6933 /* Move the device to its own fs_devices */
6934 if (device->fs_devices != fs_devices) {
6935 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6936 &device->dev_state));
6937
6938 list_move(&device->dev_list, &fs_devices->devices);
6939 device->fs_devices->num_devices--;
6940 fs_devices->num_devices++;
6941
6942 device->fs_devices->missing_devices--;
6943 fs_devices->missing_devices++;
6944
6945 device->fs_devices = fs_devices;
6946 }
6947 }
6948
6949 if (device->fs_devices != fs_info->fs_devices) {
6950 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6951 if (device->generation !=
6952 btrfs_device_generation(leaf, dev_item))
6953 return -EINVAL;
6954 }
6955
6956 fill_device_from_item(leaf, dev_item, device);
6957 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6958 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6959 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6960 device->fs_devices->total_rw_bytes += device->total_bytes;
6961 atomic64_add(device->total_bytes - device->bytes_used,
6962 &fs_info->free_chunk_space);
6963 }
6964 ret = 0;
6965 return ret;
6966 }
6967
6968 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6969 {
6970 struct btrfs_root *root = fs_info->tree_root;
6971 struct btrfs_super_block *super_copy = fs_info->super_copy;
6972 struct extent_buffer *sb;
6973 struct btrfs_disk_key *disk_key;
6974 struct btrfs_chunk *chunk;
6975 u8 *array_ptr;
6976 unsigned long sb_array_offset;
6977 int ret = 0;
6978 u32 num_stripes;
6979 u32 array_size;
6980 u32 len = 0;
6981 u32 cur_offset;
6982 u64 type;
6983 struct btrfs_key key;
6984
6985 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6986 /*
6987 * This will create an extent buffer of nodesize; the superblock size is
6988 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6989 * overallocate, but we can keep it as-is since only the first page is used.
6990 */
6991 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
6992 if (IS_ERR(sb))
6993 return PTR_ERR(sb);
6994 set_extent_buffer_uptodate(sb);
6995 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6996 /*
6997 * The sb extent buffer is artificial and just used to read the system array.
6998 * set_extent_buffer_uptodate() call does not properly mark all its
6999 * pages up-to-date when the page is larger: extent does not cover the
7000 * whole page and consequently check_page_uptodate does not find all
7001 * the page's extents up-to-date (the hole beyond sb),
7002 * write_extent_buffer then triggers a WARN_ON.
7003 *
7004 * Regular short extents go through the mark_extent_buffer_dirty/writeback
7005 * cycle, but sb spans only this function. Add an explicit SetPageUptodate
7006 * call to silence the warning, e.g. on PowerPC 64.
7007 */
7008 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7009 SetPageUptodate(sb->pages[0]);
7010
7011 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7012 array_size = btrfs_super_sys_array_size(super_copy);
7013
7014 array_ptr = super_copy->sys_chunk_array;
7015 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7016 cur_offset = 0;
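/*
 * Editorial note: sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk including its stripes) pairs,
 * so each loop iteration below reads a fixed-size key and then a variably
 * sized chunk item:
 *
 *   [disk_key][chunk + stripes][disk_key][chunk + stripes]...
 */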
7017
7018 while (cur_offset < array_size) {
7019 disk_key = (struct btrfs_disk_key *)array_ptr;
7020 len = sizeof(*disk_key);
7021 if (cur_offset + len > array_size)
7022 goto out_short_read;
7023
7024 btrfs_disk_key_to_cpu(&key, disk_key);
7025
7026 array_ptr += len;
7027 sb_array_offset += len;
7028 cur_offset += len;
7029
7030 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7031 btrfs_err(fs_info,
7032 "unexpected item type %u in sys_array at offset %u",
7033 (u32)key.type, cur_offset);
7034 ret = -EIO;
7035 break;
7036 }
7037
7038 chunk = (struct btrfs_chunk *)sb_array_offset;
7039 /*
7040 * At least one btrfs_chunk with one stripe must be present,
7041 * exact stripe count check comes afterwards
7042 */
7043 len = btrfs_chunk_item_size(1);
7044 if (cur_offset + len > array_size)
7045 goto out_short_read;
7046
7047 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7048 if (!num_stripes) {
7049 btrfs_err(fs_info,
7050 "invalid number of stripes %u in sys_array at offset %u",
7051 num_stripes, cur_offset);
7052 ret = -EIO;
7053 break;
7054 }
7055
7056 type = btrfs_chunk_type(sb, chunk);
7057 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7058 btrfs_err(fs_info,
7059 "invalid chunk type %llu in sys_array at offset %u",
7060 type, cur_offset);
7061 ret = -EIO;
7062 break;
7063 }
7064
7065 len = btrfs_chunk_item_size(num_stripes);
7066 if (cur_offset + len > array_size)
7067 goto out_short_read;
7068
7069 ret = read_one_chunk(&key, sb, chunk);
7070 if (ret)
7071 break;
7072
7073 array_ptr += len;
7074 sb_array_offset += len;
7075 cur_offset += len;
7076 }
7077 clear_extent_buffer_uptodate(sb);
7078 free_extent_buffer_stale(sb);
7079 return ret;
7080
7081 out_short_read:
7082 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7083 len, cur_offset);
7084 clear_extent_buffer_uptodate(sb);
7085 free_extent_buffer_stale(sb);
7086 return -EIO;
7087 }
7088
7089 /*
7090 * Check if all chunks in the fs are OK for read-write degraded mount
7091 *
7092 * If the @failing_dev is specified, it's accounted as missing.
7093 *
7094 * Return true if all chunks meet the minimal RW mount requirements.
7095 * Return false if any chunk doesn't meet the minimal RW mount requirements.
7096 */
7097 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7098 struct btrfs_device *failing_dev)
7099 {
7100 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7101 struct extent_map *em;
7102 u64 next_start = 0;
7103 bool ret = true;
7104
7105 read_lock(&map_tree->lock);
7106 em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7107 read_unlock(&map_tree->lock);
7108 /* No chunk at all? Return false anyway */
7109 if (!em) {
7110 ret = false;
7111 goto out;
7112 }
7113 while (em) {
7114 struct map_lookup *map;
7115 int missing = 0;
7116 int max_tolerated;
7117 int i;
7118
7119 map = em->map_lookup;
7120 max_tolerated =
7121 btrfs_get_num_tolerated_disk_barrier_failures(
7122 map->type);
7123 for (i = 0; i < map->num_stripes; i++) {
7124 struct btrfs_device *dev = map->stripes[i].dev;
7125
7126 if (!dev || !dev->bdev ||
7127 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7128 dev->last_flush_error)
7129 missing++;
7130 else if (failing_dev && failing_dev == dev)
7131 missing++;
7132 }
7133 if (missing > max_tolerated) {
7134 if (!failing_dev)
7135 btrfs_warn(fs_info,
7136 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7137 em->start, missing, max_tolerated);
7138 free_extent_map(em);
7139 ret = false;
7140 goto out;
7141 }
7142 next_start = extent_map_end(em);
7143 free_extent_map(em);
7144
7145 read_lock(&map_tree->lock);
7146 em = lookup_extent_mapping(map_tree, next_start,
7147 (u64)(-1) - next_start);
7148 read_unlock(&map_tree->lock);
7149 }
7150 out:
7151 return ret;
7152 }
7153
7154 static void readahead_tree_node_children(struct extent_buffer *node)
7155 {
7156 int i;
7157 const int nr_items = btrfs_header_nritems(node);
7158
7159 for (i = 0; i < nr_items; i++) {
7160 u64 start;
7161
7162 start = btrfs_node_blockptr(node, i);
7163 readahead_tree_block(node->fs_info, start);
7164 }
7165 }
7166
7167 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7168 {
7169 struct btrfs_root *root = fs_info->chunk_root;
7170 struct btrfs_path *path;
7171 struct extent_buffer *leaf;
7172 struct btrfs_key key;
7173 struct btrfs_key found_key;
7174 int ret;
7175 int slot;
7176 u64 total_dev = 0;
7177 u64 last_ra_node = 0;
7178
7179 path = btrfs_alloc_path();
7180 if (!path)
7181 return -ENOMEM;
7182
	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS;
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		struct extent_buffer *node;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		/*
		 * The nodes on level 1 are not locked, but we don't need to
		 * lock them during mount time as nothing else can access the
		 * tree.
		 */
		node = path->nodes[1];
		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			mutex_lock(&fs_info->chunk_mutex);
			ret = read_one_chunk(&found_key, leaf, chunk);
			mutex_unlock(&fs_info->chunk_mutex);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_warn(fs_info,
"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
			   btrfs_super_num_devices(fs_info->super_copy),
			   total_dev);
		fs_info->fs_devices->total_devices = total_dev;
		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

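/*
 * Late mount-time initialization: point every known device, including the
 * devices of all seed filesystems, back at the fs_info they now belong to.
 */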
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

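/*
 * The on-disk dev_stats item is an array of __le64 counters, one per
 * BTRFS_DEV_STAT_* index. The two helpers below read and write a single
 * counter at the given index within the item.
 */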
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			   ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

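/*
 * Load the persisted error counters of one device from the device tree.
 * A missing dev_stats item is not an error: the counters simply start
 * from zero.
 */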
static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

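/*
 * Initialize the in-memory device stats for every device of the filesystem,
 * including the devices of all seed filesystems.
 */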
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}

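/*
 * Write the in-memory error counters of @device into its dev_stats item in
 * the device tree. An existing item that is too small (from an older format)
 * is deleted and recreated at the current size.
 */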
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

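/*
 * Handler for the BTRFS_IOC_GET_DEV_STATS ioctl: copy out (and optionally
 * reset) the error counters of the device identified by @stats->devid.
 *
 * A userspace caller might use it roughly like this (a minimal sketch,
 * error handling omitted):
 *
 *	struct btrfs_ioctl_get_dev_stats s = { 0 };
 *
 *	s.devid = 1;
 *	s.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
 *	ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &s);
 *	// s.values[BTRFS_DEV_STAT_WRITE_ERRS] etc. now hold the counters
 */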
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
				true);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 * For example, RAID1 keeps two copies of each byte, so its factor is 2,
 * while SINGLE and RAID0 have a factor of 1.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
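/*
 * Cross check one dev extent against the chunk it claims to belong to: the
 * chunk must exist, its stripe length must match the dev extent length, one
 * of its stripes must sit at exactly this devid/physical offset, and the
 * extent must end within the device boundary.
 */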
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					  "too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy for a seed device */
	if (dev->disk_total_bytes == 0) {
		struct btrfs_fs_devices *devs;

		devs = list_first_entry(&fs_info->fs_devices->seed_list,
					struct btrfs_fs_devices, seed_list);
		dev = btrfs_find_device(devs, devid, NULL, NULL, false);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}

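/*
 * After every dev extent has been verified, each chunk must have had all of
 * its stripes matched by a dev extent; anything else means a dev extent is
 * missing.
 */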
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

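	/*
	 * Start the search from the smallest possible devid (1) at offset 0
	 * so the walk below visits every dev extent in the device tree.
	 */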
	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

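	/* Plain rb-tree lookup, keyed by the pointer value of @ptr itself. */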
	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}
