// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity        = 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity        = 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

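/*
 * Return the human-readable name of a block group profile, or NULL if the
 * flags do not map to a known RAID type.
 */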
const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)				\
	do {							\
		if (flags & (flag)) {				\
			ret = snprintf(bp, size_bp, "%s|", (desc)); \
			if (ret < 0 || ret >= size_bp)		\
				goto out_overflow;		\
			size_bp -= ret;				\
			bp += ret;				\
			flags &= ~(flag);			\
		}						\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
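
/*
 * For example (an illustrative sketch only, not taken from a specific call
 * site): code that needs both the per-fs device list and chunk state would
 * take the locks in the nesting order documented above:
 *
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	... manipulate chunks or per-device post_commit_list ...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 */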

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(fs_info, &dev->alloc_state,
			    IO_TREE_DEVICE_ALLOC_STATE, NULL);

	return dev;
}
static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device has completed its fsid
	 * change but belongs to a fs_devices that was created by first
	 * scanning a device which didn't have its fsid/metadata_uuid changed
	 * at all and had the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle the case where the scanned device has completed its fsid
	 * change but belongs to a fs_devices that was created by a device
	 * that has an outdated pair of fsid/metadata_uuid and the
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

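/*
 * Open the block device at @device_path, optionally flush its page cache,
 * set the block size to BTRFS_BDEV_BLOCKSIZE and read the super block.
 * On failure *bdev is reset to NULL and an errno is returned.
 */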
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

/*
 * Check if the device in @path matches the device described by the given
 * struct btrfs_device.
 *
 * Returns:
 *	true	If it is the same device.
 *	false	If it is not the same device or on error.
 */
static bool device_matched(const struct btrfs_device *device, const char *path)
{
	char *device_name;
	struct block_device *bdev_old;
	struct block_device *bdev_new;

	/*
	 * If we are looking for a device with the matching dev_t, then skip
	 * devices without a name (i.e. missing devices).
	 */
	if (!device->name)
		return false;

	device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
	if (!device_name)
		return false;

	rcu_read_lock();
	scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
	rcu_read_unlock();

	bdev_old = lookup_bdev(device_name);
	kfree(device_name);
	if (IS_ERR(bdev_old))
		return false;

	bdev_new = lookup_bdev(path);
	if (IS_ERR(bdev_new))
		return false;

	if (bdev_old == bdev_new)
		return true;

	return false;
}

/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * @path:	 Optional. When provided, it will release all unmounted
 *		 devices matching this path only.
 * @skip_device: Optional. Will skip this device when searching for the
 *		 stale devices.
 *
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device_matched(device, path))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}

/*
 * Handle the case where the scanned device has its CHANGING_FSID_V2 flag set
 * and the fs_devices was created with a disk that has already completed its
 * fsid change. Such a disk can belong to an fs which has its FSID changed or
 * to one which doesn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently the device didn't
	 * observe it. Meaning our fsid will be different from theirs. We need
	 * to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 * are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in
			 * if generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

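/*
 * Duplicate the devices of @orig into a freshly allocated fs_devices with the
 * same fsid. The clones copy the devid, uuid and name of each device but are
 * not opened; used e.g. when sprouting from a seed filesystem.
 */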
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      int step, struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the devices which do not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, step, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, step, &latest_dev);

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

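/*
 * Sync and invalidate the page cache of a writeable device before dropping
 * our reference on its block device.
 */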
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

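/*
 * Tear down the per-device state at close time: drop the device from the
 * allocation list, clear its state bits, close the bdev and reset counters
 * so the struct is back in a pristine state for a future mount.
 */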
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;

	return 0;
}

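/* Comparator for list_sort(), orders devices by ascending devid. */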
static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like bd_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

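/* Drop the page reference taken by btrfs_read_disk_super(). */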
void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

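/*
 * Unregister stale devices matching @path (or all stale devices if @path is
 * empty) from the list of scanned devices.
 */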
int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);

	/*
	 * Avoid using flag |= FMODE_EXCL here, as systemd-udev may initiate
	 * the device scan which may race with the user's mount or mkfs
	 * command, resulting in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for FMODE_EXCL. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	disk_super = btrfs_read_disk_super(bdev, bytenr);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

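/*
 * Return the lowest offset >= @start at which a new dev extent may be
 * placed, according to the chunk allocation policy of the device's
 * fs_devices.
 */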
static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	default:
		BUG();
	}
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	/*
	 * Check before we set max_hole_start, otherwise we could end up
	 * sending back this offset anyway.
	 */
	if (contains_pending_extent(device, hole_start, *hole_size)) {
		if (hole_end >= *hole_start)
			*hole_size = hole_end - *hole_start;
		else
			*hole_size = 0;
		changed = true;
	}

	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/* No extra check */
		break;
	default:
		BUG();
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extents freed in the current
 * transaction are not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start, u64 *start,
				      u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

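/*
 * Find and delete the dev extent item at @start on @device, returning its
 * length in @dev_extent_len.
 */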
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

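/*
 * Insert a new dev extent item into the device tree, mapping @num_bytes at
 * physical offset @start of @device to the chunk at @chunk_offset.
 */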
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

1816 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1817 {
1818 struct extent_map_tree *em_tree;
1819 struct extent_map *em;
1820 struct rb_node *n;
1821 u64 ret = 0;
1822
1823 em_tree = &fs_info->mapping_tree;
1824 read_lock(&em_tree->lock);
1825 n = rb_last(&em_tree->map.rb_root);
1826 if (n) {
1827 em = rb_entry(n, struct extent_map, rb_node);
1828 ret = em->start + em->len;
1829 }
1830 read_unlock(&em_tree->lock);
1831
1832 return ret;
1833 }
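
/*
 * Example (illustrative): if the last mapped chunk starts at 1G and is
 * 256M long, find_next_chunk() returns 1G + 256M; on an empty mapping
 * tree it returns 0.
 */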
1834
1835 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1836 u64 *devid_ret)
1837 {
1838 int ret;
1839 struct btrfs_key key;
1840 struct btrfs_key found_key;
1841 struct btrfs_path *path;
1842
1843 path = btrfs_alloc_path();
1844 if (!path)
1845 return -ENOMEM;
1846
1847 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1848 key.type = BTRFS_DEV_ITEM_KEY;
1849 key.offset = (u64)-1;
1850
1851 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1852 if (ret < 0)
1853 goto error;
1854
1855 if (ret == 0) {
1856 /* Corruption */
1857 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1858 ret = -EUCLEAN;
1859 goto error;
1860 }
1861
1862 ret = btrfs_previous_item(fs_info->chunk_root, path,
1863 BTRFS_DEV_ITEMS_OBJECTID,
1864 BTRFS_DEV_ITEM_KEY);
1865 if (ret) {
1866 *devid_ret = 1;
1867 } else {
1868 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1869 path->slots[0]);
1870 *devid_ret = found_key.offset + 1;
1871 }
1872 ret = 0;
1873 error:
1874 btrfs_free_path(path);
1875 return ret;
1876 }
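
/*
 * Example (illustrative): if the highest existing DEV_ITEM key offset is
 * 3, find_next_devid() sets *devid_ret to 4; if no device item precedes
 * the (u64)-1 search key, it falls back to 1.
 */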
1877
1878 /*
1879 * The device information is stored in the chunk root.
1880 * The btrfs_device struct should be fully filled in.
1881 */
1882 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1883 struct btrfs_device *device)
1884 {
1885 int ret;
1886 struct btrfs_path *path;
1887 struct btrfs_dev_item *dev_item;
1888 struct extent_buffer *leaf;
1889 struct btrfs_key key;
1890 unsigned long ptr;
1891
1892 path = btrfs_alloc_path();
1893 if (!path)
1894 return -ENOMEM;
1895
1896 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1897 key.type = BTRFS_DEV_ITEM_KEY;
1898 key.offset = device->devid;
1899
1900 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1901 &key, sizeof(*dev_item));
1902 if (ret)
1903 goto out;
1904
1905 leaf = path->nodes[0];
1906 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1907
1908 btrfs_set_device_id(leaf, dev_item, device->devid);
1909 btrfs_set_device_generation(leaf, dev_item, 0);
1910 btrfs_set_device_type(leaf, dev_item, device->type);
1911 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1912 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1913 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1914 btrfs_set_device_total_bytes(leaf, dev_item,
1915 btrfs_device_get_disk_total_bytes(device));
1916 btrfs_set_device_bytes_used(leaf, dev_item,
1917 btrfs_device_get_bytes_used(device));
1918 btrfs_set_device_group(leaf, dev_item, 0);
1919 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1920 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1921 btrfs_set_device_start_offset(leaf, dev_item, 0);
1922
1923 ptr = btrfs_device_uuid(dev_item);
1924 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1925 ptr = btrfs_device_fsid(dev_item);
1926 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1927 ptr, BTRFS_FSID_SIZE);
1928 btrfs_mark_buffer_dirty(leaf);
1929
1930 ret = 0;
1931 out:
1932 btrfs_free_path(path);
1933 return ret;
1934 }
1935
1936 /*
1937 * Function to update ctime/mtime for a given device path.
1938 * Mainly used for ctime/mtime based probe like libblkid.
1939 *
1940 * We don't care about errors here, this is just to be kind to userspace.
1941 */
1942 static void update_dev_time(const char *device_path)
1943 {
1944 struct path path;
1945 struct timespec64 now;
1946 int ret;
1947
1948 ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1949 if (ret)
1950 return;
1951
1952 now = current_time(d_inode(path.dentry));
1953 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1954 path_put(&path);
1955 }
1956
1957 static int btrfs_rm_dev_item(struct btrfs_device *device)
1958 {
1959 struct btrfs_root *root = device->fs_info->chunk_root;
1960 int ret;
1961 struct btrfs_path *path;
1962 struct btrfs_key key;
1963 struct btrfs_trans_handle *trans;
1964
1965 path = btrfs_alloc_path();
1966 if (!path)
1967 return -ENOMEM;
1968
1969 trans = btrfs_start_transaction(root, 0);
1970 if (IS_ERR(trans)) {
1971 btrfs_free_path(path);
1972 return PTR_ERR(trans);
1973 }
1974 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1975 key.type = BTRFS_DEV_ITEM_KEY;
1976 key.offset = device->devid;
1977
1978 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1979 if (ret) {
1980 if (ret > 0)
1981 ret = -ENOENT;
1982 btrfs_abort_transaction(trans, ret);
1983 btrfs_end_transaction(trans);
1984 goto out;
1985 }
1986
1987 ret = btrfs_del_item(trans, root, path);
1988 if (ret) {
1989 btrfs_abort_transaction(trans, ret);
1990 btrfs_end_transaction(trans);
1991 }
1992
1993 out:
1994 btrfs_free_path(path);
1995 if (!ret)
1996 ret = btrfs_commit_transaction(trans);
1997 return ret;
1998 }
1999
2000 /*
2001 * Verify that @num_devices satisfies the RAID profile constraints in the whole
2002 * filesystem. It's up to the caller to adjust that number to account for,
2003 * e.g., an ongoing device replace.
2004 */
2005 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
2006 u64 num_devices)
2007 {
2008 u64 all_avail;
2009 unsigned seq;
2010 int i;
2011
2012 do {
2013 seq = read_seqbegin(&fs_info->profiles_lock);
2014
2015 all_avail = fs_info->avail_data_alloc_bits |
2016 fs_info->avail_system_alloc_bits |
2017 fs_info->avail_metadata_alloc_bits;
2018 } while (read_seqretry(&fs_info->profiles_lock, seq));
2019
2020 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2021 if (!(all_avail & btrfs_raid_array[i].bg_flag))
2022 continue;
2023
2024 if (num_devices < btrfs_raid_array[i].devs_min) {
2025 int ret = btrfs_raid_array[i].mindev_error;
2026
2027 if (ret)
2028 return ret;
2029 }
2030 }
2031
2032 return 0;
2033 }
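
/*
 * Example (illustrative): if any profile in use has devs_min larger than
 * the passed num_devices, its mindev_error is returned (e.g. going below
 * two devices on a raid1 filesystem); profiles with mindev_error == 0
 * never fail this check.
 */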
2034
2035 static struct btrfs_device * btrfs_find_next_active_device(
2036 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2037 {
2038 struct btrfs_device *next_device;
2039
2040 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2041 if (next_device != device &&
2042 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2043 && next_device->bdev)
2044 return next_device;
2045 }
2046
2047 return NULL;
2048 }
2049
2050 /*
2051 * Helper function to check if the given device is part of s_bdev / latest_bdev
2052 * and replace it with the provided or the next active device. In the context
2053 * where this function is called, there should always be another device (or
2054 * this_dev) which is active.
2055 */
2056 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2057 struct btrfs_device *next_device)
2058 {
2059 struct btrfs_fs_info *fs_info = device->fs_info;
2060
2061 if (!next_device)
2062 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2063 device);
2064 ASSERT(next_device);
2065
2066 if (fs_info->sb->s_bdev &&
2067 (fs_info->sb->s_bdev == device->bdev))
2068 fs_info->sb->s_bdev = next_device->bdev;
2069
2070 if (fs_info->fs_devices->latest_bdev == device->bdev)
2071 fs_info->fs_devices->latest_bdev = next_device->bdev;
2072 }
2073
2074 /*
2075 * Return btrfs_fs_devices::num_devices excluding the device that's being
2076 * currently replaced.
2077 */
2078 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2079 {
2080 u64 num_devices = fs_info->fs_devices->num_devices;
2081
2082 down_read(&fs_info->dev_replace.rwsem);
2083 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2084 ASSERT(num_devices > 1);
2085 num_devices--;
2086 }
2087 up_read(&fs_info->dev_replace.rwsem);
2088
2089 return num_devices;
2090 }
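
/*
 * Example (illustrative): with three devices and an ongoing replace, the
 * replace target is not counted and btrfs_num_devices() returns 2.
 */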
2091
2092 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2093 struct block_device *bdev,
2094 const char *device_path)
2095 {
2096 struct btrfs_super_block *disk_super;
2097 int copy_num;
2098
2099 if (!bdev)
2100 return;
2101
2102 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2103 struct page *page;
2104 int ret;
2105
2106 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2107 if (IS_ERR(disk_super))
2108 continue;
2109
2110 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2111
2112 page = virt_to_page(disk_super);
2113 set_page_dirty(page);
2114 lock_page(page);
2115 		/* write_one_page() unlocks the page */
2116 ret = write_one_page(page);
2117 if (ret)
2118 btrfs_warn(fs_info,
2119 "error clearing superblock number %d (%d)",
2120 copy_num, ret);
2121 btrfs_release_disk_super(disk_super);
2122
2123 }
2124
2125 /* Notify udev that device has changed */
2126 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2127
2128 /* Update ctime/mtime for device path for libblkid */
2129 update_dev_time(device_path);
2130 }
2131
2132 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2133 u64 devid)
2134 {
2135 struct btrfs_device *device;
2136 struct btrfs_fs_devices *cur_devices;
2137 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2138 u64 num_devices;
2139 int ret = 0;
2140
2141 /*
2142 * The device list in fs_devices is accessed without locks (neither
2143 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2144 * filesystem and another device rm cannot run.
2145 */
2146 num_devices = btrfs_num_devices(fs_info);
2147
2148 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2149 if (ret)
2150 goto out;
2151
2152 device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2153
2154 if (IS_ERR(device)) {
2155 if (PTR_ERR(device) == -ENOENT &&
2156 device_path && strcmp(device_path, "missing") == 0)
2157 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2158 else
2159 ret = PTR_ERR(device);
2160 goto out;
2161 }
2162
2163 if (btrfs_pinned_by_swapfile(fs_info, device)) {
2164 btrfs_warn_in_rcu(fs_info,
2165 "cannot remove device %s (devid %llu) due to active swapfile",
2166 rcu_str_deref(device->name), device->devid);
2167 ret = -ETXTBSY;
2168 goto out;
2169 }
2170
2171 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2172 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2173 goto out;
2174 }
2175
2176 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2177 fs_info->fs_devices->rw_devices == 1) {
2178 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2179 goto out;
2180 }
2181
2182 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2183 mutex_lock(&fs_info->chunk_mutex);
2184 list_del_init(&device->dev_alloc_list);
2185 device->fs_devices->rw_devices--;
2186 mutex_unlock(&fs_info->chunk_mutex);
2187 }
2188
2189 ret = btrfs_shrink_device(device, 0);
2190 if (!ret)
2191 btrfs_reada_remove_dev(device);
2192 if (ret)
2193 goto error_undo;
2194
2195 /*
2196 * TODO: the superblock still includes this device in its num_devices
2197 * counter although write_all_supers() is not locked out. This
2198 * could give a filesystem state which requires a degraded mount.
2199 */
2200 ret = btrfs_rm_dev_item(device);
2201 if (ret)
2202 goto error_undo;
2203
2204 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2205 btrfs_scrub_cancel_dev(device);
2206
2207 /*
2208 * the device list mutex makes sure that we don't change
2209 * the device list while someone else is writing out all
2210 * the device supers. Whoever is writing all supers, should
2211 * lock the device list mutex before getting the number of
2212 * devices in the super block (super_copy). Conversely,
2213 * whoever updates the number of devices in the super block
2214 * (super_copy) should hold the device list mutex.
2215 */
2216
2217 /*
2218 	 * In normal cases cur_devices == fs_devices. But when deleting a seed
2219 	 * device, cur_devices should point to the seed's own fs_devices, listed
2220 	 * under fs_devices->seed.
2221 */
2222 cur_devices = device->fs_devices;
2223 mutex_lock(&fs_devices->device_list_mutex);
2224 list_del_rcu(&device->dev_list);
2225
2226 cur_devices->num_devices--;
2227 cur_devices->total_devices--;
2228 /* Update total_devices of the parent fs_devices if it's seed */
2229 if (cur_devices != fs_devices)
2230 fs_devices->total_devices--;
2231
2232 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2233 cur_devices->missing_devices--;
2234
2235 btrfs_assign_next_active_device(device, NULL);
2236
2237 if (device->bdev) {
2238 cur_devices->open_devices--;
2239 /* remove sysfs entry */
2240 btrfs_sysfs_remove_device(device);
2241 }
2242
2243 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2244 btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2245 mutex_unlock(&fs_devices->device_list_mutex);
2246
2247 /*
2248 * at this point, the device is zero sized and detached from
2249 * the devices list. All that's left is to zero out the old
2250 * supers and free the device.
2251 */
2252 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2253 btrfs_scratch_superblocks(fs_info, device->bdev,
2254 device->name->str);
2255
2256 btrfs_close_bdev(device);
2257 synchronize_rcu();
2258 btrfs_free_device(device);
2259
2260 if (cur_devices->open_devices == 0) {
2261 list_del_init(&cur_devices->seed_list);
2262 close_fs_devices(cur_devices);
2263 free_fs_devices(cur_devices);
2264 }
2265
2266 out:
2267 return ret;
2268
2269 error_undo:
2270 btrfs_reada_undo_remove_dev(device);
2271 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2272 mutex_lock(&fs_info->chunk_mutex);
2273 list_add(&device->dev_alloc_list,
2274 &fs_devices->alloc_list);
2275 device->fs_devices->rw_devices++;
2276 mutex_unlock(&fs_info->chunk_mutex);
2277 }
2278 goto out;
2279 }
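
/*
 * Illustrative userspace sketch (hypothetical snippet; the ioctl number
 * and struct come from <linux/btrfs.h>): device removal normally reaches
 * btrfs_rm_device() via the BTRFS_IOC_RM_DEV ioctl:
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *
 *	strncpy(args.name, "/dev/sdb", BTRFS_PATH_NAME_MAX);
 *	ioctl(mnt_fd, BTRFS_IOC_RM_DEV, &args);
 */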
2280
2281 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2282 {
2283 struct btrfs_fs_devices *fs_devices;
2284
2285 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2286
2287 /*
2288 	 * In the case of an fs with no seed, srcdev->fs_devices will point
2289 	 * to the fs_devices of fs_info. However, when the dev being replaced is
2290 	 * a seed dev, it will point to the seed's local fs_devices. In short,
2291 	 * srcdev will have its correct fs_devices in both cases.
2292 */
2293 fs_devices = srcdev->fs_devices;
2294
2295 list_del_rcu(&srcdev->dev_list);
2296 list_del(&srcdev->dev_alloc_list);
2297 fs_devices->num_devices--;
2298 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2299 fs_devices->missing_devices--;
2300
2301 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2302 fs_devices->rw_devices--;
2303
2304 if (srcdev->bdev)
2305 fs_devices->open_devices--;
2306 }
2307
2308 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2309 {
2310 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2311
2312 mutex_lock(&uuid_mutex);
2313
2314 btrfs_close_bdev(srcdev);
2315 synchronize_rcu();
2316 btrfs_free_device(srcdev);
2317
2318 	/* If there are no devices left, we'd rather delete the fs_devices */
2319 if (!fs_devices->num_devices) {
2320 /*
2321 * On a mounted FS, num_devices can't be zero unless it's a
2322 		 * seed. In case of a seed device being replaced, the replace
2323 		 * target is added to the sprout FS, so there will be no more
2324 		 * devices left under the seed FS.
2325 */
2326 ASSERT(fs_devices->seeding);
2327
2328 list_del_init(&fs_devices->seed_list);
2329 close_fs_devices(fs_devices);
2330 free_fs_devices(fs_devices);
2331 }
2332 mutex_unlock(&uuid_mutex);
2333 }
2334
2335 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2336 {
2337 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2338
2339 mutex_lock(&fs_devices->device_list_mutex);
2340
2341 btrfs_sysfs_remove_device(tgtdev);
2342
2343 if (tgtdev->bdev)
2344 fs_devices->open_devices--;
2345
2346 fs_devices->num_devices--;
2347
2348 btrfs_assign_next_active_device(tgtdev, NULL);
2349
2350 list_del_rcu(&tgtdev->dev_list);
2351
2352 mutex_unlock(&fs_devices->device_list_mutex);
2353
2354 /*
2355 	 * The update_dev_time() within btrfs_scratch_superblocks()
2356 	 * may lead to a call to btrfs_show_devname() which will try
2357 	 * to hold device_list_mutex. And since this device
2358 	 * is already out of the device list, we don't have to hold
2359 	 * the device_list_mutex lock here.
2360 */
2361 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2362 tgtdev->name->str);
2363
2364 btrfs_close_bdev(tgtdev);
2365 synchronize_rcu();
2366 btrfs_free_device(tgtdev);
2367 }
2368
2369 static struct btrfs_device *btrfs_find_device_by_path(
2370 struct btrfs_fs_info *fs_info, const char *device_path)
2371 {
2372 int ret = 0;
2373 struct btrfs_super_block *disk_super;
2374 u64 devid;
2375 u8 *dev_uuid;
2376 struct block_device *bdev;
2377 struct btrfs_device *device;
2378
2379 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2380 fs_info->bdev_holder, 0, &bdev, &disk_super);
2381 if (ret)
2382 return ERR_PTR(ret);
2383
2384 devid = btrfs_stack_device_id(&disk_super->dev_item);
2385 dev_uuid = disk_super->dev_item.uuid;
2386 if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2387 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2388 disk_super->metadata_uuid, true);
2389 else
2390 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2391 disk_super->fsid, true);
2392
2393 btrfs_release_disk_super(disk_super);
2394 if (!device)
2395 device = ERR_PTR(-ENOENT);
2396 blkdev_put(bdev, FMODE_READ);
2397 return device;
2398 }
2399
2400 /*
2401 * Lookup a device given by device id, or the path if the id is 0.
2402 */
2403 struct btrfs_device *btrfs_find_device_by_devspec(
2404 struct btrfs_fs_info *fs_info, u64 devid,
2405 const char *device_path)
2406 {
2407 struct btrfs_device *device;
2408
2409 if (devid) {
2410 device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2411 NULL, true);
2412 if (!device)
2413 return ERR_PTR(-ENOENT);
2414 return device;
2415 }
2416
2417 if (!device_path || !device_path[0])
2418 return ERR_PTR(-EINVAL);
2419
2420 if (strcmp(device_path, "missing") == 0) {
2421 /* Find first missing device */
2422 list_for_each_entry(device, &fs_info->fs_devices->devices,
2423 dev_list) {
2424 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2425 &device->dev_state) && !device->bdev)
2426 return device;
2427 }
2428 return ERR_PTR(-ENOENT);
2429 }
2430
2431 return btrfs_find_device_by_path(fs_info, device_path);
2432 }
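
/*
 * Examples (illustrative): devid == 2 looks the device up by id and
 * ignores the path; devid == 0 with path "missing" returns the first
 * device present in the metadata but lacking a bdev; devid == 0 with a
 * regular path reads the superblock from that block device.
 */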
2433
2434 /*
2435 * Does all the dirty work required for changing the filesystem's UUID.
2436 */
2437 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2438 {
2439 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2440 struct btrfs_fs_devices *old_devices;
2441 struct btrfs_fs_devices *seed_devices;
2442 struct btrfs_super_block *disk_super = fs_info->super_copy;
2443 struct btrfs_device *device;
2444 u64 super_flags;
2445
2446 lockdep_assert_held(&uuid_mutex);
2447 if (!fs_devices->seeding)
2448 return -EINVAL;
2449
2450 /*
2451 * Private copy of the seed devices, anchored at
2452 * fs_info->fs_devices->seed_list
2453 */
2454 seed_devices = alloc_fs_devices(NULL, NULL);
2455 if (IS_ERR(seed_devices))
2456 return PTR_ERR(seed_devices);
2457
2458 /*
2459 * It's necessary to retain a copy of the original seed fs_devices in
2460 * fs_uuids so that filesystems which have been seeded can successfully
2461 	 * reference the seed device from open_seed_devices. This also supports
2462 	 * multiple seed filesystems.
2463 */
2464 old_devices = clone_fs_devices(fs_devices);
2465 if (IS_ERR(old_devices)) {
2466 kfree(seed_devices);
2467 return PTR_ERR(old_devices);
2468 }
2469
2470 list_add(&old_devices->fs_list, &fs_uuids);
2471
2472 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2473 seed_devices->opened = 1;
2474 INIT_LIST_HEAD(&seed_devices->devices);
2475 INIT_LIST_HEAD(&seed_devices->alloc_list);
2476 mutex_init(&seed_devices->device_list_mutex);
2477
2478 mutex_lock(&fs_devices->device_list_mutex);
2479 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2480 synchronize_rcu);
2481 list_for_each_entry(device, &seed_devices->devices, dev_list)
2482 device->fs_devices = seed_devices;
2483
2484 fs_devices->seeding = false;
2485 fs_devices->num_devices = 0;
2486 fs_devices->open_devices = 0;
2487 fs_devices->missing_devices = 0;
2488 fs_devices->rotating = false;
2489 list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2490
2491 generate_random_uuid(fs_devices->fsid);
2492 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2493 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2494 mutex_unlock(&fs_devices->device_list_mutex);
2495
2496 super_flags = btrfs_super_flags(disk_super) &
2497 ~BTRFS_SUPER_FLAG_SEEDING;
2498 btrfs_set_super_flags(disk_super, super_flags);
2499
2500 return 0;
2501 }
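
/*
 * Illustrative sketch of the handoff above: before sprouting, fs_devices
 * owns the seed's device list; afterwards that list lives in seed_devices
 * (anchored at fs_devices->seed_list) and fs_devices starts empty with a
 * freshly generated fsid, ready for the new writable device:
 *
 *	fs_devices (new fsid, 0 devices)
 *	    seed_list -> seed_devices (old fsid, original devices)
 */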
2502
2503 /*
2504 * Store the expected generation for seed devices in device items.
2505 */
2506 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2507 {
2508 struct btrfs_fs_info *fs_info = trans->fs_info;
2509 struct btrfs_root *root = fs_info->chunk_root;
2510 struct btrfs_path *path;
2511 struct extent_buffer *leaf;
2512 struct btrfs_dev_item *dev_item;
2513 struct btrfs_device *device;
2514 struct btrfs_key key;
2515 u8 fs_uuid[BTRFS_FSID_SIZE];
2516 u8 dev_uuid[BTRFS_UUID_SIZE];
2517 u64 devid;
2518 int ret;
2519
2520 path = btrfs_alloc_path();
2521 if (!path)
2522 return -ENOMEM;
2523
2524 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2525 key.offset = 0;
2526 key.type = BTRFS_DEV_ITEM_KEY;
2527
2528 while (1) {
2529 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2530 if (ret < 0)
2531 goto error;
2532
2533 leaf = path->nodes[0];
2534 next_slot:
2535 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2536 ret = btrfs_next_leaf(root, path);
2537 if (ret > 0)
2538 break;
2539 if (ret < 0)
2540 goto error;
2541 leaf = path->nodes[0];
2542 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2543 btrfs_release_path(path);
2544 continue;
2545 }
2546
2547 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2548 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2549 key.type != BTRFS_DEV_ITEM_KEY)
2550 break;
2551
2552 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2553 struct btrfs_dev_item);
2554 devid = btrfs_device_id(leaf, dev_item);
2555 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2556 BTRFS_UUID_SIZE);
2557 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2558 BTRFS_FSID_SIZE);
2559 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2560 fs_uuid, true);
2561 BUG_ON(!device); /* Logic error */
2562
2563 if (device->fs_devices->seeding) {
2564 btrfs_set_device_generation(leaf, dev_item,
2565 device->generation);
2566 btrfs_mark_buffer_dirty(leaf);
2567 }
2568
2569 path->slots[0]++;
2570 goto next_slot;
2571 }
2572 ret = 0;
2573 error:
2574 btrfs_free_path(path);
2575 return ret;
2576 }
2577
2578 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2579 {
2580 struct btrfs_root *root = fs_info->dev_root;
2581 struct request_queue *q;
2582 struct btrfs_trans_handle *trans;
2583 struct btrfs_device *device;
2584 struct block_device *bdev;
2585 struct super_block *sb = fs_info->sb;
2586 struct rcu_string *name;
2587 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2588 u64 orig_super_total_bytes;
2589 u64 orig_super_num_devices;
2590 int seeding_dev = 0;
2591 int ret = 0;
2592 bool locked = false;
2593
2594 if (sb_rdonly(sb) && !fs_devices->seeding)
2595 return -EROFS;
2596
2597 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2598 fs_info->bdev_holder);
2599 if (IS_ERR(bdev))
2600 return PTR_ERR(bdev);
2601
2602 if (fs_devices->seeding) {
2603 seeding_dev = 1;
2604 down_write(&sb->s_umount);
2605 mutex_lock(&uuid_mutex);
2606 locked = true;
2607 }
2608
2609 sync_blockdev(bdev);
2610
2611 rcu_read_lock();
2612 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2613 if (device->bdev == bdev) {
2614 ret = -EEXIST;
2615 rcu_read_unlock();
2616 goto error;
2617 }
2618 }
2619 rcu_read_unlock();
2620
2621 device = btrfs_alloc_device(fs_info, NULL, NULL);
2622 if (IS_ERR(device)) {
2623 /* we can safely leave the fs_devices entry around */
2624 ret = PTR_ERR(device);
2625 goto error;
2626 }
2627
2628 name = rcu_string_strdup(device_path, GFP_KERNEL);
2629 if (!name) {
2630 ret = -ENOMEM;
2631 goto error_free_device;
2632 }
2633 rcu_assign_pointer(device->name, name);
2634
2635 trans = btrfs_start_transaction(root, 0);
2636 if (IS_ERR(trans)) {
2637 ret = PTR_ERR(trans);
2638 goto error_free_device;
2639 }
2640
2641 q = bdev_get_queue(bdev);
2642 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2643 device->generation = trans->transid;
2644 device->io_width = fs_info->sectorsize;
2645 device->io_align = fs_info->sectorsize;
2646 device->sector_size = fs_info->sectorsize;
2647 device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2648 fs_info->sectorsize);
2649 device->disk_total_bytes = device->total_bytes;
2650 device->commit_total_bytes = device->total_bytes;
2651 device->fs_info = fs_info;
2652 device->bdev = bdev;
2653 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2654 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2655 device->mode = FMODE_EXCL;
2656 device->dev_stats_valid = 1;
2657 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2658
2659 if (seeding_dev) {
2660 sb->s_flags &= ~SB_RDONLY;
2661 ret = btrfs_prepare_sprout(fs_info);
2662 if (ret) {
2663 btrfs_abort_transaction(trans, ret);
2664 goto error_trans;
2665 }
2666 }
2667
2668 device->fs_devices = fs_devices;
2669
2670 mutex_lock(&fs_devices->device_list_mutex);
2671 mutex_lock(&fs_info->chunk_mutex);
2672 list_add_rcu(&device->dev_list, &fs_devices->devices);
2673 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2674 fs_devices->num_devices++;
2675 fs_devices->open_devices++;
2676 fs_devices->rw_devices++;
2677 fs_devices->total_devices++;
2678 fs_devices->total_rw_bytes += device->total_bytes;
2679
2680 atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2681
2682 if (!blk_queue_nonrot(q))
2683 fs_devices->rotating = true;
2684
2685 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2686 btrfs_set_super_total_bytes(fs_info->super_copy,
2687 round_down(orig_super_total_bytes + device->total_bytes,
2688 fs_info->sectorsize));
2689
2690 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2691 btrfs_set_super_num_devices(fs_info->super_copy,
2692 orig_super_num_devices + 1);
2693
2694 /*
2695 * we've got more storage, clear any full flags on the space
2696 * infos
2697 */
2698 btrfs_clear_space_info_full(fs_info);
2699
2700 mutex_unlock(&fs_info->chunk_mutex);
2701
2702 /* Add sysfs device entry */
2703 btrfs_sysfs_add_device(device);
2704
2705 mutex_unlock(&fs_devices->device_list_mutex);
2706
2707 if (seeding_dev) {
2708 mutex_lock(&fs_info->chunk_mutex);
2709 ret = init_first_rw_device(trans);
2710 mutex_unlock(&fs_info->chunk_mutex);
2711 if (ret) {
2712 btrfs_abort_transaction(trans, ret);
2713 goto error_sysfs;
2714 }
2715 }
2716
2717 ret = btrfs_add_dev_item(trans, device);
2718 if (ret) {
2719 btrfs_abort_transaction(trans, ret);
2720 goto error_sysfs;
2721 }
2722
2723 if (seeding_dev) {
2724 ret = btrfs_finish_sprout(trans);
2725 if (ret) {
2726 btrfs_abort_transaction(trans, ret);
2727 goto error_sysfs;
2728 }
2729
2730 /*
2731 * fs_devices now represents the newly sprouted filesystem and
2732 * its fsid has been changed by btrfs_prepare_sprout
2733 */
2734 btrfs_sysfs_update_sprout_fsid(fs_devices);
2735 }
2736
2737 ret = btrfs_commit_transaction(trans);
2738
2739 if (seeding_dev) {
2740 mutex_unlock(&uuid_mutex);
2741 up_write(&sb->s_umount);
2742 locked = false;
2743
2744 if (ret) /* transaction commit */
2745 return ret;
2746
2747 ret = btrfs_relocate_sys_chunks(fs_info);
2748 if (ret < 0)
2749 btrfs_handle_fs_error(fs_info, ret,
2750 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2751 trans = btrfs_attach_transaction(root);
2752 if (IS_ERR(trans)) {
2753 if (PTR_ERR(trans) == -ENOENT)
2754 return 0;
2755 ret = PTR_ERR(trans);
2756 trans = NULL;
2757 goto error_sysfs;
2758 }
2759 ret = btrfs_commit_transaction(trans);
2760 }
2761
2762 /*
2763 	 * Now that we have written a new super block to this device, check all
2764 	 * other fs_devices lists to see if device_path alienates any other
2765 	 * scanned device.
2766 * We can ignore the return value as it typically returns -EINVAL and
2767 * only succeeds if the device was an alien.
2768 */
2769 btrfs_forget_devices(device_path);
2770
2771 /* Update ctime/mtime for blkid or udev */
2772 update_dev_time(device_path);
2773
2774 return ret;
2775
2776 error_sysfs:
2777 btrfs_sysfs_remove_device(device);
2778 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2779 mutex_lock(&fs_info->chunk_mutex);
2780 list_del_rcu(&device->dev_list);
2781 list_del(&device->dev_alloc_list);
2782 fs_info->fs_devices->num_devices--;
2783 fs_info->fs_devices->open_devices--;
2784 fs_info->fs_devices->rw_devices--;
2785 fs_info->fs_devices->total_devices--;
2786 fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2787 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2788 btrfs_set_super_total_bytes(fs_info->super_copy,
2789 orig_super_total_bytes);
2790 btrfs_set_super_num_devices(fs_info->super_copy,
2791 orig_super_num_devices);
2792 mutex_unlock(&fs_info->chunk_mutex);
2793 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2794 error_trans:
2795 if (seeding_dev)
2796 sb->s_flags |= SB_RDONLY;
2797 if (trans)
2798 btrfs_end_transaction(trans);
2799 error_free_device:
2800 btrfs_free_device(device);
2801 error:
2802 blkdev_put(bdev, FMODE_EXCL);
2803 if (locked) {
2804 mutex_unlock(&uuid_mutex);
2805 up_write(&sb->s_umount);
2806 }
2807 return ret;
2808 }
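
/*
 * Illustrative userspace sketch (hypothetical snippet; the ioctl number
 * and struct come from <linux/btrfs.h>): this function is normally driven
 * by the BTRFS_IOC_ADD_DEV ioctl on a mounted filesystem:
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *
 *	strncpy(args.name, "/dev/sdc", BTRFS_PATH_NAME_MAX);
 *	ioctl(mnt_fd, BTRFS_IOC_ADD_DEV, &args);
 */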
2809
2810 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2811 struct btrfs_device *device)
2812 {
2813 int ret;
2814 struct btrfs_path *path;
2815 struct btrfs_root *root = device->fs_info->chunk_root;
2816 struct btrfs_dev_item *dev_item;
2817 struct extent_buffer *leaf;
2818 struct btrfs_key key;
2819
2820 path = btrfs_alloc_path();
2821 if (!path)
2822 return -ENOMEM;
2823
2824 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2825 key.type = BTRFS_DEV_ITEM_KEY;
2826 key.offset = device->devid;
2827
2828 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2829 if (ret < 0)
2830 goto out;
2831
2832 if (ret > 0) {
2833 ret = -ENOENT;
2834 goto out;
2835 }
2836
2837 leaf = path->nodes[0];
2838 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2839
2840 btrfs_set_device_id(leaf, dev_item, device->devid);
2841 btrfs_set_device_type(leaf, dev_item, device->type);
2842 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2843 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2844 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2845 btrfs_set_device_total_bytes(leaf, dev_item,
2846 btrfs_device_get_disk_total_bytes(device));
2847 btrfs_set_device_bytes_used(leaf, dev_item,
2848 btrfs_device_get_bytes_used(device));
2849 btrfs_mark_buffer_dirty(leaf);
2850
2851 out:
2852 btrfs_free_path(path);
2853 return ret;
2854 }
2855
2856 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2857 struct btrfs_device *device, u64 new_size)
2858 {
2859 struct btrfs_fs_info *fs_info = device->fs_info;
2860 struct btrfs_super_block *super_copy = fs_info->super_copy;
2861 u64 old_total;
2862 u64 diff;
2863
2864 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2865 return -EACCES;
2866
2867 new_size = round_down(new_size, fs_info->sectorsize);
2868
2869 mutex_lock(&fs_info->chunk_mutex);
2870 old_total = btrfs_super_total_bytes(super_copy);
2871 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2872
2873 if (new_size <= device->total_bytes ||
2874 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2875 mutex_unlock(&fs_info->chunk_mutex);
2876 return -EINVAL;
2877 }
2878
2879 btrfs_set_super_total_bytes(super_copy,
2880 round_down(old_total + diff, fs_info->sectorsize));
2881 device->fs_devices->total_rw_bytes += diff;
2882
2883 btrfs_device_set_total_bytes(device, new_size);
2884 btrfs_device_set_disk_total_bytes(device, new_size);
2885 btrfs_clear_space_info_full(device->fs_info);
2886 if (list_empty(&device->post_commit_list))
2887 list_add_tail(&device->post_commit_list,
2888 &trans->transaction->dev_update_list);
2889 mutex_unlock(&fs_info->chunk_mutex);
2890
2891 return btrfs_update_device(trans, device);
2892 }
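
/*
 * Worked example (illustrative, 4K sectorsize): growing a device from
 * 100G to 150G gives diff = round_down(150G - 100G, 4K) = 50G, which is
 * added to both the super block's total_bytes and the fs_devices'
 * total_rw_bytes.
 */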
2893
2894 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2895 {
2896 struct btrfs_fs_info *fs_info = trans->fs_info;
2897 struct btrfs_root *root = fs_info->chunk_root;
2898 int ret;
2899 struct btrfs_path *path;
2900 struct btrfs_key key;
2901
2902 path = btrfs_alloc_path();
2903 if (!path)
2904 return -ENOMEM;
2905
2906 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2907 key.offset = chunk_offset;
2908 key.type = BTRFS_CHUNK_ITEM_KEY;
2909
2910 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2911 if (ret < 0)
2912 goto out;
2913 else if (ret > 0) { /* Logic error or corruption */
2914 btrfs_handle_fs_error(fs_info, -ENOENT,
2915 "Failed lookup while freeing chunk.");
2916 ret = -ENOENT;
2917 goto out;
2918 }
2919
2920 ret = btrfs_del_item(trans, root, path);
2921 if (ret < 0)
2922 btrfs_handle_fs_error(fs_info, ret,
2923 "Failed to delete chunk item.");
2924 out:
2925 btrfs_free_path(path);
2926 return ret;
2927 }
2928
2929 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2930 {
2931 struct btrfs_super_block *super_copy = fs_info->super_copy;
2932 struct btrfs_disk_key *disk_key;
2933 struct btrfs_chunk *chunk;
2934 u8 *ptr;
2935 int ret = 0;
2936 u32 num_stripes;
2937 u32 array_size;
2938 u32 len = 0;
2939 u32 cur;
2940 struct btrfs_key key;
2941
2942 mutex_lock(&fs_info->chunk_mutex);
2943 array_size = btrfs_super_sys_array_size(super_copy);
2944
2945 ptr = super_copy->sys_chunk_array;
2946 cur = 0;
2947
2948 while (cur < array_size) {
2949 disk_key = (struct btrfs_disk_key *)ptr;
2950 btrfs_disk_key_to_cpu(&key, disk_key);
2951
2952 len = sizeof(*disk_key);
2953
2954 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2955 chunk = (struct btrfs_chunk *)(ptr + len);
2956 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2957 len += btrfs_chunk_item_size(num_stripes);
2958 } else {
2959 ret = -EIO;
2960 break;
2961 }
2962 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2963 key.offset == chunk_offset) {
2964 memmove(ptr, ptr + len, array_size - (cur + len));
2965 array_size -= len;
2966 btrfs_set_super_sys_array_size(super_copy, array_size);
2967 } else {
2968 ptr += len;
2969 cur += len;
2970 }
2971 }
2972 mutex_unlock(&fs_info->chunk_mutex);
2973 return ret;
2974 }
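
/*
 * Illustrative layout of the array walked above: sys_chunk_array packs
 * (disk_key, chunk) pairs back to back,
 *
 *	[key0][chunk0 + stripes][key1][chunk1 + stripes]...
 *
 * so deleting an entry is a memmove of everything behind it followed by
 * shrinking sys_array_size.
 */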
2975
2976 /*
2977 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2978 * @logical: Logical block offset in bytes.
2979 * @length: Length of extent in bytes.
2980 *
2981 * Return: Chunk mapping or ERR_PTR.
2982 */
2983 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2984 u64 logical, u64 length)
2985 {
2986 struct extent_map_tree *em_tree;
2987 struct extent_map *em;
2988
2989 em_tree = &fs_info->mapping_tree;
2990 read_lock(&em_tree->lock);
2991 em = lookup_extent_mapping(em_tree, logical, length);
2992 read_unlock(&em_tree->lock);
2993
2994 if (!em) {
2995 btrfs_crit(fs_info,
2996 "unable to find chunk map for logical %llu length %llu",
2997 logical, length);
2998 return ERR_PTR(-EINVAL);
2999 }
3000
3001 if (em->start > logical || em->start + em->len <= logical) {
3002 btrfs_crit(fs_info,
3003 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
3004 logical, logical + length, em->start, em->start + em->len);
3005 free_extent_map(em);
3006 return ERR_PTR(-EINVAL);
3007 }
3008
3009 /* callers are responsible for dropping em's ref. */
3010 return em;
3011 }
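
/*
 * Typical usage (illustrative sketch):
 *
 *	em = btrfs_get_chunk_map(fs_info, logical, length);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	...
 *	free_extent_map(em);
 */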
3012
3013 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3014 {
3015 struct btrfs_fs_info *fs_info = trans->fs_info;
3016 struct extent_map *em;
3017 struct map_lookup *map;
3018 u64 dev_extent_len = 0;
3019 int i, ret = 0;
3020 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3021
3022 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3023 if (IS_ERR(em)) {
3024 /*
3025 * This is a logic error, but we don't want to just rely on the
3026 * user having built with ASSERT enabled, so if ASSERT doesn't
3027 * do anything we still error out.
3028 */
3029 ASSERT(0);
3030 return PTR_ERR(em);
3031 }
3032 map = em->map_lookup;
3033 mutex_lock(&fs_info->chunk_mutex);
3034 check_system_chunk(trans, map->type);
3035 mutex_unlock(&fs_info->chunk_mutex);
3036
3037 /*
3038 * Take the device list mutex to prevent races with the final phase of
3039 * a device replace operation that replaces the device object associated
3040 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
3041 */
3042 mutex_lock(&fs_devices->device_list_mutex);
3043 for (i = 0; i < map->num_stripes; i++) {
3044 struct btrfs_device *device = map->stripes[i].dev;
3045 ret = btrfs_free_dev_extent(trans, device,
3046 map->stripes[i].physical,
3047 &dev_extent_len);
3048 if (ret) {
3049 mutex_unlock(&fs_devices->device_list_mutex);
3050 btrfs_abort_transaction(trans, ret);
3051 goto out;
3052 }
3053
3054 if (device->bytes_used > 0) {
3055 mutex_lock(&fs_info->chunk_mutex);
3056 btrfs_device_set_bytes_used(device,
3057 device->bytes_used - dev_extent_len);
3058 atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3059 btrfs_clear_space_info_full(fs_info);
3060 mutex_unlock(&fs_info->chunk_mutex);
3061 }
3062
3063 ret = btrfs_update_device(trans, device);
3064 if (ret) {
3065 mutex_unlock(&fs_devices->device_list_mutex);
3066 btrfs_abort_transaction(trans, ret);
3067 goto out;
3068 }
3069 }
3070 mutex_unlock(&fs_devices->device_list_mutex);
3071
3072 ret = btrfs_free_chunk(trans, chunk_offset);
3073 if (ret) {
3074 btrfs_abort_transaction(trans, ret);
3075 goto out;
3076 }
3077
3078 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3079
3080 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3081 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3082 if (ret) {
3083 btrfs_abort_transaction(trans, ret);
3084 goto out;
3085 }
3086 }
3087
3088 ret = btrfs_remove_block_group(trans, chunk_offset, em);
3089 if (ret) {
3090 btrfs_abort_transaction(trans, ret);
3091 goto out;
3092 }
3093
3094 out:
3095 /* once for us */
3096 free_extent_map(em);
3097 return ret;
3098 }
3099
3100 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3101 {
3102 struct btrfs_root *root = fs_info->chunk_root;
3103 struct btrfs_trans_handle *trans;
3104 struct btrfs_block_group *block_group;
3105 int ret;
3106
3107 /*
3108 * Prevent races with automatic removal of unused block groups.
3109 * After we relocate and before we remove the chunk with offset
3110 * chunk_offset, automatic removal of the block group can kick in,
3111 * resulting in a failure when calling btrfs_remove_chunk() below.
3112 *
3113 * Make sure to acquire this mutex before doing a tree search (dev
3114 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3115 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3116 * we release the path used to search the chunk/dev tree and before
3117 * the current task acquires this mutex and calls us.
3118 */
3119 lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3120
3121 /* step one, relocate all the extents inside this chunk */
3122 btrfs_scrub_pause(fs_info);
3123 ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3124 btrfs_scrub_continue(fs_info);
3125 if (ret)
3126 return ret;
3127
3128 block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3129 if (!block_group)
3130 return -ENOENT;
3131 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3132 btrfs_put_block_group(block_group);
3133
3134 trans = btrfs_start_trans_remove_block_group(root->fs_info,
3135 chunk_offset);
3136 if (IS_ERR(trans)) {
3137 ret = PTR_ERR(trans);
3138 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3139 return ret;
3140 }
3141
3142 /*
3143 * step two, delete the device extents and the
3144 * chunk tree entries
3145 */
3146 ret = btrfs_remove_chunk(trans, chunk_offset);
3147 btrfs_end_transaction(trans);
3148 return ret;
3149 }
3150
3151 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3152 {
3153 struct btrfs_root *chunk_root = fs_info->chunk_root;
3154 struct btrfs_path *path;
3155 struct extent_buffer *leaf;
3156 struct btrfs_chunk *chunk;
3157 struct btrfs_key key;
3158 struct btrfs_key found_key;
3159 u64 chunk_type;
3160 bool retried = false;
3161 int failed = 0;
3162 int ret;
3163
3164 path = btrfs_alloc_path();
3165 if (!path)
3166 return -ENOMEM;
3167
3168 again:
3169 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3170 key.offset = (u64)-1;
3171 key.type = BTRFS_CHUNK_ITEM_KEY;
3172
3173 while (1) {
3174 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3175 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3176 if (ret < 0) {
3177 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3178 goto error;
3179 }
3180 BUG_ON(ret == 0); /* Corruption */
3181
3182 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3183 key.type);
3184 if (ret)
3185 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3186 if (ret < 0)
3187 goto error;
3188 if (ret > 0)
3189 break;
3190
3191 leaf = path->nodes[0];
3192 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3193
3194 chunk = btrfs_item_ptr(leaf, path->slots[0],
3195 struct btrfs_chunk);
3196 chunk_type = btrfs_chunk_type(leaf, chunk);
3197 btrfs_release_path(path);
3198
3199 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3200 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3201 if (ret == -ENOSPC)
3202 failed++;
3203 else
3204 BUG_ON(ret);
3205 }
3206 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3207
3208 if (found_key.offset == 0)
3209 break;
3210 key.offset = found_key.offset - 1;
3211 }
3212 ret = 0;
3213 if (failed && !retried) {
3214 failed = 0;
3215 retried = true;
3216 goto again;
3217 } else if (WARN_ON(failed && retried)) {
3218 ret = -ENOSPC;
3219 }
3220 error:
3221 btrfs_free_path(path);
3222 return ret;
3223 }
3224
3225 /*
3226 * return 1 : allocated a data chunk successfully,
3227 * return <0: error while allocating a data chunk,
3228 * return 0 : no need to allocate a data chunk.
3229 */
3230 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3231 u64 chunk_offset)
3232 {
3233 struct btrfs_block_group *cache;
3234 u64 bytes_used;
3235 u64 chunk_type;
3236
3237 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3238 ASSERT(cache);
3239 chunk_type = cache->flags;
3240 btrfs_put_block_group(cache);
3241
3242 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3243 return 0;
3244
3245 spin_lock(&fs_info->data_sinfo->lock);
3246 bytes_used = fs_info->data_sinfo->bytes_used;
3247 spin_unlock(&fs_info->data_sinfo->lock);
3248
3249 if (!bytes_used) {
3250 struct btrfs_trans_handle *trans;
3251 int ret;
3252
3253 trans = btrfs_join_transaction(fs_info->tree_root);
3254 if (IS_ERR(trans))
3255 return PTR_ERR(trans);
3256
3257 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3258 btrfs_end_transaction(trans);
3259 if (ret < 0)
3260 return ret;
3261 return 1;
3262 }
3263
3264 return 0;
3265 }
3266
3267 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3268 struct btrfs_balance_control *bctl)
3269 {
3270 struct btrfs_root *root = fs_info->tree_root;
3271 struct btrfs_trans_handle *trans;
3272 struct btrfs_balance_item *item;
3273 struct btrfs_disk_balance_args disk_bargs;
3274 struct btrfs_path *path;
3275 struct extent_buffer *leaf;
3276 struct btrfs_key key;
3277 int ret, err;
3278
3279 path = btrfs_alloc_path();
3280 if (!path)
3281 return -ENOMEM;
3282
3283 trans = btrfs_start_transaction(root, 0);
3284 if (IS_ERR(trans)) {
3285 btrfs_free_path(path);
3286 return PTR_ERR(trans);
3287 }
3288
3289 key.objectid = BTRFS_BALANCE_OBJECTID;
3290 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3291 key.offset = 0;
3292
3293 ret = btrfs_insert_empty_item(trans, root, path, &key,
3294 sizeof(*item));
3295 if (ret)
3296 goto out;
3297
3298 leaf = path->nodes[0];
3299 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3300
3301 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3302
3303 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3304 btrfs_set_balance_data(leaf, item, &disk_bargs);
3305 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3306 btrfs_set_balance_meta(leaf, item, &disk_bargs);
3307 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3308 btrfs_set_balance_sys(leaf, item, &disk_bargs);
3309
3310 btrfs_set_balance_flags(leaf, item, bctl->flags);
3311
3312 btrfs_mark_buffer_dirty(leaf);
3313 out:
3314 btrfs_free_path(path);
3315 err = btrfs_commit_transaction(trans);
3316 if (err && !ret)
3317 ret = err;
3318 return ret;
3319 }
3320
3321 static int del_balance_item(struct btrfs_fs_info *fs_info)
3322 {
3323 struct btrfs_root *root = fs_info->tree_root;
3324 struct btrfs_trans_handle *trans;
3325 struct btrfs_path *path;
3326 struct btrfs_key key;
3327 int ret, err;
3328
3329 path = btrfs_alloc_path();
3330 if (!path)
3331 return -ENOMEM;
3332
3333 trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3334 if (IS_ERR(trans)) {
3335 btrfs_free_path(path);
3336 return PTR_ERR(trans);
3337 }
3338
3339 key.objectid = BTRFS_BALANCE_OBJECTID;
3340 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3341 key.offset = 0;
3342
3343 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3344 if (ret < 0)
3345 goto out;
3346 if (ret > 0) {
3347 ret = -ENOENT;
3348 goto out;
3349 }
3350
3351 ret = btrfs_del_item(trans, root, path);
3352 out:
3353 btrfs_free_path(path);
3354 err = btrfs_commit_transaction(trans);
3355 if (err && !ret)
3356 ret = err;
3357 return ret;
3358 }
3359
3360 /*
3361 * This is a heuristic used to reduce the number of chunks balanced on
3362 * resume after balance was interrupted.
3363 */
3364 static void update_balance_args(struct btrfs_balance_control *bctl)
3365 {
3366 /*
3367 * Turn on soft mode for chunk types that were being converted.
3368 */
3369 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3370 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3371 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3372 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3373 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3374 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3375
3376 /*
3377 * Turn on the usage filter if it is not already used. The idea is
3378 * that chunks that we have already balanced should be
3379 * reasonably full. Don't do it for chunks that are being
3380 * converted - that will keep us from relocating unconverted
3381 * (albeit full) chunks.
3382 */
3383 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3384 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3385 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3386 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3387 bctl->data.usage = 90;
3388 }
3389 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3390 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3391 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3392 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3393 bctl->sys.usage = 90;
3394 }
3395 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3396 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3397 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3398 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3399 bctl->meta.usage = 90;
3400 }
3401 }
3402
3403 /*
3404 * Clear the balance status in fs_info and delete the balance item from disk.
3405 */
3406 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3407 {
3408 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3409 int ret;
3410
3411 BUG_ON(!fs_info->balance_ctl);
3412
3413 spin_lock(&fs_info->balance_lock);
3414 fs_info->balance_ctl = NULL;
3415 spin_unlock(&fs_info->balance_lock);
3416
3417 kfree(bctl);
3418 ret = del_balance_item(fs_info);
3419 if (ret)
3420 btrfs_handle_fs_error(fs_info, ret, NULL);
3421 }
3422
3423 /*
3424 * Balance filters. Return 1 if chunk should be filtered out
3425 * (should not be balanced).
3426 */
3427 static int chunk_profiles_filter(u64 chunk_type,
3428 struct btrfs_balance_args *bargs)
3429 {
3430 chunk_type = chunk_to_extended(chunk_type) &
3431 BTRFS_EXTENDED_PROFILE_MASK;
3432
3433 if (bargs->profiles & chunk_type)
3434 return 0;
3435
3436 return 1;
3437 }
3438
3439 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3440 struct btrfs_balance_args *bargs)
3441 {
3442 struct btrfs_block_group *cache;
3443 u64 chunk_used;
3444 u64 user_thresh_min;
3445 u64 user_thresh_max;
3446 int ret = 1;
3447
3448 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3449 chunk_used = cache->used;
3450
3451 if (bargs->usage_min == 0)
3452 user_thresh_min = 0;
3453 else
3454 user_thresh_min = div_factor_fine(cache->length,
3455 bargs->usage_min);
3456
3457 if (bargs->usage_max == 0)
3458 user_thresh_max = 1;
3459 else if (bargs->usage_max > 100)
3460 user_thresh_max = cache->length;
3461 else
3462 user_thresh_max = div_factor_fine(cache->length,
3463 bargs->usage_max);
3464
3465 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3466 ret = 0;
3467
3468 btrfs_put_block_group(cache);
3469 return ret;
3470 }
3471
3472 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3473 u64 chunk_offset, struct btrfs_balance_args *bargs)
3474 {
3475 struct btrfs_block_group *cache;
3476 u64 chunk_used, user_thresh;
3477 int ret = 1;
3478
3479 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3480 chunk_used = cache->used;
3481
3482 if (bargs->usage_min == 0)
3483 user_thresh = 1;
3484 else if (bargs->usage > 100)
3485 user_thresh = cache->length;
3486 else
3487 user_thresh = div_factor_fine(cache->length, bargs->usage);
3488
3489 if (chunk_used < user_thresh)
3490 ret = 0;
3491
3492 btrfs_put_block_group(cache);
3493 return ret;
3494 }
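
/*
 * Example (illustrative): with usage=50 on a 1G chunk, user_thresh is
 * div_factor_fine(1G, 50) = 512M; a chunk with 300M used is kept for
 * balancing (return 0), one with 700M used is filtered out (return 1).
 */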
3495
3496 static int chunk_devid_filter(struct extent_buffer *leaf,
3497 struct btrfs_chunk *chunk,
3498 struct btrfs_balance_args *bargs)
3499 {
3500 struct btrfs_stripe *stripe;
3501 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3502 int i;
3503
3504 for (i = 0; i < num_stripes; i++) {
3505 stripe = btrfs_stripe_nr(chunk, i);
3506 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3507 return 0;
3508 }
3509
3510 return 1;
3511 }
3512
3513 static u64 calc_data_stripes(u64 type, int num_stripes)
3514 {
3515 const int index = btrfs_bg_flags_to_raid_index(type);
3516 const int ncopies = btrfs_raid_array[index].ncopies;
3517 const int nparity = btrfs_raid_array[index].nparity;
3518
3519 if (nparity)
3520 return num_stripes - nparity;
3521 else
3522 return num_stripes / ncopies;
3523 }
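
/*
 * Examples (illustrative): raid5 with 4 stripes has nparity == 1, giving
 * 3 data stripes; raid1 with 2 stripes has ncopies == 2, giving 1; raid0
 * keeps all of its stripes as data.
 */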
3524
3525 /* [pstart, pend) */
3526 static int chunk_drange_filter(struct extent_buffer *leaf,
3527 struct btrfs_chunk *chunk,
3528 struct btrfs_balance_args *bargs)
3529 {
3530 struct btrfs_stripe *stripe;
3531 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3532 u64 stripe_offset;
3533 u64 stripe_length;
3534 u64 type;
3535 int factor;
3536 int i;
3537
3538 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3539 return 0;
3540
3541 type = btrfs_chunk_type(leaf, chunk);
3542 factor = calc_data_stripes(type, num_stripes);
3543
3544 for (i = 0; i < num_stripes; i++) {
3545 stripe = btrfs_stripe_nr(chunk, i);
3546 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3547 continue;
3548
3549 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3550 stripe_length = btrfs_chunk_length(leaf, chunk);
3551 stripe_length = div_u64(stripe_length, factor);
3552
3553 if (stripe_offset < bargs->pend &&
3554 stripe_offset + stripe_length > bargs->pstart)
3555 return 0;
3556 }
3557
3558 return 1;
3559 }
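
/*
 * Example (illustrative): a 1G raid0 chunk across two devices has factor
 * == 2, so each stripe covers 512M of its device; a stripe at physical
 * offset 2G spans [2G, 2.5G) and overlaps a drange of [2.25G, 3G), so the
 * chunk is not filtered out.
 */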
3560
3561 /* [vstart, vend) */
3562 static int chunk_vrange_filter(struct extent_buffer *leaf,
3563 struct btrfs_chunk *chunk,
3564 u64 chunk_offset,
3565 struct btrfs_balance_args *bargs)
3566 {
3567 if (chunk_offset < bargs->vend &&
3568 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3569 /* at least part of the chunk is inside this vrange */
3570 return 0;
3571
3572 return 1;
3573 }
3574
3575 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3576 struct btrfs_chunk *chunk,
3577 struct btrfs_balance_args *bargs)
3578 {
3579 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3580
3581 if (bargs->stripes_min <= num_stripes
3582 && num_stripes <= bargs->stripes_max)
3583 return 0;
3584
3585 return 1;
3586 }
3587
chunk_soft_convert_filter(u64 chunk_type,struct btrfs_balance_args * bargs)3588 static int chunk_soft_convert_filter(u64 chunk_type,
3589 struct btrfs_balance_args *bargs)
3590 {
3591 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3592 return 0;
3593
3594 chunk_type = chunk_to_extended(chunk_type) &
3595 BTRFS_EXTENDED_PROFILE_MASK;
3596
3597 if (bargs->target == chunk_type)
3598 return 1;
3599
3600 return 0;
3601 }
3602
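/*
 * Note on the convention used by the chunk_*_filter() helpers above: each
 * returns 1 when the chunk should be filtered out (skipped) and 0 when it
 * passes, so should_balance_chunk() below balances a chunk only if every
 * enabled filter returns 0.
 */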
static int should_balance_chunk(struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
		   chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global
		 * information about the count of all chunks that satisfy the
		 * filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}

static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/*
	 * The single value limit and the min/max limits use the same bytes in
	 * struct btrfs_balance_args (a union), so save the limits here before
	 * the counting pass modifies them.
	 */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and the min/max limits use the same
		 * bytes in struct btrfs_balance_args (a union), restore the
		 * saved limits for the real balancing pass.
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(leaf, chunk, found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (counting) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
		     count_data < bctl->data.limit_min) ||
		    ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
		     count_meta < bctl->meta.limit_min) ||
		    ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
		     count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (!chunk_reserved) {
			/*
			 * We may be relocating the only data chunk we have,
			 * which could potentially end up with losing data's
			 * raid profile, so let's allocate an empty one in
			 * advance.
			 */
			ret = btrfs_may_alloc_data_chunk(fs_info,
							 found_key.offset);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			} else if (ret == 1) {
				chunk_reserved = 1;
			}
		}

		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else if (ret == -ETXTBSY) {
			btrfs_info(fs_info,
	   "skipping relocation of block group %llu due to active swapfile",
				   found_key.offset);
			ret = 0;
		} else if (ret) {
			goto error;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags:    profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	return has_single_bit_set(flags);
}
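
/*
 * For example, a plain BTRFS_BLOCK_GROUP_RAID1 profile is valid and reduced,
 * while a combination such as RAID1 | RAID5 has more than one profile bit set
 * and fails the has_single_bit_set() check above.
 */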

static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

/*
 * Validate target profile against allowed profiles and return true if it's OK.
 * Otherwise print the error message and return false.
 */
static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
		const struct btrfs_balance_args *bargs,
		u64 allowed, const char *type)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return true;

	/* Profile is valid and does not have bits outside of the allowed set */
	if (alloc_profile_is_valid(bargs->target, 1) &&
	    (bargs->target & ~allowed) == 0)
		return true;

	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
		  type, btrfs_bg_type_to_raid_name(bargs->target));
	return false;
}

/*
 * Fill @buf with textual description of balance filter flags @bargs, up to
 * @size_buf including the terminating null. The output may be trimmed if it
 * does not fit into the provided buffer.
 */
static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
				  u32 size_buf)
{
	int ret;
	u32 size_bp = size_buf;
	char *bp = buf;
	u64 flags = bargs->flags;
	char tmp_buf[128] = {'\0'};

	if (!flags)
		return;

#define CHECK_APPEND_NOARG(a)						\
	do {								\
		ret = snprintf(bp, size_bp, (a));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_2ARG(a, v1, v2)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
		CHECK_APPEND_1ARG("convert=%s,",
				  btrfs_bg_type_to_raid_name(bargs->target));

	if (flags & BTRFS_BALANCE_ARGS_SOFT)
		CHECK_APPEND_NOARG("soft,");

	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
					    sizeof(tmp_buf));
		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
	}

	if (flags & BTRFS_BALANCE_ARGS_USAGE)
		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);

	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
		CHECK_APPEND_2ARG("usage=%u..%u,",
				  bargs->usage_min, bargs->usage_max);

	if (flags & BTRFS_BALANCE_ARGS_DEVID)
		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);

	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
		CHECK_APPEND_2ARG("drange=%llu..%llu,",
				  bargs->pstart, bargs->pend);

	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
				  bargs->vstart, bargs->vend);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
		CHECK_APPEND_2ARG("limit=%u..%u,",
				  bargs->limit_min, bargs->limit_max);

	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
		CHECK_APPEND_2ARG("stripes=%u..%u,",
				  bargs->stripes_min, bargs->stripes_max);

#undef CHECK_APPEND_2ARG
#undef CHECK_APPEND_1ARG
#undef CHECK_APPEND_NOARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
	else
		buf[0] = '\0';
}
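
/*
 * For instance, balancing data chunks with "convert to raid1, soft, at most
 * half full" would be rendered by the function above as the string
 * "convert=raid1,soft,usage=50" (trailing comma trimmed).
 */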

static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
	u32 size_buf = 1024;
	char tmp_buf[192] = {'\0'};
	char *buf;
	char *bp;
	u32 size_bp = size_buf;
	int ret;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	buf = kzalloc(size_buf, GFP_KERNEL);
	if (!buf)
		return;

	bp = buf;

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (bctl->flags & BTRFS_BALANCE_FORCE)
		CHECK_APPEND_1ARG("%s", "-f ");

	if (bctl->flags & BTRFS_BALANCE_DATA) {
		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_METADATA) {
		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
	}

#undef CHECK_APPEND_1ARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
	btrfs_info(fs_info, "balance: %s %s",
		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
		   "resume" : "start", buf);

	kfree(buf);
}
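
/*
 * A typical message produced by the function above might look like (example
 * only): "balance: start -dconvert=raid1,soft".
 */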

/*
 * Should be called with balance mutex held
 */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	u64 meta_target, data_target;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;
	bool reducing_redundancy;
	int i;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    btrfs_should_cancel_balance(fs_info)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info,
	  "balance: mixed groups data and metadata options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * rw_devices will not change at the moment, device add/delete/replace
	 * are exclusive
	 */
	num_devices = fs_info->fs_devices->rw_devices;

	/*
	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
	 * special bit for it, to make it easier to distinguish. Thus we need
	 * to set it manually, or balance would refuse the profile.
	 */
	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
		if (num_devices >= btrfs_raid_array[i].devs_min)
			allowed |= btrfs_raid_array[i].bg_flag;

	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
	    !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow to reduce metadata or system integrity only if force set for
	 * profiles with redundancy (copies, parity)
	 */
	allowed = 0;
	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
		if (btrfs_raid_array[i].ncopies >= 2 ||
		    btrfs_raid_array[i].tolerated_failures >= 1)
			allowed |= btrfs_raid_array[i].bg_flag;
	}
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed)))
			reducing_redundancy = true;
		else
			reducing_redundancy = false;

		/* if we're not converting, the target field is uninitialized */
		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->data.target : fs_info->avail_data_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (reducing_redundancy) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			btrfs_info(fs_info,
			   "balance: force reducing metadata redundancy");
		} else {
			btrfs_err(fs_info,
	"balance: reduces metadata redundancy, use --force if you want this");
			ret = -EINVAL;
			goto out;
		}
	}

	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
		btrfs_warn(fs_info,
	"balance: metadata profile %s has lower redundancy than data profile %s",
			   btrfs_bg_type_to_raid_name(meta_target),
			   btrfs_bg_type_to_raid_name(data_target));
	}

	if (fs_info->send_in_progress) {
		btrfs_warn_rl(fs_info,
"cannot run balance while send operations are in progress (%d in progress)",
			      fs_info->send_in_progress);
		ret = -EAGAIN;
		goto out;
	}

	ret = insert_balance_item(fs_info, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		BUG_ON(fs_info->balance_ctl);
		spin_lock(&fs_info->balance_lock);
		fs_info->balance_ctl = bctl;
		spin_unlock(&fs_info->balance_lock);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
	describe_balance_start_or_resume(fs_info);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
		btrfs_info(fs_info, "balance: paused");
	/*
	 * Balance can be canceled by:
	 *
	 * - Regular cancel request
	 *   Then ret == -ECANCELED and balance_cancel_req > 0
	 *
	 * - Fatal signal to "btrfs" process
	 *   Either the signal caught by wait_reserve_ticket() and callers
	 *   got -EINTR, or caught by btrfs_should_cancel_balance() and
	 *   got -ECANCELED.
	 *   Either way, in this case balance_cancel_req = 0, and
	 *   ret == -EINTR or ret == -ECANCELED.
	 *
	 * So here we only check the return value to catch canceled balance.
	 */
	else if (ret == -ECANCELED || ret == -EINTR)
		btrfs_info(fs_info, "balance: canceled");
	else
		btrfs_info(fs_info, "balance: ended with status: %d", ret);

	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		btrfs_update_ioctl_balance_args(fs_info, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		reset_balance_state(fs_info);
		btrfs_exclop_finish(fs_info);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		reset_balance_state(fs_info);
	else
		kfree(bctl);
	btrfs_exclop_finish(fs_info);

	return ret;
}

static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	sb_start_write(fs_info->sb);
	mutex_lock(&fs_info->balance_mutex);
	if (fs_info->balance_ctl)
		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
	mutex_unlock(&fs_info->balance_mutex);
	sb_end_write(fs_info->sb);

	return ret;
}

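/*
 * Spawn a kthread that resumes a previously paused balance, unless the
 * skip_balance mount option was given.
 */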
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return 0;
	}
	mutex_unlock(&fs_info->balance_mutex);

	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
		btrfs_info(fs_info, "balance: resume skipped");
		return 0;
	}

	/*
	 * A ro->rw remount sequence should continue with the paused balance
	 * regardless of who pauses it, system or the user as of now, so set
	 * the resume flag.
	 */
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
	spin_unlock(&fs_info->balance_lock);

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}

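/*
 * Read a balance item left on disk by an interrupted balance and set up
 * fs_info::balance_ctl so the balance can later be resumed or canceled.
 */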
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	/*
	 * This should never happen, as the paused balance state is recovered
	 * during mount without any chance of other exclusive ops to collide.
	 *
	 * This gives the exclusive op status to balance and keeps it in paused
	 * state until user intervention (cancel or umount). If the ownership
	 * cannot be assigned, show a message but do not fail. The balance
	 * is in a paused state and must have fs_info::balance_ctl properly
	 * set up.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
		btrfs_warn(fs_info,
	"balance: cannot set exclusive op status, resume manually");

	btrfs_release_path(path);

	mutex_lock(&fs_info->balance_mutex);
	BUG_ON(fs_info->balance_ctl);
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
	mutex_unlock(&fs_info->balance_mutex);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	/*
	 * A paused balance with the item stored on disk can be resumed at
	 * mount time if the mount is read-write. Otherwise it's still paused
	 * and we must not allow cancelling as it deletes the item.
	 */
	if (sb_rdonly(fs_info->sb)) {
		mutex_unlock(&fs_info->balance_mutex);
		return -EROFS;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * If the balance is running, just wait and return; the balance item
	 * is deleted in btrfs_balance() in that case.
	 */
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		mutex_lock(&fs_info->balance_mutex);
	} else {
		mutex_unlock(&fs_info->balance_mutex);
		/*
		 * Lock released to allow other waiters to continue, we'll
		 * reexamine the status again.
		 */
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl) {
			reset_balance_state(fs_info);
			btrfs_exclop_finish(fs_info);
			btrfs_info(fs_info, "balance: canceled");
		}
	}

	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;
	bool closing = false;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	while (1) {
		if (btrfs_fs_closing(fs_info)) {
			closing = true;
			break;
		}
		ret = btrfs_search_forward(root, &key, path,
					   BTRFS_OLDEST_GENERATION);
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			continue;
		} else {
			goto skip;
		}
update_tree:
		btrfs_release_path(path);
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans,
						  root_item.received_uuid,
						  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

skip:
		btrfs_release_path(path);
		if (trans) {
			ret = btrfs_end_transaction(trans);
			trans = NULL;
			if (ret)
				break;
		}

		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else if (!closing)
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * Shrinking a device means finding all of the device extents past the new
 * size, and then following the back refs to the chunks. The chunk relocation
 * code actually frees the device extent.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;
	u64 start;

	new_size = round_down(new_size, fs_info->sectorsize);
	start = new_size;
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}

	/*
	 * Once the device's size has been set to the new size, ensure all
	 * in-memory chunks are synced to disk so that the loop below sees them
	 * and relocates them accordingly.
	 */
	if (contains_pending_extent(device, &start, diff)) {
		mutex_unlock(&fs_info->chunk_mutex);
		ret = btrfs_commit_transaction(trans);
		if (ret)
			goto done;
	} else {
		mutex_unlock(&fs_info->chunk_mutex);
		btrfs_end_transaction(trans);
	}

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so let's allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);
	/* Clear all state bits beyond the shrunk device size */
	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
			  CHUNK_STATE_MASK);

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}

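/*
 * Append a copy of the chunk item to the superblock's sys_chunk_array.  The
 * array is a packed sequence of (struct btrfs_disk_key, chunk item) pairs,
 * bounded by BTRFS_SYSTEM_CHUNK_ARRAY_SIZE.
 */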
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	mutex_unlock(&fs_info->chunk_mutex);

	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
		return;

	btrfs_set_fs_incompat(info, RAID1C34);
}

/*
 * Structure used internally by the btrfs_alloc_chunk() path.
 * Wraps the needed parameters.
 */
struct alloc_chunk_ctl {
	u64 start;
	u64 type;
	/* Total number of stripes to allocate */
	int num_stripes;
	/* sub_stripes info for map */
	int sub_stripes;
	/* Stripes per device */
	int dev_stripes;
	/* Maximum number of devices to use */
	int devs_max;
	/* Minimum number of devices to use */
	int devs_min;
	/* ndevs has to be a multiple of this */
	int devs_increment;
	/* Number of copies */
	int ncopies;
	/* Number of stripes worth of bytes to store parity information */
	int nparity;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 dev_extent_min;
	u64 stripe_size;
	u64 chunk_size;
	int ndevs;
};

static void init_alloc_chunk_ctl_policy_regular(
				struct btrfs_fs_devices *fs_devices,
				struct alloc_chunk_ctl *ctl)
{
	u64 type = ctl->type;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		ctl->max_stripe_size = SZ_1G;
		ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* For larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			ctl->max_stripe_size = SZ_1G;
		else
			ctl->max_stripe_size = SZ_256M;
		ctl->max_chunk_size = ctl->max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ctl->max_stripe_size = SZ_32M;
		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
		ctl->devs_max = min_t(int, ctl->devs_max,
				      BTRFS_MAX_DEVS_SYS_CHUNK);
	} else {
		BUG();
	}

	/* We don't want a chunk larger than 10% of writable space */
	ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
				  ctl->max_chunk_size);
	ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
}
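
/*
 * Example of the 10% cap above: div_factor(x, 1) computes x * 1 / 10, so on a
 * filesystem with 100GiB of writable space a metadata chunk on a large
 * filesystem is capped at min(1GiB, 10GiB) = 1GiB and a system chunk at
 * min(64MiB, 10GiB) = 64MiB.
 */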

static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
				 struct alloc_chunk_ctl *ctl)
{
	int index = btrfs_bg_flags_to_raid_index(ctl->type);

	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
	ctl->devs_max = btrfs_raid_array[index].devs_max;
	if (!ctl->devs_max)
		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
	ctl->devs_min = btrfs_raid_array[index].devs_min;
	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
	ctl->ncopies = btrfs_raid_array[index].ncopies;
	ctl->nparity = btrfs_raid_array[index].nparity;
	ctl->ndevs = 0;

	switch (fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
		break;
	default:
		BUG();
	}
}

static int gather_device_info(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;
	struct btrfs_device *device;
	u64 total_avail;
	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
	int ret;
	int ndevs = 0;
	u64 max_avail;
	u64 dev_offset;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			     "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
					&device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail < ctl->dev_extent_min)
			continue;

		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
					   &max_avail);
		if (ret && ret != -ENOSPC)
			return ret;

		if (ret == 0)
			max_avail = dev_extent_want;

		if (max_avail < ctl->dev_extent_min) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%llu",
					    __func__, device->devid, max_avail,
					    ctl->dev_extent_min);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}
	ctl->ndevs = ndevs;

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	return 0;
}

static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
				      struct btrfs_device_info *devices_info)
{
	/* Number of stripes that count for block group size */
	int data_stripes;

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust.
	 */
	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
				   ctl->dev_stripes);
	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;

	/* This will have to be fixed for RAID1 and RAID10 over more drives */
	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk is
	 * really going to be in terms of logical address space, and compare
	 * that answer with the max chunk size. If it's higher, we try to
	 * reduce stripe_size.
	 */
	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
							data_stripes), SZ_16M),
				       ctl->stripe_size);
	}

	/* Align to BTRFS_STRIPE_LEN */
	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
	ctl->chunk_size = ctl->stripe_size * data_stripes;

	return 0;
}
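
/*
 * Worked example for the sizing above: RAID10 data (ncopies == 2, nparity ==
 * 0, dev_stripes == 1) over 4 devices whose smallest hole is 1GiB gives
 * num_stripes = 4 and data_stripes = 2, so the chunk spans 2GiB of logical
 * space while consuming 1GiB on each of the 4 devices.
 */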

static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;

	/*
	 * Round down to the number of usable stripes; devs_increment can be
	 * any number so we can't use round_down(), which requires a power of
	 * 2, while rounddown is safe.
	 */
	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);

	if (ctl->ndevs < ctl->devs_min) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
			btrfs_debug(info,
	"%s: not enough devices with free space: have=%d minimum required=%d",
				    __func__, ctl->ndevs, ctl->devs_min);
		}
		return -ENOSPC;
	}

	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);

	switch (fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return decide_stripe_size_regular(ctl, devices_info);
	default:
		BUG();
	}
}

static int create_chunk(struct btrfs_trans_handle *trans,
			struct alloc_chunk_ctl *ctl,
			struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	u64 start = ctl->start;
	u64 type = ctl->type;
	int ret;
	int i;
	int j;

	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;
	map->num_stripes = ctl->num_stripes;

	for (i = 0; i < ctl->ndevs; ++i) {
		for (j = 0; j < ctl->dev_stripes; ++j) {
			int s = i * ctl->dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * ctl->stripe_size;
		}
	}
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = ctl->sub_stripes;

	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		return -ENOMEM;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = ctl->chunk_size;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = ctl->stripe_size;

	em_tree = &info->mapping_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret) {
		write_unlock(&em_tree->lock);
		free_extent_map(em);
		return ret;
	}
	write_unlock(&em_tree->lock);

	ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = map->stripes[i].dev;

		btrfs_device_set_bytes_used(dev,
					    dev->bytes_used + ctl->stripe_size);
		if (list_empty(&dev->post_commit_list))
			list_add_tail(&dev->post_commit_list,
				      &trans->transaction->dev_update_list);
	}

	atomic64_sub(ctl->stripe_size * map->num_stripes,
		     &info->free_chunk_space);

	free_extent_map(em);
	check_raid56_incompat_flag(info, type);
	check_raid1c34_incompat_flag(info, type);

	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);

	return ret;
}

int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device_info *devices_info = NULL;
	struct alloc_chunk_ctl ctl;
	int ret;

	lockdep_assert_held(&info->chunk_mutex);

	if (!alloc_profile_is_valid(type, 0)) {
		ASSERT(0);
		return -EINVAL;
	}

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return -ENOSPC;
	}

	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
		ASSERT(0);
		return -EINVAL;
	}

	ctl.start = find_next_chunk(info);
	ctl.type = type;
	init_alloc_chunk_ctl(fs_devices, &ctl);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	ret = gather_device_info(fs_devices, &ctl, devices_info);
	if (ret < 0)
		goto out;

	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
	if (ret < 0)
		goto out;

	ret = create_chunk(trans, &ctl, devices_info);

out:
	kfree(devices_info);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does work
 * that makes the new allocated chunk usable, but does not do any operation
 * that modifies the chunk tree. The second part does the work that
 * requires modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 alloc_profile;
	int ret;

	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	ret = btrfs_alloc_chunk(trans, alloc_profile);
	if (ret)
		return ret;

	alloc_profile = btrfs_system_alloc_profile(fs_info);
	ret = btrfs_alloc_chunk(trans, alloc_profile);
	return ret;
}
5415
btrfs_chunk_max_errors(struct map_lookup * map)5416 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5417 {
5418 const int index = btrfs_bg_flags_to_raid_index(map->type);
5419
5420 return btrfs_raid_array[index].tolerated_failures;
5421 }
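
/*
 * Illustrative example (not part of the on-disk logic): for a chunk of
 * type BTRFS_BLOCK_GROUP_RAID6 the lookup above yields
 * btrfs_raid_array[BTRFS_RAID_RAID6].tolerated_failures == 2, so up to
 * two of the chunk's stripes may fail before the chunk can no longer be
 * written safely.
 */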

int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
			     &map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
			      &map->stripes[i].dev->dev_state)) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors, we
	 * cannot write the data into that chunk successfully, so set it
	 * read-only.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_tree_free(struct extent_map_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->lock);
		em = lookup_extent_mapping(tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(tree, em);
		write_unlock(&tree->lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret;

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need
		 * to loop retry in order to rebuild the correct data.
		 *
		 * Fail a stripe at a time on every retry except the
		 * stripe under reconstruction.
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}

static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		(BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	preferred_mirror = first + current->pid % num_stripes;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * Try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available.
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/*
	 * We couldn't find one that doesn't fail. Just return something
	 * and the I/O error handling code will clean up eventually.
	 */
	return preferred_mirror;
}
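
/*
 * Worked example (illustrative only, assuming a RAID1 chunk with
 * num_stripes == 2 and first == 0): a reader with pid 4243 gets
 * preferred_mirror = 0 + 4243 % 2 = 1, so different processes spread
 * their reads across both mirrors. If stripe 1 is the dev-replace
 * source and stripe 0 has a usable bdev, the first pass (tolerance == 0)
 * returns 0 instead; only when no other mirror is usable does the second
 * pass (tolerance == 1) fall back to the source drive.
 */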

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	int i;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			/* Swap if parity is on a smaller index */
			if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
				swap(bbio->stripes[i], bbio->stripes[i + 1]);
				swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
				again = 1;
			}
		}
	}
}
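
/*
 * Illustrative example: since RAID5_P_STRIPE and RAID6_Q_STRIPE are
 * (u64)-2 and (u64)-1, they always compare greater than any real logical
 * address, so a raid_map of { P, 4M, 5M } with its matching stripes is
 * reordered to { 4M, 5M, P } by the pass above, keeping the data stripes
 * sorted by logical address with parity at the end.
 */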

static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* The size of btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* Plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* Plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * Plus the raid_map, which includes both the tgt dev
		 * and the stripes.
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	refcount_set(&bbio->refs, 1);

	bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
	bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);

	return bbio;
}
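
/*
 * A sketch of the resulting single-allocation layout (illustrative,
 * assuming total_stripes == 3 and real_stripes == 2):
 *
 *   +------------------+---------------+------------+----------+
 *   | struct btrfs_bio | 3 bio stripes | 2 ints     | 3 u64s   |
 *   | (fixed header)   | bbio->stripes | tgtdev_map | raid_map |
 *   +------------------+---------------+------------+----------+
 *
 * The pointer arithmetic above simply carves tgtdev_map and raid_map out
 * of the tail of the same kzalloc'ed buffer, so the single kfree() in
 * btrfs_put_bbio() releases everything at once.
 */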

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}

/* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Please note: discard won't be sent to the target device of a device
 * replace.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 *length_ret,
					 struct btrfs_bio **bbio_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_bio *bbio;
	u64 length = *length_ret;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/* Discard always returns a bbio */
	ASSERT(bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	offset = logical - em->start;
	length = min_t(u64, em->start + em->len - logical, length);
	*length_ret = length;

	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;

	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
				BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = map->num_stripes;
	} else {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}

	bbio = alloc_btrfs_bio(num_stripes, 0);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bbio->stripes[i].length = stripes_per_dev *
				map->stripe_len;

			if (i / sub_stripes < remaining_stripes)
				bbio->stripes[i].length +=
					map->stripe_len;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				bbio->stripes[i].length -=
					stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bbio->stripes[i].length -=
					stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bbio->stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			stripe_index = 0;
			stripe_nr++;
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}
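
/*
 * Worked example for the striding math above (illustrative numbers only):
 * a discard of offset == 0, length == 256K against a RAID0 chunk with
 * num_stripes == 2 and stripe_len == 64K gives
 *
 *   stripe_nr = 0,  stripe_nr_end = 4,  stripe_cnt = 4,
 *   factor    = 2,  stripes_per_dev = 2, remaining_stripes = 0,
 *
 * so the resulting bbio has two stripes, one per device, each covering
 * 2 * 64K = 128K of that device. stripe_offset and stripe_end_offset are
 * both zero here; they only trim the first and last stripes when the
 * discard range is not stripe-aligned.
 */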

/*
 * In dev-replace case, for repair case (that's the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of the
 * left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_bio *bbio = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bbio, 0, 0);
	if (ret) {
		ASSERT(bbio == NULL);
		return ret;
	}

	num_stripes = bbio->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * that means that the requested area is not left of the left
		 * cursor
		 */
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	/*
	 * Process the rest of the function using the mirror_num of the source
	 * drive. Therefore look it up first. At the end, patch the device
	 * pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bbio->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address
		 */
		if (found &&
		    physical_of_found <= bbio->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bbio->stripes[i].physical;
	}

	btrfs_put_bbio(bbio);

	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}

static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_bio **bbio_ret,
				      struct btrfs_dev_replace *dev_replace,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_bio *bbio = *bbio_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * Duplicate the write operations while the dev-replace
		 * procedure is running. Since the copying of the old disk to
		 * the new disk takes place at run time while the filesystem is
		 * mounted writable, the regular write operations to the old
		 * disk have to be duplicated to go to the new disk as well.
		 *
		 * Note that device->missing is handled by the caller, and that
		 * the write to the old disk is already set up in the stripes
		 * array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can also
		 * be used to read data in case it is needed to repair a corrupt
		 * block elsewhere. This is possible if the requested area is
		 * left of the left cursor. In this area, the target drive is a
		 * full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it simple,
				 * only add the mirror with the lowest physical
				 * address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;

			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;

			tgtdev_indexes++;
			num_stripes++;
		}
	}

	*num_stripes_ret = num_stripes;
	*max_errors_ret = max_errors;
	bbio->num_tgtdevs = tgtdev_indexes;
	*bbio_ret = bbio;
}

static bool need_full_stripe(enum btrfs_map_op op)
{
	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}

/*
 * btrfs_get_io_geometry - calculates the geometry of a particular
 *			   (address, len) tuple. This information is used to
 *			   calculate how big a particular bio can get before it
 *			   straddles a stripe.
 *
 * @fs_info: the filesystem
 * @logical: address that we want to figure out the geometry of
 * @len:     the length of IO we are going to perform, starting at @logical
 * @op:      type of operation - write or read
 * @io_geom: pointer used to return values
 *
 * Returns < 0 in case a chunk for the given logical address cannot be found,
 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
 */
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
			  u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u64 raid56_full_stripe_start = (u64)-1;
	int data_stripes;
	int ret = 0;

	ASSERT(op != BTRFS_MAP_DISCARD);

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* Offset of this logical address in the chunk */
	offset = logical - em->start;
	/* Len of a stripe in a chunk */
	stripe_len = map->stripe_len;
	/* Stripe where this block falls in */
	stripe_nr = div64_u64(offset, stripe_len);
	/* Offset of stripe in the chunk */
	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
			   stripe_offset, offset, em->start, logical,
			   stripe_len);
		ret = -EINVAL;
		goto out;
	}

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
	data_stripes = nr_data_stripes(map);

	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len = stripe_len - stripe_offset;

		/*
		 * In case of raid56, we need to know the stripe aligned start
		 */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			unsigned long full_stripe_len = stripe_len * data_stripes;
			raid56_full_stripe_start = offset;

			/*
			 * Allow a write of a full stripe, but make sure we
			 * don't allow straddling of stripes
			 */
			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
					full_stripe_len);
			raid56_full_stripe_start *= full_stripe_len;

			/*
			 * For writes to RAID[56], allow a full stripeset across
			 * all disks. For other RAID types and for RAID[56]
			 * reads, just allow a single stripe (on a single disk).
			 */
			if (op == BTRFS_MAP_WRITE) {
				max_len = stripe_len * data_stripes -
					  (offset - raid56_full_stripe_start);
			}
		}
		len = min_t(u64, em->len - offset, max_len);
	} else {
		len = em->len - offset;
	}

	io_geom->len = len;
	io_geom->offset = offset;
	io_geom->stripe_len = stripe_len;
	io_geom->stripe_nr = stripe_nr;
	io_geom->stripe_offset = stripe_offset;
	io_geom->raid56_stripe_offset = raid56_full_stripe_start;

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}
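
/*
 * Worked example (illustrative numbers only): for a chunk starting at
 * logical 1G with stripe_len == 64K, a request at logical == 1G + 200K
 * gives
 *
 *   offset        = 200K
 *   stripe_nr     = 200K / 64K = 3
 *   stripe_offset = 200K - 3 * 64K = 8K
 *
 * so on a striped profile at most 64K - 8K = 56K can be issued before
 * the bio would straddle into stripe 4 on the next device.
 */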

static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u32 stripe_index;
	int data_stripes;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;
	struct btrfs_io_geometry geom;

	ASSERT(bbio_ret);
	ASSERT(op != BTRFS_MAP_DISCARD);

	ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
	if (ret < 0)
		return ret;

	em = btrfs_get_chunk_map(fs_info, logical, *length);
	ASSERT(!IS_ERR(em));
	map = em->map_lookup;

	*length = geom.len;
	stripe_len = geom.stripe_len;
	stripe_nr = geom.stripe_nr;
	stripe_offset = geom.stripe_offset;
	raid56_full_stripe_start = geom.raid56_stripe_offset;
	data_stripes = nr_data_stripes(map);

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
						    dev_replace->srcdev->devid,
						    &mirror_num,
					    &physical_to_patch_in_first_stripe);
		if (ret)
			goto out;
		else
			patch_the_first_stripe_for_dev_replace = 1;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
		if (!need_full_stripe(op))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
		if (need_full_stripe(op))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (need_full_stripe(op)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

		if (need_full_stripe(op))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div64_u64(raid56_full_stripe_start,
					stripe_len * data_stripes);

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					data_stripes, &stripe_index);
			if (mirror_num > 1)
				stripe_index = data_stripes + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * After this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array.
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical = map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}

	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * data_stripes;
		for (i = 0; i < data_stripes; i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;

		sort_parity_stripes(bbio, num_stripes);
	}

	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
					  &max_errors);
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * This is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror.
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	free_extent_map(em);
	return ret;
}
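
/*
 * Sketch of the raid_map rotation above (illustrative numbers only): for
 * a RAID5 chunk with num_stripes == 3 (two data stripes plus P) and
 * stripe_nr == 1, rot = 1 % 3 = 1, so the data stripes land at raid_map
 * indexes 1 and 2 and index 0 (== (2+1) % 3) is marked RAID5_P_STRIPE.
 * sort_parity_stripes() then reorders both arrays so the data stripes
 * come first, sorted by logical address, with the parity stripe last.
 */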

int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num)
{
	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     length, bbio_ret);

	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
}

static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);

	btrfs_put_bbio(bbio);
}

static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_status) {
		atomic_inc(&bbio->error);
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
			struct btrfs_device *dev = btrfs_io_bio(bio)->device;

			ASSERT(dev->bdev);
			if (bio_op(bio) == REQ_OP_WRITE)
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
			else if (!(bio->bi_opf & REQ_RAHEAD))
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_READ_ERRS);
			if (bio->bi_opf & REQ_PREFLUSH)
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/*
		 * Only send an error to the higher layers if it is beyond
		 * the tolerance of the btrfs bio.
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_status = BLK_STS_IOERR;
		} else {
			/*
			 * This bio is actually up to date, we didn't go
			 * over the max number of errors.
			 */
			bio->bi_status = BLK_STS_OK;
		}

		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
			      u64 physical, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = bbio->fs_info;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->device = dev;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
		dev->devid, bio->bi_iter.bi_size);
	bio_set_dev(bio, dev->bdev);

	btrfs_bio_counter_inc_noblocked(fs_info);

	btrfsic_submit_bio(bio);
}

static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		if (atomic_read(&bbio->error) > bbio->max_errors)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_status = BLK_STS_OK;
		btrfs_end_bbio(bbio, bio);
	}
}

blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
				&map_length, &bbio, mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
		/*
		 * In this case, map_length has been set to the length of
		 * a single stripe; not the whole write.
		 */
		if (bio_op(bio) == REQ_OP_WRITE) {
			ret = raid56_parity_write(fs_info, bio, bbio,
						  map_length);
		} else {
			ret = raid56_parity_recover(fs_info, bio, bbio,
						    map_length, mirror_num, 1);
		}

		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	if (map_length < length) {
		btrfs_crit(fs_info,
			   "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
						   &dev->dev_state) ||
		    (bio_op(first_bio) == REQ_OP_WRITE &&
		     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1)
			bio = btrfs_bio_clone(first_bio);
		else
			bio = first_bio;

		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
	}
	btrfs_bio_counter_dec(fs_info);
	return BLK_STS_OK;
}

/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 *
 * The seed devices attached to @fs_devices are searched as well; note the
 * code below walks the seed list unconditionally, regardless of @seed.
 */
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid,
				       bool seed)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *seed_devs;

	if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (device->devid == devid &&
			    (!uuid || memcmp(device->uuid, uuid,
					     BTRFS_UUID_SIZE) == 0))
				return device;
		}
	}

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		if (!fsid ||
		    !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
			list_for_each_entry(device, &seed_devs->devices,
					    dev_list) {
				if (device->devid == devid &&
				    (!uuid || memcmp(device->uuid, uuid,
						     BTRFS_UUID_SIZE) == 0))
					return device;
			}
		}
	}

	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	unsigned int nofs_flag;

	/*
	 * We call this under the chunk_mutex, so we want to use NOFS for this
	 * allocation, however we don't want to change btrfs_alloc_device() to
	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
	 * places.
	 */
	nofs_flag = memalloc_nofs_save();
	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}

/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device. If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device. If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error. Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device(fs_info);
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	return dev;
}

static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}

static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	int index = btrfs_bg_flags_to_raid_index(type);
	int ncopies = btrfs_raid_array[index].ncopies;
	const int nparity = btrfs_raid_array[index].nparity;
	int data_stripes;

	if (nparity)
		data_stripes = num_stripes - nparity;
	else
		data_stripes = num_stripes / ncopies;

	return div_u64(chunk_len, data_stripes);
}
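
/*
 * Worked examples (illustrative numbers only): a 6-stripe RAID6 chunk of
 * length 4G has nparity == 2, so data_stripes = 6 - 2 = 4 and each device
 * stripe is 4G / 4 = 1G; a 2-stripe RAID1 chunk of length 1G has
 * ncopies == 2, so data_stripes = 2 / 2 = 1 and each device stripe is the
 * full 1G.
 */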

static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
		if (ret)
			return ret;
	}

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, logical, 1);
	read_unlock(&map_tree->lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	map->verified_stripes = 0;
	em->orig_block_len = calc_stripe_length(map->type, em->len,
						map->num_stripes);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
							devid, uuid, NULL, true);
		if (!map->stripes[i].dev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			free_extent_map(em);
			btrfs_report_missing_device(fs_info, devid, uuid, true);
			return -ENOENT;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(fs_info->fs_devices, devid,
						uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				free_extent_map(em);
				btrfs_err(fs_info,
					"failed to init missing dev %llu: %ld",
					devid, PTR_ERR(map->stripes[i].dev));
				return PTR_ERR(map->stripes[i].dev);
			}
			btrfs_report_missing_device(fs_info, devid, uuid, false);
		}
		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			&(map->stripes[i].dev->dev_state));
	}

	write_lock(&map_tree->lock);
	ret = add_extent_mapping(map_tree, em, 0);
	write_unlock(&map_tree->lock);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  em->start, em->len, ret);
	}
	free_extent_map(em);

	return ret;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

	/* This will match only for multi-device seed fs */
	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid, NULL);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = true;
		fs_devices->opened = 1;
		return fs_devices;
	}

	/*
	 * Upon first call for a seed fs fsid, just create a private copy of the
	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
	 */
	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		return ERR_PTR(ret);
	}

	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		return ERR_PTR(-EINVAL);
	}

	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);

	return fs_devices;
}

static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);

	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
				   fs_uuid, true);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				"failed to add missing dev %llu: %ld",
				devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * This happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here.
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
					&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
			     &fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create an extent buffer of nodesize; superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system
	 * array. The set_extent_buffer_uptodate() call does not properly mark
	 * all its pages up-to-date when the page is larger: the extent does
	 * not cover the whole page and consequently check_page_uptodate does
	 * not find all the page's extents up-to-date (the hole beyond sb),
	 * and write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through the mark_extent_buffer_dirty/
	 * writeback cycle, but sb spans only this function. Add an explicit
	 * SetPageUptodate call to silence the warning, e.g. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}

		chunk = (struct btrfs_chunk *)sb_array_offset;
		/*
		 * At least one btrfs_chunk with one stripe must be present,
		 * exact stripe count check comes afterwards
		 */
		len = btrfs_chunk_item_size(1);
		if (cur_offset + len > array_size)
			goto out_short_read;

		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
		if (!num_stripes) {
			btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
				  num_stripes, cur_offset);
			ret = -EIO;
			break;
		}

		type = btrfs_chunk_type(sb, chunk);
		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
			btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
				  type, cur_offset);
			ret = -EIO;
			break;
		}

		len = btrfs_chunk_item_size(num_stripes);
		if (cur_offset + len > array_size)
			goto out_short_read;

		ret = read_one_chunk(&key, sb, chunk);
		if (ret)
			break;

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
		  len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}
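
/*
 * Layout sketch of the sys_chunk_array parsed above (illustrative):
 *
 *   [ btrfs_disk_key | btrfs_chunk + N stripes | btrfs_disk_key | ... ]
 *
 * Each iteration consumes one (key, chunk) pair; the chunk item size
 * depends on its stripe count, which is why the remaining length is
 * validated in three steps: the key header, a minimal one-stripe chunk,
 * and finally the exact btrfs_chunk_item_size(num_stripes).
 */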

/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
	read_unlock(&map_tree->lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
					   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->lock);
		em = lookup_extent_mapping(map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->lock);
	}
out:
	return ret;
}

static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++) {
		u64 start;

		start = btrfs_node_blockptr(node, i);
		readahead_tree_block(node->fs_info, start);
	}
}
7165
btrfs_read_chunk_tree(struct btrfs_fs_info * fs_info)7166 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7167 {
7168 struct btrfs_root *root = fs_info->chunk_root;
7169 struct btrfs_path *path;
7170 struct extent_buffer *leaf;
7171 struct btrfs_key key;
7172 struct btrfs_key found_key;
7173 int ret;
7174 int slot;
7175 u64 total_dev = 0;
7176 u64 last_ra_node = 0;
7177
7178 path = btrfs_alloc_path();
7179 if (!path)
7180 return -ENOMEM;
7181
7182 /*
7183 * uuid_mutex is needed only if we are mounting a sprout FS;
7184 * otherwise we don't need it.
7185 */
7186 mutex_lock(&uuid_mutex);
7187
7188 /*
7189 * It is possible for mount and umount to race in such a way that
7190 * we execute this code path, but open_fs_devices failed to clear
7191 * total_rw_bytes. We certainly want it cleared before reading the
7192 * device items, so clear it here.
7193 */
7194 fs_info->fs_devices->total_rw_bytes = 0;
7195
7196 /*
7197 * Read all device items, and then all the chunk items. All
7198 * device items are found before any chunk item (their object id
7199 * is smaller than the lowest possible object id for a chunk
7200 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7201 */
7202 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7203 key.offset = 0;
7204 key.type = 0;
7205 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7206 if (ret < 0)
7207 goto error;
7208 while (1) {
7209 struct extent_buffer *node;
7210
7211 leaf = path->nodes[0];
7212 slot = path->slots[0];
7213 if (slot >= btrfs_header_nritems(leaf)) {
7214 ret = btrfs_next_leaf(root, path);
7215 if (ret == 0)
7216 continue;
7217 if (ret < 0)
7218 goto error;
7219 break;
7220 }
7221 /*
7222 * The nodes on level 1 are not locked, but locking is not needed
7223 * during mount time as nothing else can access the tree.
7224 */
7225 node = path->nodes[1];
7226 if (node) {
7227 if (last_ra_node != node->start) {
7228 readahead_tree_node_children(node);
7229 last_ra_node = node->start;
7230 }
7231 }
7232 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7233 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7234 struct btrfs_dev_item *dev_item;
7235 dev_item = btrfs_item_ptr(leaf, slot,
7236 struct btrfs_dev_item);
7237 ret = read_one_dev(leaf, dev_item);
7238 if (ret)
7239 goto error;
7240 total_dev++;
7241 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7242 struct btrfs_chunk *chunk;
7243 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7244 mutex_lock(&fs_info->chunk_mutex);
7245 ret = read_one_chunk(&found_key, leaf, chunk);
7246 mutex_unlock(&fs_info->chunk_mutex);
7247 if (ret)
7248 goto error;
7249 }
7250 path->slots[0]++;
7251 }
7252
7253 /*
7254 * After loading the chunk tree, we have all device information, so
7255 * do another round of validation checks.
7256 */
7257 if (total_dev != fs_info->fs_devices->total_devices) {
7258 btrfs_warn(fs_info,
7259 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7260 btrfs_super_num_devices(fs_info->super_copy),
7261 total_dev);
7262 fs_info->fs_devices->total_devices = total_dev;
7263 btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7264 }
7265 if (btrfs_super_total_bytes(fs_info->super_copy) <
7266 fs_info->fs_devices->total_rw_bytes) {
7267 btrfs_err(fs_info,
7268 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7269 btrfs_super_total_bytes(fs_info->super_copy),
7270 fs_info->fs_devices->total_rw_bytes);
7271 ret = -EINVAL;
7272 goto error;
7273 }
7274 ret = 0;
7275 error:
7276 mutex_unlock(&uuid_mutex);
7277
7278 btrfs_free_path(path);
7279 return ret;
7280 }
7281
7282 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7283 {
7284 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7285 struct btrfs_device *device;
7286
7287 fs_devices->fs_info = fs_info;
7288
7289 mutex_lock(&fs_devices->device_list_mutex);
7290 list_for_each_entry(device, &fs_devices->devices, dev_list)
7291 device->fs_info = fs_info;
7292
7293 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7294 list_for_each_entry(device, &seed_devs->devices, dev_list)
7295 device->fs_info = fs_info;
7296
7297 seed_devs->fs_info = fs_info;
7298 }
7299 mutex_unlock(&fs_devices->device_list_mutex);
7300 }
7301
7302 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7303 const struct btrfs_dev_stats_item *ptr,
7304 int index)
7305 {
7306 u64 val;
7307
7308 read_extent_buffer(eb, &val,
7309 offsetof(struct btrfs_dev_stats_item, values) +
7310 ((unsigned long)ptr) + (index * sizeof(u64)),
7311 sizeof(val));
7312 return val;
7313 }
7314
7315 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7316 struct btrfs_dev_stats_item *ptr,
7317 int index, u64 val)
7318 {
7319 write_extent_buffer(eb, &val,
7320 offsetof(struct btrfs_dev_stats_item, values) +
7321 ((unsigned long)ptr) + (index * sizeof(u64)),
7322 sizeof(val));
7323 }
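
/*
 * The two helpers above address the counters by byte offset inside the
 * extent buffer: @ptr is an offset returned by btrfs_item_ptr(), not a
 * dereferenceable pointer. Spelled out for illustration:
 *
 *	unsigned long off = (unsigned long)ptr +
 *			    offsetof(struct btrfs_dev_stats_item, values) +
 *			    index * sizeof(u64);
 *	// read_extent_buffer(eb, &val, off, sizeof(val)) then copies the
 *	// 8-byte counter out of the leaf, handling page crossings.
 */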
7324
7325 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7326 struct btrfs_path *path)
7327 {
7328 struct btrfs_dev_stats_item *ptr;
7329 struct extent_buffer *eb;
7330 struct btrfs_key key;
7331 int item_size;
7332 int i, ret, slot;
7333
7334 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7335 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7336 key.offset = device->devid;
7337 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7338 if (ret) {
7339 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7340 btrfs_dev_stat_set(device, i, 0);
7341 device->dev_stats_valid = 1;
7342 btrfs_release_path(path);
7343 return ret < 0 ? ret : 0;
7344 }
7345 slot = path->slots[0];
7346 eb = path->nodes[0];
7347 item_size = btrfs_item_size_nr(eb, slot);
7348
7349 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7350
7351 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7352 if (item_size >= (1 + i) * sizeof(__le64))
7353 btrfs_dev_stat_set(device, i,
7354 btrfs_dev_stats_value(eb, ptr, i));
7355 else
7356 btrfs_dev_stat_set(device, i, 0);
7357 }
7358
7359 device->dev_stats_valid = 1;
7360 btrfs_dev_stat_print_on_load(device);
7361 btrfs_release_path(path);
7362
7363 return 0;
7364 }
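
/*
 * Note on the item_size check above, with an illustrative (assumed)
 * example: a dev_stats item written when only three counters existed
 * would be 3 * sizeof(__le64) bytes, so
 *
 *	item_size >= (1 + i) * sizeof(__le64)
 *
 * holds for i = 0..2 and fails for the rest; the newer counters are
 * simply initialized to zero, keeping loading compatible with items
 * written by older kernels.
 */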
7365
7366 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7367 {
7368 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7369 struct btrfs_device *device;
7370 struct btrfs_path *path = NULL;
7371 int ret = 0;
7372
7373 path = btrfs_alloc_path();
7374 if (!path)
7375 return -ENOMEM;
7376
7377 mutex_lock(&fs_devices->device_list_mutex);
7378 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7379 ret = btrfs_device_init_dev_stats(device, path);
7380 if (ret)
7381 goto out;
7382 }
7383 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7384 list_for_each_entry(device, &seed_devs->devices, dev_list) {
7385 ret = btrfs_device_init_dev_stats(device, path);
7386 if (ret)
7387 goto out;
7388 }
7389 }
7390 out:
7391 mutex_unlock(&fs_devices->device_list_mutex);
7392
7393 btrfs_free_path(path);
7394 return ret;
7395 }
7396
7397 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7398 struct btrfs_device *device)
7399 {
7400 struct btrfs_fs_info *fs_info = trans->fs_info;
7401 struct btrfs_root *dev_root = fs_info->dev_root;
7402 struct btrfs_path *path;
7403 struct btrfs_key key;
7404 struct extent_buffer *eb;
7405 struct btrfs_dev_stats_item *ptr;
7406 int ret;
7407 int i;
7408
7409 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7410 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7411 key.offset = device->devid;
7412
7413 path = btrfs_alloc_path();
7414 if (!path)
7415 return -ENOMEM;
7416 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7417 if (ret < 0) {
7418 btrfs_warn_in_rcu(fs_info,
7419 "error %d while searching for dev_stats item for device %s",
7420 ret, rcu_str_deref(device->name));
7421 goto out;
7422 }
7423
7424 if (ret == 0 &&
7425 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7426 /* need to delete old one and insert a new one */
7427 ret = btrfs_del_item(trans, dev_root, path);
7428 if (ret != 0) {
7429 btrfs_warn_in_rcu(fs_info,
7430 "delete too small dev_stats item for device %s failed %d",
7431 rcu_str_deref(device->name), ret);
7432 goto out;
7433 }
7434 ret = 1;
7435 }
7436
7437 if (ret == 1) {
7438 /* need to insert a new item */
7439 btrfs_release_path(path);
7440 ret = btrfs_insert_empty_item(trans, dev_root, path,
7441 &key, sizeof(*ptr));
7442 if (ret < 0) {
7443 btrfs_warn_in_rcu(fs_info,
7444 "insert dev_stats item for device %s failed %d",
7445 rcu_str_deref(device->name), ret);
7446 goto out;
7447 }
7448 }
7449
7450 eb = path->nodes[0];
7451 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7452 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7453 btrfs_set_dev_stats_value(eb, ptr, i,
7454 btrfs_dev_stat_read(device, i));
7455 btrfs_mark_buffer_dirty(eb);
7456
7457 out:
7458 btrfs_free_path(path);
7459 return ret;
7460 }
7461
7462 /*
7463 * Called from commit_transaction(). Writes all changed device stats to disk.
7464 */
7465 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7466 {
7467 struct btrfs_fs_info *fs_info = trans->fs_info;
7468 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7469 struct btrfs_device *device;
7470 int stats_cnt;
7471 int ret = 0;
7472
7473 mutex_lock(&fs_devices->device_list_mutex);
7474 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7475 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7476 if (!device->dev_stats_valid || stats_cnt == 0)
7477 continue;
7478
7480 /*
7481 * There is a LOAD-LOAD control dependency between the value of
7482 * dev_stats_ccnt and updating the on-disk values which requires
7483 * reading the in-memory counters. Such control dependencies
7484 * require explicit read memory barriers.
7485 *
7486 * This memory barrier pairs with smp_mb__before_atomic() in
7487 * btrfs_dev_stat_inc()/btrfs_dev_stat_set() and with the full
7488 * barrier implied by atomic_xchg() in
7489 * btrfs_dev_stats_read_and_reset().
7490 */
7491 smp_rmb();
7492
7493 ret = update_dev_stat_item(trans, device);
7494 if (!ret)
7495 atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7496 }
7497 mutex_unlock(&fs_devices->device_list_mutex);
7498
7499 return ret;
7500 }
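
/*
 * Sketch of the counter protocol used above (illustrative; the writer
 * side lives in btrfs_dev_stat_inc()/btrfs_dev_stat_set()):
 *
 *	writer				reader (this function)
 *	update dev stat value		stats_cnt = atomic_read(&ccnt)
 *	smp_mb__before_atomic()		smp_rmb()
 *	atomic_inc(&ccnt)		update_dev_stat_item()
 *					atomic_sub(stats_cnt, &ccnt)
 *
 * Subtracting the snapshot instead of zeroing ccnt means an increment
 * that raced with the write-out keeps ccnt non-zero, so the next
 * transaction commit flushes it.
 */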
7501
7502 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7503 {
7504 btrfs_dev_stat_inc(dev, index);
7505 btrfs_dev_stat_print_on_error(dev);
7506 }
7507
7508 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7509 {
7510 if (!dev->dev_stats_valid)
7511 return;
7512 btrfs_err_rl_in_rcu(dev->fs_info,
7513 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7514 rcu_str_deref(dev->name),
7515 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7516 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7517 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7518 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7519 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7520 }
7521
7522 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7523 {
7524 int i;
7525
7526 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7527 if (btrfs_dev_stat_read(dev, i) != 0)
7528 break;
7529 if (i == BTRFS_DEV_STAT_VALUES_MAX)
7530 return; /* all values == 0, suppress message */
7531
7532 btrfs_info_in_rcu(dev->fs_info,
7533 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7534 rcu_str_deref(dev->name),
7535 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7536 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7537 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7538 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7539 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7540 }
7541
7542 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7543 struct btrfs_ioctl_get_dev_stats *stats)
7544 {
7545 struct btrfs_device *dev;
7546 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7547 int i;
7548
7549 mutex_lock(&fs_devices->device_list_mutex);
7550 dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
7551 true);
7552 mutex_unlock(&fs_devices->device_list_mutex);
7553
7554 if (!dev) {
7555 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7556 return -ENODEV;
7557 } else if (!dev->dev_stats_valid) {
7558 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7559 return -ENODEV;
7560 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7561 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7562 if (stats->nr_items > i)
7563 stats->values[i] =
7564 btrfs_dev_stat_read_and_reset(dev, i);
7565 else
7566 btrfs_dev_stat_set(dev, i, 0);
7567 }
7568 btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7569 current->comm, task_pid_nr(current));
7570 } else {
7571 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7572 if (stats->nr_items > i)
7573 stats->values[i] = btrfs_dev_stat_read(dev, i);
7574 }
7575 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7576 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7577 return 0;
7578 }
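
/*
 * Userspace reaches this through the BTRFS_IOC_GET_DEV_STATS ioctl.
 * A minimal illustrative caller, error handling elided; the mount
 * point and devid are assumptions:
 *
 *	struct btrfs_ioctl_get_dev_stats args = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *	ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args);
 *	// args.values[BTRFS_DEV_STAT_WRITE_ERRS] etc. now hold counters
 *
 * The reset branch above is taken when BTRFS_DEV_STATS_RESET is set in
 * ->flags (assumed to be set by the ioctl layer for the get-and-reset
 * variant, which is outside this excerpt).
 */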
7579
7580 /*
7581 * Update the size and bytes used for each device where it changed. This is
7582 * delayed since we would otherwise get errors while writing out the
7583 * superblocks.
7584 *
7585 * Must be invoked during transaction commit.
7586 */
7587 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7588 {
7589 struct btrfs_device *curr, *next;
7590
7591 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7592
7593 if (list_empty(&trans->dev_update_list))
7594 return;
7595
7596 /*
7597 * We don't need the device_list_mutex here. This list is owned by the
7598 * transaction and the transaction must complete before the device is
7599 * released.
7600 */
7601 mutex_lock(&trans->fs_info->chunk_mutex);
7602 list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7603 post_commit_list) {
7604 list_del_init(&curr->post_commit_list);
7605 curr->commit_total_bytes = curr->disk_total_bytes;
7606 curr->commit_bytes_used = curr->bytes_used;
7607 }
7608 mutex_unlock(&trans->fs_info->chunk_mutex);
7609 }
7610
7611 /*
7612 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7613 */
7614 int btrfs_bg_type_to_factor(u64 flags)
7615 {
7616 const int index = btrfs_bg_flags_to_raid_index(flags);
7617
7618 return btrfs_raid_array[index].ncopies;
7619 }
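
/*
 * Example, for illustration: BTRFS_BLOCK_GROUP_RAID1 maps to
 * ncopies == 2, so a 1 GiB logical block group occupies 2 GiB of raw
 * device space:
 *
 *	u64 raw_bytes = logical_bytes * btrfs_bg_type_to_factor(flags);
 *
 * Parity profiles (RAID5/6) have ncopies == 1; their raw overhead is
 * expressed by nparity instead, which is why this helper covers only
 * the simple profiles named above.
 */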
7620
7623 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7624 u64 chunk_offset, u64 devid,
7625 u64 physical_offset, u64 physical_len)
7626 {
7627 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7628 struct extent_map *em;
7629 struct map_lookup *map;
7630 struct btrfs_device *dev;
7631 u64 stripe_len;
7632 bool found = false;
7633 int ret = 0;
7634 int i;
7635
7636 read_lock(&em_tree->lock);
7637 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7638 read_unlock(&em_tree->lock);
7639
7640 if (!em) {
7641 btrfs_err(fs_info,
7642 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7643 physical_offset, devid);
7644 ret = -EUCLEAN;
7645 goto out;
7646 }
7647
7648 map = em->map_lookup;
7649 stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7650 if (physical_len != stripe_len) {
7651 btrfs_err(fs_info,
7652 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7653 physical_offset, devid, em->start, physical_len,
7654 stripe_len);
7655 ret = -EUCLEAN;
7656 goto out;
7657 }
7658
7659 for (i = 0; i < map->num_stripes; i++) {
7660 if (map->stripes[i].dev->devid == devid &&
7661 map->stripes[i].physical == physical_offset) {
7662 found = true;
7663 if (map->verified_stripes >= map->num_stripes) {
7664 btrfs_err(fs_info,
7665 "too many dev extents for chunk %llu found",
7666 em->start);
7667 ret = -EUCLEAN;
7668 goto out;
7669 }
7670 map->verified_stripes++;
7671 break;
7672 }
7673 }
7674 if (!found) {
7675 btrfs_err(fs_info,
7676 "dev extent physical offset %llu devid %llu has no corresponding chunk",
7677 physical_offset, devid);
7678 ret = -EUCLEAN;
7679 }
7680
7681 /* Make sure no dev extent is beyond device boundary */
7682 dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
7683 if (!dev) {
7684 btrfs_err(fs_info, "failed to find devid %llu", devid);
7685 ret = -EUCLEAN;
7686 goto out;
7687 }
7688
7689 /* It's possible this device is a dummy for a seed device */
7690 if (dev->disk_total_bytes == 0) {
7691 struct btrfs_fs_devices *devs;
7692
7693 devs = list_first_entry(&fs_info->fs_devices->seed_list,
7694 struct btrfs_fs_devices, seed_list);
7695 dev = btrfs_find_device(devs, devid, NULL, NULL, false);
7696 if (!dev) {
7697 btrfs_err(fs_info, "failed to find seed devid %llu",
7698 devid);
7699 ret = -EUCLEAN;
7700 goto out;
7701 }
7702 }
7703
7704 if (physical_offset + physical_len > dev->disk_total_bytes) {
7705 btrfs_err(fs_info,
7706 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7707 devid, physical_offset, physical_len,
7708 dev->disk_total_bytes);
7709 ret = -EUCLEAN;
7710 goto out;
7711 }
7712 out:
7713 free_extent_map(em);
7714 return ret;
7715 }
7716
7717 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7718 {
7719 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7720 struct extent_map *em;
7721 struct rb_node *node;
7722 int ret = 0;
7723
7724 read_lock(&em_tree->lock);
7725 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7726 em = rb_entry(node, struct extent_map, rb_node);
7727 if (em->map_lookup->num_stripes !=
7728 em->map_lookup->verified_stripes) {
7729 btrfs_err(fs_info,
7730 "chunk %llu has missing dev extent, have %d expect %d",
7731 em->start, em->map_lookup->verified_stripes,
7732 em->map_lookup->num_stripes);
7733 ret = -EUCLEAN;
7734 goto out;
7735 }
7736 }
7737 out:
7738 read_unlock(&em_tree->lock);
7739 return ret;
7740 }
7741
7742 /*
7743 * Ensure that all dev extents are mapped to the correct chunk; otherwise
7744 * later chunk allocation/free would cause unexpected behavior.
7745 *
7746 * NOTE: This iterates through the whole device tree, which should be roughly
7747 * the same size as the chunk tree. This slightly increases mount time.
7748 */
7749 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
7750 {
7751 struct btrfs_path *path;
7752 struct btrfs_root *root = fs_info->dev_root;
7753 struct btrfs_key key;
7754 u64 prev_devid = 0;
7755 u64 prev_dev_ext_end = 0;
7756 int ret = 0;
7757
7758 key.objectid = 1;
7759 key.type = BTRFS_DEV_EXTENT_KEY;
7760 key.offset = 0;
7761
7762 path = btrfs_alloc_path();
7763 if (!path)
7764 return -ENOMEM;
7765
7766 path->reada = READA_FORWARD;
7767 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7768 if (ret < 0)
7769 goto out;
7770
7771 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7772 ret = btrfs_next_item(root, path);
7773 if (ret < 0)
7774 goto out;
7775 /* No dev extents at all? Not good */
7776 if (ret > 0) {
7777 ret = -EUCLEAN;
7778 goto out;
7779 }
7780 }
7781 while (1) {
7782 struct extent_buffer *leaf = path->nodes[0];
7783 struct btrfs_dev_extent *dext;
7784 int slot = path->slots[0];
7785 u64 chunk_offset;
7786 u64 physical_offset;
7787 u64 physical_len;
7788 u64 devid;
7789
7790 btrfs_item_key_to_cpu(leaf, &key, slot);
7791 if (key.type != BTRFS_DEV_EXTENT_KEY)
7792 break;
7793 devid = key.objectid;
7794 physical_offset = key.offset;
7795
7796 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
7797 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
7798 physical_len = btrfs_dev_extent_length(leaf, dext);
7799
7800 /* Check if this dev extent overlaps with the previous one */
7801 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
7802 btrfs_err(fs_info,
7803 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7804 devid, physical_offset, prev_dev_ext_end);
7805 ret = -EUCLEAN;
7806 goto out;
7807 }
7808
7809 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
7810 physical_offset, physical_len);
7811 if (ret < 0)
7812 goto out;
7813 prev_devid = devid;
7814 prev_dev_ext_end = physical_offset + physical_len;
7815
7816 ret = btrfs_next_item(root, path);
7817 if (ret < 0)
7818 goto out;
7819 if (ret > 0) {
7820 ret = 0;
7821 break;
7822 }
7823 }
7824
7825 /* Ensure all chunks have corresponding dev extents */
7826 ret = verify_chunk_dev_extent_mapping(fs_info);
7827 out:
7828 btrfs_free_path(path);
7829 return ret;
7830 }
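
/*
 * Dev extent keys are (devid, BTRFS_DEV_EXTENT_KEY, physical_offset),
 * so the walk above sees each device's extents in physical order.
 * Illustrative (made-up) overlap caught by the check in the loop:
 *
 *	prev: devid 1, physical 0,     len 1G  ->  prev_dev_ext_end = 1G
 *	next: devid 1, physical 512M           ->  512M < 1G
 *
 * which returns -EUCLEAN before per-extent verification even runs.
 */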
7831
7832 /*
7833 * Check whether the given block group or device is pinned by any inode being
7834 * used as a swapfile.
7835 */
7836 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
7837 {
7838 struct btrfs_swapfile_pin *sp;
7839 struct rb_node *node;
7840
7841 spin_lock(&fs_info->swapfile_pins_lock);
7842 node = fs_info->swapfile_pins.rb_node;
7843 while (node) {
7844 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
7845 if (ptr < sp->ptr)
7846 node = node->rb_left;
7847 else if (ptr > sp->ptr)
7848 node = node->rb_right;
7849 else
7850 break;
7851 }
7852 spin_unlock(&fs_info->swapfile_pins_lock);
7853 return node != NULL;
7854 }
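
/*
 * The pin tree is keyed by raw pointer value, so callers pass whichever
 * object they want checked. Illustrative (assumed) call sites, which
 * live outside this excerpt:
 *
 *	if (btrfs_pinned_by_swapfile(fs_info, block_group))
 *		return -ETXTBSY;	// e.g. refuse to relocate it
 *	if (btrfs_pinned_by_swapfile(fs_info, device))
 *		return -ETXTBSY;	// e.g. refuse to remove the device
 */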
7855