// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
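
/*
 * Example: a block group with BTRFS_BLOCK_GROUP_RAID1 set is described by
 * btrfs_raid_array[BTRFS_RAID_RAID1], i.e. two copies (ncopies == 2) on
 * exactly two devices, tolerating one device failure.
 */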

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
		return BTRFS_RAID_RAID1C3;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
		return BTRFS_RAID_RAID1C4;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
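 *
 * Example: @bg_flags == (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1)
 * yields "data|raid1".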
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a sufficiently
	 * large buffer
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

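/*
 * Release the resources attached to a scanned or closed device: the
 * RCU-protected name, the allocation state tree, the preallocated flush bio
 * and any zone info, then the device itself. The device must not be on a
 * post_commit_list.
 */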
void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

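/*
 * Module exit helper: drop every btrfs_fs_devices still registered on the
 * global fs_uuids list together with all of its devices.
 */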
void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

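/*
 * Find a registered fs_devices by @fsid, and also by @metadata_fsid when it
 * is not NULL. Walks the global fs_uuids list, so the caller is expected to
 * hold the uuid_mutex.
 */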
static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{

	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}


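/*
 * Open the block device at @device_path, optionally flush its page cache,
 * set the blocksize and read the btrfs super block from it. On failure *bdev
 * is reset to NULL and the device is left closed.
 */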
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

/*
 * Check if the device at @path matches the device described by the given
 * struct btrfs_device.
 *
 * Returns:
 *   true  If it is the same device.
 *   false If it is not the same device or on error.
 */
static bool device_matched(const struct btrfs_device *device, const char *path)
{
	char *device_name;
	dev_t dev_old;
	dev_t dev_new;
	int ret;

	/*
	 * If we are looking for a device with the matching dev_t, then skip
	 * device without a name (a missing device).
	 */
	if (!device->name)
		return false;

	device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
	if (!device_name)
		return false;

	rcu_read_lock();
	scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
	rcu_read_unlock();

	ret = lookup_bdev(device_name, &dev_old);
	kfree(device_name);
	if (ret)
		return false;

	ret = lookup_bdev(path, &dev_new);
	if (ret)
		return false;

	if (dev_old == dev_new)
		return true;

	return false;
}

/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * @path:	 Optional. When provided, release only the unmounted devices
 *		 matching this path.
 * @skip_device: Optional. Skip this device when searching for stale devices.
 *
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device_matched(device, path))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
				 struct btrfs_device *device, fmode_t flags,
				 void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

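/*
 * Return the on-disk UUID identifying the filesystem metadata: the
 * metadata_uuid when the METADATA_UUID incompat flag is set, otherwise the
 * plain fsid.
 */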
u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}


static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the current device didn't
	 * observe it, meaning our fsid will be different than theirs. We need
	 * to handle two subcases:
	 *  1 - The fs still continues to have different METADATA/FSID uuids.
	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
	 *      are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}


	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * the generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			int error;
			dev_t path_dev;

			error = lookup_bdev(path, &path_dev);
			if (error) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_PTR(error);
			}

			if (device->bdev->bd_dev != path_dev) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

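/*
 * Create an in-memory duplicate of @orig: a new fs_devices with the same fsid
 * and a freshly allocated btrfs_device (with a copy of the name) for each
 * device on the original list. The caller must hold the uuid_mutex.
 */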
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

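/*
 * Drop devices that are not part of the opened filesystem: anything without
 * the IN_FS_METADATA bit (except the replace target) is closed and freed.
 * The device with the highest generation seen is recorded in @latest_dev.
 */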
static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

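/*
 * Sync and invalidate the page cache of a writeable device before dropping
 * our reference to its block device.
 */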
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

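/*
 * Tear down the per-device state built up while the filesystem was mounted,
 * returning the device to the pristine state it had right after scanning.
 */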
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

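/*
 * Close an opened fs_devices together with all of its seed fs_devices. A
 * single-device fs_devices is freed completely, as it can be rebuilt by a
 * fresh scan on the next mount.
 */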
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}


	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

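/*
 * Open every device on the list with the given mode. A device that fails to
 * open with -ENODATA is dropped from the list; the device with the highest
 * generation becomes fs_devices->latest_dev. Returns -EINVAL when no device
 * could be opened.
 */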
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

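/*
 * list_sort() comparator: order devices by ascending devid.
 */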
static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

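/*
 * Read one super block copy from @bytenr through the page cache of the block
 * device and validate its magic and advertised bytenr (@bytenr_orig, which
 * may differ from @bytenr on zoned devices).
 */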
static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the page cache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */

	/*
	 * Avoid using flag |= FMODE_EXCL here, as the systemd-udev may
	 * initiate the device scan which may race with the user's mount
	 * or mkfs command, resulting in failure.
	 * Since the device scan is solely for reading purposes, there is
	 * no need for FMODE_EXCL. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

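/*
 * Return the lowest offset at which a new dev extent may start, depending on
 * the chunk allocation policy of the device.
 */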
static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

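/*
 * Zoned variant of the hole check: advance the hole until it starts at a run
 * of zones that can accept an allocation of @num_bytes. Returns true if the
 * hole was moved or shrunk.
 */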
static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start,
				      u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

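/*
 * Remove the dev extent item covering @start from the device tree and return
 * its length in @dev_extent_len. If the search lands past the extent, step
 * back to the previous item and verify that it really covers @start.
 */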
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

1836 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1837 {
1838 struct extent_map_tree *em_tree;
1839 struct extent_map *em;
1840 struct rb_node *n;
1841 u64 ret = 0;
1842
1843 em_tree = &fs_info->mapping_tree;
1844 read_lock(&em_tree->lock);
1845 n = rb_last(&em_tree->map.rb_root);
1846 if (n) {
1847 em = rb_entry(n, struct extent_map, rb_node);
1848 ret = em->start + em->len;
1849 }
1850 read_unlock(&em_tree->lock);
1851
1852 return ret;
1853 }
1854
1855 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1856 u64 *devid_ret)
1857 {
1858 int ret;
1859 struct btrfs_key key;
1860 struct btrfs_key found_key;
1861 struct btrfs_path *path;
1862
1863 path = btrfs_alloc_path();
1864 if (!path)
1865 return -ENOMEM;
1866
1867 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1868 key.type = BTRFS_DEV_ITEM_KEY;
1869 key.offset = (u64)-1;
1870
1871 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1872 if (ret < 0)
1873 goto error;
1874
1875 if (ret == 0) {
1876 /* Corruption */
1877 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1878 ret = -EUCLEAN;
1879 goto error;
1880 }
1881
1882 ret = btrfs_previous_item(fs_info->chunk_root, path,
1883 BTRFS_DEV_ITEMS_OBJECTID,
1884 BTRFS_DEV_ITEM_KEY);
1885 if (ret) {
1886 *devid_ret = 1;
1887 } else {
1888 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1889 path->slots[0]);
1890 *devid_ret = found_key.offset + 1;
1891 }
1892 ret = 0;
1893 error:
1894 btrfs_free_path(path);
1895 return ret;
1896 }
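
/*
 * Sketch of the devid allocation scheme above (illustration only): device
 * items are dense keys under BTRFS_DEV_ITEMS_OBJECTID, so the next free
 * devid is the highest existing key offset plus one, or 1 for an empty
 * tree. For example, with devids {1, 2, 3} present:
 *
 *	u64 devid;
 *
 *	ret = find_next_devid(fs_info, &devid);
 *	// on success, devid == 4
 */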
1897
1898 /*
1899 * the device information is stored in the chunk root
1900 * the btrfs_device struct should be fully filled in
1901 */
1902 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1903 struct btrfs_device *device)
1904 {
1905 int ret;
1906 struct btrfs_path *path;
1907 struct btrfs_dev_item *dev_item;
1908 struct extent_buffer *leaf;
1909 struct btrfs_key key;
1910 unsigned long ptr;
1911
1912 path = btrfs_alloc_path();
1913 if (!path)
1914 return -ENOMEM;
1915
1916 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1917 key.type = BTRFS_DEV_ITEM_KEY;
1918 key.offset = device->devid;
1919
1920 btrfs_reserve_chunk_metadata(trans, true);
1921 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1922 &key, sizeof(*dev_item));
1923 btrfs_trans_release_chunk_metadata(trans);
1924 if (ret)
1925 goto out;
1926
1927 leaf = path->nodes[0];
1928 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1929
1930 btrfs_set_device_id(leaf, dev_item, device->devid);
1931 btrfs_set_device_generation(leaf, dev_item, 0);
1932 btrfs_set_device_type(leaf, dev_item, device->type);
1933 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1934 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1935 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1936 btrfs_set_device_total_bytes(leaf, dev_item,
1937 btrfs_device_get_disk_total_bytes(device));
1938 btrfs_set_device_bytes_used(leaf, dev_item,
1939 btrfs_device_get_bytes_used(device));
1940 btrfs_set_device_group(leaf, dev_item, 0);
1941 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1942 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1943 btrfs_set_device_start_offset(leaf, dev_item, 0);
1944
1945 ptr = btrfs_device_uuid(dev_item);
1946 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1947 ptr = btrfs_device_fsid(dev_item);
1948 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1949 ptr, BTRFS_FSID_SIZE);
1950 btrfs_mark_buffer_dirty(leaf);
1951
1952 ret = 0;
1953 out:
1954 btrfs_free_path(path);
1955 return ret;
1956 }
1957
1958 /*
1959 * Function to update ctime/mtime for a given device path.
1960 * Mainly used for ctime/mtime based probe like libblkid.
1961 *
1962 * We don't care about errors here, this is just to be kind to userspace.
1963 */
1964 static void update_dev_time(const char *device_path)
1965 {
1966 struct path path;
1967 struct timespec64 now;
1968 int ret;
1969
1970 ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1971 if (ret)
1972 return;
1973
1974 now = current_time(d_inode(path.dentry));
1975 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1976 path_put(&path);
1977 }
1978
1979 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
1980 struct btrfs_device *device)
1981 {
1982 struct btrfs_root *root = device->fs_info->chunk_root;
1983 int ret;
1984 struct btrfs_path *path;
1985 struct btrfs_key key;
1986
1987 path = btrfs_alloc_path();
1988 if (!path)
1989 return -ENOMEM;
1990
1991 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1992 key.type = BTRFS_DEV_ITEM_KEY;
1993 key.offset = device->devid;
1994
1995 btrfs_reserve_chunk_metadata(trans, false);
1996 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1997 btrfs_trans_release_chunk_metadata(trans);
1998 if (ret) {
1999 if (ret > 0)
2000 ret = -ENOENT;
2001 goto out;
2002 }
2003
2004 ret = btrfs_del_item(trans, root, path);
2005 out:
2006 btrfs_free_path(path);
2007 return ret;
2008 }
2009
2010 /*
2011 * Verify that @num_devices satisfies the RAID profile constraints in the whole
2012  * filesystem. It's up to the caller to adjust that number regarding, e.g., device
2013 * replace.
2014 */
2015 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
2016 u64 num_devices)
2017 {
2018 u64 all_avail;
2019 unsigned seq;
2020 int i;
2021
2022 do {
2023 seq = read_seqbegin(&fs_info->profiles_lock);
2024
2025 all_avail = fs_info->avail_data_alloc_bits |
2026 fs_info->avail_system_alloc_bits |
2027 fs_info->avail_metadata_alloc_bits;
2028 } while (read_seqretry(&fs_info->profiles_lock, seq));
2029
2030 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2031 if (!(all_avail & btrfs_raid_array[i].bg_flag))
2032 continue;
2033
2034 if (num_devices < btrfs_raid_array[i].devs_min)
2035 return btrfs_raid_array[i].mindev_error;
2036 }
2037
2038 return 0;
2039 }
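
/*
 * Example (sketch, not original code): with metadata on raid1 (devs_min ==
 * 2 in btrfs_raid_array) and num_devices == 1, the loop above returns
 * BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET. This is how btrfs_rm_device() below
 * refuses to shrink a two-device raid1 filesystem down to a single device:
 *
 *	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
 *	if (ret)
 *		return ret;	// some profile would drop below devs_min
 */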
2040
2041 static struct btrfs_device *btrfs_find_next_active_device(
2042 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2043 {
2044 struct btrfs_device *next_device;
2045
2046 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2047 if (next_device != device &&
2048 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2049 && next_device->bdev)
2050 return next_device;
2051 }
2052
2053 return NULL;
2054 }
2055
2056 /*
2057  * Helper function to check if the given device is part of s_bdev / latest_dev
2058  * and replace it with the provided or the next active device. In the context
2059  * where this function is called, there should always be another active
2060  * device (or next_device) available.
2061  */
2062 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2063 struct btrfs_device *next_device)
2064 {
2065 struct btrfs_fs_info *fs_info = device->fs_info;
2066
2067 if (!next_device)
2068 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2069 device);
2070 ASSERT(next_device);
2071
2072 if (fs_info->sb->s_bdev &&
2073 (fs_info->sb->s_bdev == device->bdev))
2074 fs_info->sb->s_bdev = next_device->bdev;
2075
2076 if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2077 fs_info->fs_devices->latest_dev = next_device;
2078 }
2079
2080 /*
2081 * Return btrfs_fs_devices::num_devices excluding the device that's being
2082 * currently replaced.
2083 */
2084 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2085 {
2086 u64 num_devices = fs_info->fs_devices->num_devices;
2087
2088 down_read(&fs_info->dev_replace.rwsem);
2089 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2090 ASSERT(num_devices > 1);
2091 num_devices--;
2092 }
2093 up_read(&fs_info->dev_replace.rwsem);
2094
2095 return num_devices;
2096 }
2097
2098 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2099 struct block_device *bdev,
2100 const char *device_path)
2101 {
2102 struct btrfs_super_block *disk_super;
2103 int copy_num;
2104
2105 if (!bdev)
2106 return;
2107
2108 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2109 struct page *page;
2110 int ret;
2111
2112 disk_super = btrfs_read_dev_one_super(bdev, copy_num, false);
2113 if (IS_ERR(disk_super))
2114 continue;
2115
2116 if (bdev_is_zoned(bdev)) {
2117 btrfs_reset_sb_log_zones(bdev, copy_num);
2118 continue;
2119 }
2120
2121 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2122
2123 page = virt_to_page(disk_super);
2124 set_page_dirty(page);
2125 lock_page(page);
2126 		/* write_one_page() unlocks the page */
2127 ret = write_one_page(page);
2128 if (ret)
2129 btrfs_warn(fs_info,
2130 "error clearing superblock number %d (%d)",
2131 copy_num, ret);
2132 btrfs_release_disk_super(disk_super);
2134 }
2135
2136 /* Notify udev that device has changed */
2137 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2138
2139 /* Update ctime/mtime for device path for libblkid */
2140 update_dev_time(device_path);
2141 }
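
/*
 * Note (added illustration): the BTRFS_SUPER_MIRROR_MAX super block copies
 * live at fixed offsets (64KiB, 64MiB and 256GiB on sufficiently large
 * devices); btrfs_read_dev_one_super() returns an ERR_PTR for copies that
 * do not fit, which is why the loop above simply continues on IS_ERR().
 * Zeroing only the magic is enough for probing tools (blkid, btrfs device
 * scan) to stop recognizing the device.
 */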
2142
2143 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2144 struct btrfs_dev_lookup_args *args,
2145 struct block_device **bdev, fmode_t *mode)
2146 {
2147 struct btrfs_trans_handle *trans;
2148 struct btrfs_device *device;
2149 struct btrfs_fs_devices *cur_devices;
2150 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2151 u64 num_devices;
2152 int ret = 0;
2153
2154 /*
2155 * The device list in fs_devices is accessed without locks (neither
2156 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2157 * filesystem and another device rm cannot run.
2158 */
2159 num_devices = btrfs_num_devices(fs_info);
2160
2161 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2162 if (ret)
2163 return ret;
2164
2165 device = btrfs_find_device(fs_info->fs_devices, args);
2166 if (!device) {
2167 if (args->missing)
2168 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2169 else
2170 ret = -ENOENT;
2171 return ret;
2172 }
2173
2174 if (btrfs_pinned_by_swapfile(fs_info, device)) {
2175 btrfs_warn_in_rcu(fs_info,
2176 "cannot remove device %s (devid %llu) due to active swapfile",
2177 rcu_str_deref(device->name), device->devid);
2178 return -ETXTBSY;
2179 }
2180
2181 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2182 return BTRFS_ERROR_DEV_TGT_REPLACE;
2183
2184 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2185 fs_info->fs_devices->rw_devices == 1)
2186 return BTRFS_ERROR_DEV_ONLY_WRITABLE;
2187
2188 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2189 mutex_lock(&fs_info->chunk_mutex);
2190 list_del_init(&device->dev_alloc_list);
2191 device->fs_devices->rw_devices--;
2192 mutex_unlock(&fs_info->chunk_mutex);
2193 }
2194
2195 ret = btrfs_shrink_device(device, 0);
2196 if (!ret)
2197 btrfs_reada_remove_dev(device);
2198 if (ret)
2199 goto error_undo;
2200
2201 trans = btrfs_start_transaction(fs_info->chunk_root, 0);
2202 if (IS_ERR(trans)) {
2203 ret = PTR_ERR(trans);
2204 goto error_undo;
2205 }
2206
2207 ret = btrfs_rm_dev_item(trans, device);
2208 if (ret) {
2209 /* Any error in dev item removal is critical */
2210 btrfs_crit(fs_info,
2211 "failed to remove device item for devid %llu: %d",
2212 device->devid, ret);
2213 btrfs_abort_transaction(trans, ret);
2214 btrfs_end_transaction(trans);
2215 return ret;
2216 }
2217
2218 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2219 btrfs_scrub_cancel_dev(device);
2220
2221 /*
2222 * the device list mutex makes sure that we don't change
2223 * the device list while someone else is writing out all
2224 	 * the device supers. Whoever is writing all supers should
2225 * lock the device list mutex before getting the number of
2226 * devices in the super block (super_copy). Conversely,
2227 * whoever updates the number of devices in the super block
2228 * (super_copy) should hold the device list mutex.
2229 */
2230
2231 /*
2232 * In normal cases the cur_devices == fs_devices. But in case
2233 * of deleting a seed device, the cur_devices should point to
2234 * its own fs_devices listed under the fs_devices->seed.
2235 */
2236 cur_devices = device->fs_devices;
2237 mutex_lock(&fs_devices->device_list_mutex);
2238 list_del_rcu(&device->dev_list);
2239
2240 cur_devices->num_devices--;
2241 cur_devices->total_devices--;
2242 /* Update total_devices of the parent fs_devices if it's seed */
2243 if (cur_devices != fs_devices)
2244 fs_devices->total_devices--;
2245
2246 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2247 cur_devices->missing_devices--;
2248
2249 btrfs_assign_next_active_device(device, NULL);
2250
2251 if (device->bdev) {
2252 cur_devices->open_devices--;
2253 /* remove sysfs entry */
2254 btrfs_sysfs_remove_device(device);
2255 }
2256
2257 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2258 btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2259 mutex_unlock(&fs_devices->device_list_mutex);
2260
2261 /*
2262 * At this point, the device is zero sized and detached from the
2263 * devices list. All that's left is to zero out the old supers and
2264 * free the device.
2265 *
2266 * We cannot call btrfs_close_bdev() here because we're holding the sb
2267 * write lock, and blkdev_put() will pull in the ->open_mutex on the
2268 	 * block device and its dependencies. Instead just flush the device
2269 * and let the caller do the final blkdev_put.
2270 */
2271 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2272 btrfs_scratch_superblocks(fs_info, device->bdev,
2273 device->name->str);
2274 if (device->bdev) {
2275 sync_blockdev(device->bdev);
2276 invalidate_bdev(device->bdev);
2277 }
2278 }
2279
2280 *bdev = device->bdev;
2281 *mode = device->mode;
2282 synchronize_rcu();
2283 btrfs_free_device(device);
2284
2285 if (cur_devices->open_devices == 0) {
2286 list_del_init(&cur_devices->seed_list);
2287 close_fs_devices(cur_devices);
2288 free_fs_devices(cur_devices);
2289 }
2290
2291 ret = btrfs_commit_transaction(trans);
2292
2293 return ret;
2294
2295 error_undo:
2296 btrfs_reada_undo_remove_dev(device);
2297 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2298 mutex_lock(&fs_info->chunk_mutex);
2299 list_add(&device->dev_alloc_list,
2300 &fs_devices->alloc_list);
2301 device->fs_devices->rw_devices++;
2302 mutex_unlock(&fs_info->chunk_mutex);
2303 }
2304 return ret;
2305 }
2306
2307 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2308 {
2309 struct btrfs_fs_devices *fs_devices;
2310
2311 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2312
2313 	/*
2314 	 * On a filesystem with no seed, srcdev->fs_devices points to the
2315 	 * fs_devices of fs_info. However, when the device being replaced is
2316 	 * a seed device, it points to the seed's local fs_devices. In short,
2317 	 * srcdev has the correct fs_devices in both cases.
2318 	 */
2319 fs_devices = srcdev->fs_devices;
2320
2321 list_del_rcu(&srcdev->dev_list);
2322 list_del(&srcdev->dev_alloc_list);
2323 fs_devices->num_devices--;
2324 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2325 fs_devices->missing_devices--;
2326
2327 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2328 fs_devices->rw_devices--;
2329
2330 if (srcdev->bdev)
2331 fs_devices->open_devices--;
2332 }
2333
2334 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2335 {
2336 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2337
2338 mutex_lock(&uuid_mutex);
2339
2340 btrfs_close_bdev(srcdev);
2341 synchronize_rcu();
2342 btrfs_free_device(srcdev);
2343
2344 	/* If there are no devices left, delete the fs_devices as well. */
2345 if (!fs_devices->num_devices) {
2346 /*
2347 * On a mounted FS, num_devices can't be zero unless it's a
2348 * seed. In case of a seed device being replaced, the replace
2349 		 * target is added to the sprout FS, so there will be no
2350 		 * devices left under the seed FS.
2351 */
2352 ASSERT(fs_devices->seeding);
2353
2354 list_del_init(&fs_devices->seed_list);
2355 close_fs_devices(fs_devices);
2356 free_fs_devices(fs_devices);
2357 }
2358 mutex_unlock(&uuid_mutex);
2359 }
2360
2361 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2362 {
2363 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2364
2365 mutex_lock(&fs_devices->device_list_mutex);
2366
2367 btrfs_sysfs_remove_device(tgtdev);
2368
2369 if (tgtdev->bdev)
2370 fs_devices->open_devices--;
2371
2372 fs_devices->num_devices--;
2373
2374 btrfs_assign_next_active_device(tgtdev, NULL);
2375
2376 list_del_rcu(&tgtdev->dev_list);
2377
2378 mutex_unlock(&fs_devices->device_list_mutex);
2379
2380 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2381 tgtdev->name->str);
2382
2383 btrfs_close_bdev(tgtdev);
2384 synchronize_rcu();
2385 btrfs_free_device(tgtdev);
2386 }
2387
2388 /**
2389  * btrfs_get_dev_args_from_path - Populate args from the device at @path
2390 *
2391 * @fs_info: the filesystem
2392 * @args: the args to populate
2393 * @path: the path to the device
2394 *
2395 * This will read the super block of the device at @path and populate @args with
2396 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to
2397  * look up a device to operate on, but need to do it before we take any locks.
2398 * This properly handles the special case of "missing" that a user may pass in,
2399 * and does some basic sanity checks. The caller must make sure that @path is
2400 * properly NUL terminated before calling in, and must call
2401 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2402 * uuid buffers.
2403 *
2404 * Return: 0 for success, -errno for failure
2405 */
2406 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2407 struct btrfs_dev_lookup_args *args,
2408 const char *path)
2409 {
2410 struct btrfs_super_block *disk_super;
2411 struct block_device *bdev;
2412 int ret;
2413
2414 if (!path || !path[0])
2415 return -EINVAL;
2416 if (!strcmp(path, "missing")) {
2417 args->missing = true;
2418 return 0;
2419 }
2420
2421 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2422 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2423 if (!args->uuid || !args->fsid) {
2424 btrfs_put_dev_args_from_path(args);
2425 return -ENOMEM;
2426 }
2427
2428 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
2429 &bdev, &disk_super);
2430 if (ret) {
2431 btrfs_put_dev_args_from_path(args);
2432 return ret;
2433 }
2434
2435 args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2436 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2437 if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2438 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2439 else
2440 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2441 btrfs_release_disk_super(disk_super);
2442 blkdev_put(bdev, FMODE_READ);
2443 return 0;
2444 }
2445
2446 /*
2447 * Only use this jointly with btrfs_get_dev_args_from_path() because we will
2448 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables
2449 * that don't need to be freed.
2450 */
2451 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
2452 {
2453 kfree(args->uuid);
2454 kfree(args->fsid);
2455 args->uuid = NULL;
2456 args->fsid = NULL;
2457 }
2458
2459 struct btrfs_device *btrfs_find_device_by_devspec(
2460 struct btrfs_fs_info *fs_info, u64 devid,
2461 const char *device_path)
2462 {
2463 BTRFS_DEV_LOOKUP_ARGS(args);
2464 struct btrfs_device *device;
2465 int ret;
2466
2467 if (devid) {
2468 args.devid = devid;
2469 device = btrfs_find_device(fs_info->fs_devices, &args);
2470 if (!device)
2471 return ERR_PTR(-ENOENT);
2472 return device;
2473 }
2474
2475 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
2476 if (ret)
2477 return ERR_PTR(ret);
2478 device = btrfs_find_device(fs_info->fs_devices, &args);
2479 btrfs_put_dev_args_from_path(&args);
2480 if (!device)
2481 return ERR_PTR(-ENOENT);
2482 return device;
2483 }
2484
2485 /*
2486  * Does all the dirty work required for changing the filesystem's UUID.
2487 */
2488 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2489 {
2490 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2491 struct btrfs_fs_devices *old_devices;
2492 struct btrfs_fs_devices *seed_devices;
2493 struct btrfs_super_block *disk_super = fs_info->super_copy;
2494 struct btrfs_device *device;
2495 u64 super_flags;
2496
2497 lockdep_assert_held(&uuid_mutex);
2498 if (!fs_devices->seeding)
2499 return -EINVAL;
2500
2501 /*
2502 * Private copy of the seed devices, anchored at
2503 * fs_info->fs_devices->seed_list
2504 */
2505 seed_devices = alloc_fs_devices(NULL, NULL);
2506 if (IS_ERR(seed_devices))
2507 return PTR_ERR(seed_devices);
2508
2509 /*
2510 * It's necessary to retain a copy of the original seed fs_devices in
2511 * fs_uuids so that filesystems which have been seeded can successfully
2512 * reference the seed device from open_seed_devices. This also supports
2513 	 * multiple seed filesystems.
2514 */
2515 old_devices = clone_fs_devices(fs_devices);
2516 if (IS_ERR(old_devices)) {
2517 kfree(seed_devices);
2518 return PTR_ERR(old_devices);
2519 }
2520
2521 list_add(&old_devices->fs_list, &fs_uuids);
2522
2523 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2524 seed_devices->opened = 1;
2525 INIT_LIST_HEAD(&seed_devices->devices);
2526 INIT_LIST_HEAD(&seed_devices->alloc_list);
2527 mutex_init(&seed_devices->device_list_mutex);
2528
2529 mutex_lock(&fs_devices->device_list_mutex);
2530 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2531 synchronize_rcu);
2532 list_for_each_entry(device, &seed_devices->devices, dev_list)
2533 device->fs_devices = seed_devices;
2534
2535 fs_devices->seeding = false;
2536 fs_devices->num_devices = 0;
2537 fs_devices->open_devices = 0;
2538 fs_devices->missing_devices = 0;
2539 fs_devices->rotating = false;
2540 list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2541
2542 generate_random_uuid(fs_devices->fsid);
2543 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2544 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2545 mutex_unlock(&fs_devices->device_list_mutex);
2546
2547 super_flags = btrfs_super_flags(disk_super) &
2548 ~BTRFS_SUPER_FLAG_SEEDING;
2549 btrfs_set_super_flags(disk_super, super_flags);
2550
2551 return 0;
2552 }
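
/*
 * Sketch of what btrfs_prepare_sprout() does to the device lists
 * (illustration, assuming one seed device A with a new device B being
 * added by btrfs_init_new_device()):
 *
 *	before:	fs_devices (fsid X) -> devices { A }
 *	after:	fs_devices (fresh random fsid Y) -> devices { }  (B added later)
 *		    seed_list -> seed_devices (opened copy) -> devices { A }
 *
 * A clone of the original fsid X list is also kept on fs_uuids so that
 * open_seed_devices() can still find the seed by its old fsid.
 */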
2553
2554 /*
2555 * Store the expected generation for seed devices in device items.
2556 */
2557 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2558 {
2559 BTRFS_DEV_LOOKUP_ARGS(args);
2560 struct btrfs_fs_info *fs_info = trans->fs_info;
2561 struct btrfs_root *root = fs_info->chunk_root;
2562 struct btrfs_path *path;
2563 struct extent_buffer *leaf;
2564 struct btrfs_dev_item *dev_item;
2565 struct btrfs_device *device;
2566 struct btrfs_key key;
2567 u8 fs_uuid[BTRFS_FSID_SIZE];
2568 u8 dev_uuid[BTRFS_UUID_SIZE];
2569 int ret;
2570
2571 path = btrfs_alloc_path();
2572 if (!path)
2573 return -ENOMEM;
2574
2575 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2576 key.offset = 0;
2577 key.type = BTRFS_DEV_ITEM_KEY;
2578
2579 while (1) {
2580 btrfs_reserve_chunk_metadata(trans, false);
2581 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2582 btrfs_trans_release_chunk_metadata(trans);
2583 if (ret < 0)
2584 goto error;
2585
2586 leaf = path->nodes[0];
2587 next_slot:
2588 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2589 ret = btrfs_next_leaf(root, path);
2590 if (ret > 0)
2591 break;
2592 if (ret < 0)
2593 goto error;
2594 leaf = path->nodes[0];
2595 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2596 btrfs_release_path(path);
2597 continue;
2598 }
2599
2600 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2601 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2602 key.type != BTRFS_DEV_ITEM_KEY)
2603 break;
2604
2605 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2606 struct btrfs_dev_item);
2607 args.devid = btrfs_device_id(leaf, dev_item);
2608 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2609 BTRFS_UUID_SIZE);
2610 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2611 BTRFS_FSID_SIZE);
2612 args.uuid = dev_uuid;
2613 args.fsid = fs_uuid;
2614 device = btrfs_find_device(fs_info->fs_devices, &args);
2615 BUG_ON(!device); /* Logic error */
2616
2617 if (device->fs_devices->seeding) {
2618 btrfs_set_device_generation(leaf, dev_item,
2619 device->generation);
2620 btrfs_mark_buffer_dirty(leaf);
2621 }
2622
2623 path->slots[0]++;
2624 goto next_slot;
2625 }
2626 ret = 0;
2627 error:
2628 btrfs_free_path(path);
2629 return ret;
2630 }
2631
2632 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2633 {
2634 struct btrfs_root *root = fs_info->dev_root;
2635 struct request_queue *q;
2636 struct btrfs_trans_handle *trans;
2637 struct btrfs_device *device;
2638 struct block_device *bdev;
2639 struct super_block *sb = fs_info->sb;
2640 struct rcu_string *name;
2641 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2642 u64 orig_super_total_bytes;
2643 u64 orig_super_num_devices;
2644 int seeding_dev = 0;
2645 int ret = 0;
2646 bool locked = false;
2647
2648 if (sb_rdonly(sb) && !fs_devices->seeding)
2649 return -EROFS;
2650
2651 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2652 fs_info->bdev_holder);
2653 if (IS_ERR(bdev))
2654 return PTR_ERR(bdev);
2655
2656 if (!btrfs_check_device_zone_type(fs_info, bdev)) {
2657 ret = -EINVAL;
2658 goto error;
2659 }
2660
2661 if (fs_devices->seeding) {
2662 seeding_dev = 1;
2663 down_write(&sb->s_umount);
2664 mutex_lock(&uuid_mutex);
2665 locked = true;
2666 }
2667
2668 sync_blockdev(bdev);
2669
2670 rcu_read_lock();
2671 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2672 if (device->bdev == bdev) {
2673 ret = -EEXIST;
2674 rcu_read_unlock();
2675 goto error;
2676 }
2677 }
2678 rcu_read_unlock();
2679
2680 device = btrfs_alloc_device(fs_info, NULL, NULL);
2681 if (IS_ERR(device)) {
2682 /* we can safely leave the fs_devices entry around */
2683 ret = PTR_ERR(device);
2684 goto error;
2685 }
2686
2687 name = rcu_string_strdup(device_path, GFP_KERNEL);
2688 if (!name) {
2689 ret = -ENOMEM;
2690 goto error_free_device;
2691 }
2692 rcu_assign_pointer(device->name, name);
2693
2694 device->fs_info = fs_info;
2695 device->bdev = bdev;
2696
2697 ret = btrfs_get_dev_zone_info(device, false);
2698 if (ret)
2699 goto error_free_device;
2700
2701 trans = btrfs_start_transaction(root, 0);
2702 if (IS_ERR(trans)) {
2703 ret = PTR_ERR(trans);
2704 goto error_free_zone;
2705 }
2706
2707 q = bdev_get_queue(bdev);
2708 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2709 device->generation = trans->transid;
2710 device->io_width = fs_info->sectorsize;
2711 device->io_align = fs_info->sectorsize;
2712 device->sector_size = fs_info->sectorsize;
2713 device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2714 fs_info->sectorsize);
2715 device->disk_total_bytes = device->total_bytes;
2716 device->commit_total_bytes = device->total_bytes;
2717 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2718 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2719 device->mode = FMODE_EXCL;
2720 device->dev_stats_valid = 1;
2721 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2722
2723 if (seeding_dev) {
2724 btrfs_clear_sb_rdonly(sb);
2725 ret = btrfs_prepare_sprout(fs_info);
2726 if (ret) {
2727 btrfs_abort_transaction(trans, ret);
2728 goto error_trans;
2729 }
2730 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
2731 device);
2732 }
2733
2734 device->fs_devices = fs_devices;
2735
2736 mutex_lock(&fs_devices->device_list_mutex);
2737 mutex_lock(&fs_info->chunk_mutex);
2738 list_add_rcu(&device->dev_list, &fs_devices->devices);
2739 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2740 fs_devices->num_devices++;
2741 fs_devices->open_devices++;
2742 fs_devices->rw_devices++;
2743 fs_devices->total_devices++;
2744 fs_devices->total_rw_bytes += device->total_bytes;
2745
2746 atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2747
2748 if (!blk_queue_nonrot(q))
2749 fs_devices->rotating = true;
2750
2751 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2752 btrfs_set_super_total_bytes(fs_info->super_copy,
2753 round_down(orig_super_total_bytes + device->total_bytes,
2754 fs_info->sectorsize));
2755
2756 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2757 btrfs_set_super_num_devices(fs_info->super_copy,
2758 orig_super_num_devices + 1);
2759
2760 /*
2761 * we've got more storage, clear any full flags on the space
2762 * infos
2763 */
2764 btrfs_clear_space_info_full(fs_info);
2765
2766 mutex_unlock(&fs_info->chunk_mutex);
2767
2768 /* Add sysfs device entry */
2769 btrfs_sysfs_add_device(device);
2770
2771 mutex_unlock(&fs_devices->device_list_mutex);
2772
2773 if (seeding_dev) {
2774 mutex_lock(&fs_info->chunk_mutex);
2775 ret = init_first_rw_device(trans);
2776 mutex_unlock(&fs_info->chunk_mutex);
2777 if (ret) {
2778 btrfs_abort_transaction(trans, ret);
2779 goto error_sysfs;
2780 }
2781 }
2782
2783 ret = btrfs_add_dev_item(trans, device);
2784 if (ret) {
2785 btrfs_abort_transaction(trans, ret);
2786 goto error_sysfs;
2787 }
2788
2789 if (seeding_dev) {
2790 ret = btrfs_finish_sprout(trans);
2791 if (ret) {
2792 btrfs_abort_transaction(trans, ret);
2793 goto error_sysfs;
2794 }
2795
2796 /*
2797 * fs_devices now represents the newly sprouted filesystem and
2798 * its fsid has been changed by btrfs_prepare_sprout
2799 */
2800 btrfs_sysfs_update_sprout_fsid(fs_devices);
2801 }
2802
2803 ret = btrfs_commit_transaction(trans);
2804
2805 if (seeding_dev) {
2806 mutex_unlock(&uuid_mutex);
2807 up_write(&sb->s_umount);
2808 locked = false;
2809
2810 if (ret) /* transaction commit */
2811 return ret;
2812
2813 ret = btrfs_relocate_sys_chunks(fs_info);
2814 if (ret < 0)
2815 btrfs_handle_fs_error(fs_info, ret,
2816 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2817 trans = btrfs_attach_transaction(root);
2818 if (IS_ERR(trans)) {
2819 if (PTR_ERR(trans) == -ENOENT)
2820 return 0;
2821 ret = PTR_ERR(trans);
2822 trans = NULL;
2823 goto error_sysfs;
2824 }
2825 ret = btrfs_commit_transaction(trans);
2826 }
2827
2828 /*
2829 * Now that we have written a new super block to this device, check all
2830 	 * other fs_devices lists to see if device_path alienates any other
2831 	 * scanned device.
2832 * We can ignore the return value as it typically returns -EINVAL and
2833 * only succeeds if the device was an alien.
2834 */
2835 btrfs_forget_devices(device_path);
2836
2837 /* Update ctime/mtime for blkid or udev */
2838 update_dev_time(device_path);
2839
2840 return ret;
2841
2842 error_sysfs:
2843 btrfs_sysfs_remove_device(device);
2844 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2845 mutex_lock(&fs_info->chunk_mutex);
2846 list_del_rcu(&device->dev_list);
2847 list_del(&device->dev_alloc_list);
2848 fs_info->fs_devices->num_devices--;
2849 fs_info->fs_devices->open_devices--;
2850 fs_info->fs_devices->rw_devices--;
2851 fs_info->fs_devices->total_devices--;
2852 fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2853 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2854 btrfs_set_super_total_bytes(fs_info->super_copy,
2855 orig_super_total_bytes);
2856 btrfs_set_super_num_devices(fs_info->super_copy,
2857 orig_super_num_devices);
2858 mutex_unlock(&fs_info->chunk_mutex);
2859 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2860 error_trans:
2861 if (seeding_dev)
2862 btrfs_set_sb_rdonly(sb);
2863 if (trans)
2864 btrfs_end_transaction(trans);
2865 error_free_zone:
2866 btrfs_destroy_dev_zone_info(device);
2867 error_free_device:
2868 btrfs_free_device(device);
2869 error:
2870 blkdev_put(bdev, FMODE_EXCL);
2871 if (locked) {
2872 mutex_unlock(&uuid_mutex);
2873 up_write(&sb->s_umount);
2874 }
2875 return ret;
2876 }
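
/*
 * Usage note (added): this function is the backend of the
 * BTRFS_IOC_ADD_DEV ioctl, i.e. "btrfs device add <dev> <mnt>". A minimal
 * sketch of a hypothetical caller:
 *
 *	ret = btrfs_init_new_device(fs_info, "/dev/sdb");
 *	if (ret)
 *		btrfs_err(fs_info, "device add failed: %d", ret);
 */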
2877
2878 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2879 struct btrfs_device *device)
2880 {
2881 int ret;
2882 struct btrfs_path *path;
2883 struct btrfs_root *root = device->fs_info->chunk_root;
2884 struct btrfs_dev_item *dev_item;
2885 struct extent_buffer *leaf;
2886 struct btrfs_key key;
2887
2888 path = btrfs_alloc_path();
2889 if (!path)
2890 return -ENOMEM;
2891
2892 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2893 key.type = BTRFS_DEV_ITEM_KEY;
2894 key.offset = device->devid;
2895
2896 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2897 if (ret < 0)
2898 goto out;
2899
2900 if (ret > 0) {
2901 ret = -ENOENT;
2902 goto out;
2903 }
2904
2905 leaf = path->nodes[0];
2906 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2907
2908 btrfs_set_device_id(leaf, dev_item, device->devid);
2909 btrfs_set_device_type(leaf, dev_item, device->type);
2910 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2911 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2912 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2913 btrfs_set_device_total_bytes(leaf, dev_item,
2914 btrfs_device_get_disk_total_bytes(device));
2915 btrfs_set_device_bytes_used(leaf, dev_item,
2916 btrfs_device_get_bytes_used(device));
2917 btrfs_mark_buffer_dirty(leaf);
2918
2919 out:
2920 btrfs_free_path(path);
2921 return ret;
2922 }
2923
2924 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2925 struct btrfs_device *device, u64 new_size)
2926 {
2927 struct btrfs_fs_info *fs_info = device->fs_info;
2928 struct btrfs_super_block *super_copy = fs_info->super_copy;
2929 u64 old_total;
2930 u64 diff;
2931 int ret;
2932
2933 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2934 return -EACCES;
2935
2936 new_size = round_down(new_size, fs_info->sectorsize);
2937
2938 mutex_lock(&fs_info->chunk_mutex);
2939 old_total = btrfs_super_total_bytes(super_copy);
2940 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2941
2942 if (new_size <= device->total_bytes ||
2943 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2944 mutex_unlock(&fs_info->chunk_mutex);
2945 return -EINVAL;
2946 }
2947
2948 btrfs_set_super_total_bytes(super_copy,
2949 round_down(old_total + diff, fs_info->sectorsize));
2950 device->fs_devices->total_rw_bytes += diff;
2951
2952 btrfs_device_set_total_bytes(device, new_size);
2953 btrfs_device_set_disk_total_bytes(device, new_size);
2954 btrfs_clear_space_info_full(device->fs_info);
2955 if (list_empty(&device->post_commit_list))
2956 list_add_tail(&device->post_commit_list,
2957 &trans->transaction->dev_update_list);
2958 mutex_unlock(&fs_info->chunk_mutex);
2959
2960 btrfs_reserve_chunk_metadata(trans, false);
2961 ret = btrfs_update_device(trans, device);
2962 btrfs_trans_release_chunk_metadata(trans);
2963
2964 return ret;
2965 }
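
/*
 * Worked example (illustration only): with a 4KiB sectorsize, growing a
 * device from 10GiB to (15GiB + 1000 bytes) first rounds new_size down to
 * 15GiB, then:
 *
 *	diff = round_down(new_size - device->total_bytes, sectorsize)
 *	     = 5GiB
 *
 * so the super block total_bytes and fs_devices->total_rw_bytes both grow
 * by exactly 5GiB and stay sector aligned.
 */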
2966
2967 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2968 {
2969 struct btrfs_fs_info *fs_info = trans->fs_info;
2970 struct btrfs_root *root = fs_info->chunk_root;
2971 int ret;
2972 struct btrfs_path *path;
2973 struct btrfs_key key;
2974
2975 path = btrfs_alloc_path();
2976 if (!path)
2977 return -ENOMEM;
2978
2979 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2980 key.offset = chunk_offset;
2981 key.type = BTRFS_CHUNK_ITEM_KEY;
2982
2983 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2984 if (ret < 0)
2985 goto out;
2986 else if (ret > 0) { /* Logic error or corruption */
2987 btrfs_handle_fs_error(fs_info, -ENOENT,
2988 "Failed lookup while freeing chunk.");
2989 ret = -ENOENT;
2990 goto out;
2991 }
2992
2993 ret = btrfs_del_item(trans, root, path);
2994 if (ret < 0)
2995 btrfs_handle_fs_error(fs_info, ret,
2996 "Failed to delete chunk item.");
2997 out:
2998 btrfs_free_path(path);
2999 return ret;
3000 }
3001
3002 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3003 {
3004 struct btrfs_super_block *super_copy = fs_info->super_copy;
3005 struct btrfs_disk_key *disk_key;
3006 struct btrfs_chunk *chunk;
3007 u8 *ptr;
3008 int ret = 0;
3009 u32 num_stripes;
3010 u32 array_size;
3011 u32 len = 0;
3012 u32 cur;
3013 struct btrfs_key key;
3014
3015 lockdep_assert_held(&fs_info->chunk_mutex);
3016 array_size = btrfs_super_sys_array_size(super_copy);
3017
3018 ptr = super_copy->sys_chunk_array;
3019 cur = 0;
3020
3021 while (cur < array_size) {
3022 disk_key = (struct btrfs_disk_key *)ptr;
3023 btrfs_disk_key_to_cpu(&key, disk_key);
3024
3025 len = sizeof(*disk_key);
3026
3027 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3028 chunk = (struct btrfs_chunk *)(ptr + len);
3029 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
3030 len += btrfs_chunk_item_size(num_stripes);
3031 } else {
3032 ret = -EIO;
3033 break;
3034 }
3035 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
3036 key.offset == chunk_offset) {
3037 memmove(ptr, ptr + len, array_size - (cur + len));
3038 array_size -= len;
3039 btrfs_set_super_sys_array_size(super_copy, array_size);
3040 } else {
3041 ptr += len;
3042 cur += len;
3043 }
3044 }
3045 return ret;
3046 }
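
/*
 * Layout walked by btrfs_del_sys_chunk() (illustration): sys_chunk_array
 * is a packed sequence of (disk key, chunk item incl. stripes) pairs:
 *
 *	[key0][chunk0 + stripes][key1][chunk1 + stripes]...
 *
 * each entry spanning sizeof(struct btrfs_disk_key) +
 * btrfs_chunk_item_size(num_stripes) bytes. Deleting an entry memmove()s
 * everything after it over those bytes and shrinks sys_array_size in the
 * super block copy.
 */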
3047
3048 /*
3049 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
3050 * @logical: Logical block offset in bytes.
3051 * @length: Length of extent in bytes.
3052 *
3053 * Return: Chunk mapping or ERR_PTR.
3054 */
3055 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
3056 u64 logical, u64 length)
3057 {
3058 struct extent_map_tree *em_tree;
3059 struct extent_map *em;
3060
3061 em_tree = &fs_info->mapping_tree;
3062 read_lock(&em_tree->lock);
3063 em = lookup_extent_mapping(em_tree, logical, length);
3064 read_unlock(&em_tree->lock);
3065
3066 if (!em) {
3067 btrfs_crit(fs_info,
3068 "unable to find chunk map for logical %llu length %llu",
3069 logical, length);
3070 return ERR_PTR(-EINVAL);
3071 }
3072
3073 if (em->start > logical || em->start + em->len <= logical) {
3074 btrfs_crit(fs_info,
3075 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
3076 logical, logical + length, em->start, em->start + em->len);
3077 free_extent_map(em);
3078 return ERR_PTR(-EINVAL);
3079 }
3080
3081 /* callers are responsible for dropping em's ref. */
3082 return em;
3083 }
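
/*
 * Typical use (sketch): look up the chunk map covering one byte at
 * @chunk_offset and drop the reference when done, exactly as
 * btrfs_remove_chunk() below does:
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	...
 *	free_extent_map(em);
 */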
3084
3085 static int remove_chunk_item(struct btrfs_trans_handle *trans,
3086 struct map_lookup *map, u64 chunk_offset)
3087 {
3088 int i;
3089
3090 /*
3091 * Removing chunk items and updating the device items in the chunks btree
3092 * requires holding the chunk_mutex.
3093 * See the comment at btrfs_chunk_alloc() for the details.
3094 */
3095 lockdep_assert_held(&trans->fs_info->chunk_mutex);
3096
3097 for (i = 0; i < map->num_stripes; i++) {
3098 int ret;
3099
3100 ret = btrfs_update_device(trans, map->stripes[i].dev);
3101 if (ret)
3102 return ret;
3103 }
3104
3105 return btrfs_free_chunk(trans, chunk_offset);
3106 }
3107
3108 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3109 {
3110 struct btrfs_fs_info *fs_info = trans->fs_info;
3111 struct extent_map *em;
3112 struct map_lookup *map;
3113 u64 dev_extent_len = 0;
3114 int i, ret = 0;
3115 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3116
3117 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3118 if (IS_ERR(em)) {
3119 /*
3120 * This is a logic error, but we don't want to just rely on the
3121 * user having built with ASSERT enabled, so if ASSERT doesn't
3122 * do anything we still error out.
3123 */
3124 ASSERT(0);
3125 return PTR_ERR(em);
3126 }
3127 map = em->map_lookup;
3128
3129 /*
3130 * First delete the device extent items from the devices btree.
3131 * We take the device_list_mutex to avoid racing with the finishing phase
3132 * of a device replace operation. See the comment below before acquiring
3133 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3134 * because that can result in a deadlock when deleting the device extent
3135 * items from the devices btree - COWing an extent buffer from the btree
3136 * may result in allocating a new metadata chunk, which would attempt to
3137 * lock again fs_info->chunk_mutex.
3138 */
3139 mutex_lock(&fs_devices->device_list_mutex);
3140 for (i = 0; i < map->num_stripes; i++) {
3141 struct btrfs_device *device = map->stripes[i].dev;
3142 ret = btrfs_free_dev_extent(trans, device,
3143 map->stripes[i].physical,
3144 &dev_extent_len);
3145 if (ret) {
3146 mutex_unlock(&fs_devices->device_list_mutex);
3147 btrfs_abort_transaction(trans, ret);
3148 goto out;
3149 }
3150
3151 if (device->bytes_used > 0) {
3152 mutex_lock(&fs_info->chunk_mutex);
3153 btrfs_device_set_bytes_used(device,
3154 device->bytes_used - dev_extent_len);
3155 atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3156 btrfs_clear_space_info_full(fs_info);
3157 mutex_unlock(&fs_info->chunk_mutex);
3158 }
3159 }
3160 mutex_unlock(&fs_devices->device_list_mutex);
3161
3162 /*
3163 * We acquire fs_info->chunk_mutex for 2 reasons:
3164 *
3165 * 1) Just like with the first phase of the chunk allocation, we must
3166 * reserve system space, do all chunk btree updates and deletions, and
3167 * update the system chunk array in the superblock while holding this
3168 * mutex. This is for similar reasons as explained on the comment at
3169 * the top of btrfs_chunk_alloc();
3170 *
3171 * 2) Prevent races with the final phase of a device replace operation
3172 * that replaces the device object associated with the map's stripes,
3173 * because the device object's id can change at any time during that
3174 * final phase of the device replace operation
3175 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3176 * replaced device and then see it with an ID of
3177 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3178 	 * the device item, which does not exist in the chunk btree.
3179 * The finishing phase of device replace acquires both the
3180 * device_list_mutex and the chunk_mutex, in that order, so we are
3181 * safe by just acquiring the chunk_mutex.
3182 */
3183 trans->removing_chunk = true;
3184 mutex_lock(&fs_info->chunk_mutex);
3185
3186 check_system_chunk(trans, map->type);
3187
3188 ret = remove_chunk_item(trans, map, chunk_offset);
3189 /*
3190 * Normally we should not get -ENOSPC since we reserved space before
3191 * through the call to check_system_chunk().
3192 *
3193 * Despite our system space_info having enough free space, we may not
3194 * be able to allocate extents from its block groups, because all have
3195 * an incompatible profile, which will force us to allocate a new system
3196 * block group with the right profile, or right after we called
3197 	 * check_system_chunk() above, a scrub turned the only system block group
3198 * with enough free space into RO mode.
3199 * This is explained with more detail at do_chunk_alloc().
3200 *
3201 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3202 */
3203 if (ret == -ENOSPC) {
3204 const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3205 struct btrfs_block_group *sys_bg;
3206
3207 sys_bg = btrfs_create_chunk(trans, sys_flags);
3208 if (IS_ERR(sys_bg)) {
3209 ret = PTR_ERR(sys_bg);
3210 btrfs_abort_transaction(trans, ret);
3211 goto out;
3212 }
3213
3214 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3215 if (ret) {
3216 btrfs_abort_transaction(trans, ret);
3217 goto out;
3218 }
3219
3220 ret = remove_chunk_item(trans, map, chunk_offset);
3221 if (ret) {
3222 btrfs_abort_transaction(trans, ret);
3223 goto out;
3224 }
3225 } else if (ret) {
3226 btrfs_abort_transaction(trans, ret);
3227 goto out;
3228 }
3229
3230 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3231
3232 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3233 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3234 if (ret) {
3235 btrfs_abort_transaction(trans, ret);
3236 goto out;
3237 }
3238 }
3239
3240 mutex_unlock(&fs_info->chunk_mutex);
3241 trans->removing_chunk = false;
3242
3243 /*
3244 * We are done with chunk btree updates and deletions, so release the
3245 * system space we previously reserved (with check_system_chunk()).
3246 */
3247 btrfs_trans_release_chunk_metadata(trans);
3248
3249 ret = btrfs_remove_block_group(trans, chunk_offset, em);
3250 if (ret) {
3251 btrfs_abort_transaction(trans, ret);
3252 goto out;
3253 }
3254
3255 out:
3256 if (trans->removing_chunk) {
3257 mutex_unlock(&fs_info->chunk_mutex);
3258 trans->removing_chunk = false;
3259 }
3260 /* once for us */
3261 free_extent_map(em);
3262 return ret;
3263 }
3264
3265 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3266 {
3267 struct btrfs_root *root = fs_info->chunk_root;
3268 struct btrfs_trans_handle *trans;
3269 struct btrfs_block_group *block_group;
3270 u64 length;
3271 int ret;
3272
3273 /*
3274 * Prevent races with automatic removal of unused block groups.
3275 * After we relocate and before we remove the chunk with offset
3276 * chunk_offset, automatic removal of the block group can kick in,
3277 * resulting in a failure when calling btrfs_remove_chunk() below.
3278 *
3279 * Make sure to acquire this mutex before doing a tree search (dev
3280 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3281 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3282 * we release the path used to search the chunk/dev tree and before
3283 * the current task acquires this mutex and calls us.
3284 */
3285 lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3286
3287 /* step one, relocate all the extents inside this chunk */
3288 btrfs_scrub_pause(fs_info);
3289 ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3290 btrfs_scrub_continue(fs_info);
3291 if (ret)
3292 return ret;
3293
3294 block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3295 if (!block_group)
3296 return -ENOENT;
3297 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3298 length = block_group->length;
3299 btrfs_put_block_group(block_group);
3300
3301 /*
3302 * On a zoned file system, discard the whole block group, this will
3303 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3304 * resetting the zone fails, don't treat it as a fatal problem from the
3305 * filesystem's point of view.
3306 */
3307 if (btrfs_is_zoned(fs_info)) {
3308 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3309 if (ret)
3310 btrfs_info(fs_info,
3311 "failed to reset zone %llu after relocation",
3312 chunk_offset);
3313 }
3314
3315 trans = btrfs_start_trans_remove_block_group(root->fs_info,
3316 chunk_offset);
3317 if (IS_ERR(trans)) {
3318 ret = PTR_ERR(trans);
3319 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3320 return ret;
3321 }
3322
3323 /*
3324 * step two, delete the device extents and the
3325 * chunk tree entries
3326 */
3327 ret = btrfs_remove_chunk(trans, chunk_offset);
3328 btrfs_end_transaction(trans);
3329 return ret;
3330 }
3331
3332 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3333 {
3334 struct btrfs_root *chunk_root = fs_info->chunk_root;
3335 struct btrfs_path *path;
3336 struct extent_buffer *leaf;
3337 struct btrfs_chunk *chunk;
3338 struct btrfs_key key;
3339 struct btrfs_key found_key;
3340 u64 chunk_type;
3341 bool retried = false;
3342 int failed = 0;
3343 int ret;
3344
3345 path = btrfs_alloc_path();
3346 if (!path)
3347 return -ENOMEM;
3348
3349 again:
3350 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3351 key.offset = (u64)-1;
3352 key.type = BTRFS_CHUNK_ITEM_KEY;
3353
3354 while (1) {
3355 mutex_lock(&fs_info->reclaim_bgs_lock);
3356 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3357 if (ret < 0) {
3358 mutex_unlock(&fs_info->reclaim_bgs_lock);
3359 goto error;
3360 }
3361 BUG_ON(ret == 0); /* Corruption */
3362
3363 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3364 key.type);
3365 if (ret)
3366 mutex_unlock(&fs_info->reclaim_bgs_lock);
3367 if (ret < 0)
3368 goto error;
3369 if (ret > 0)
3370 break;
3371
3372 leaf = path->nodes[0];
3373 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3374
3375 chunk = btrfs_item_ptr(leaf, path->slots[0],
3376 struct btrfs_chunk);
3377 chunk_type = btrfs_chunk_type(leaf, chunk);
3378 btrfs_release_path(path);
3379
3380 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3381 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3382 if (ret == -ENOSPC)
3383 failed++;
3384 else
3385 BUG_ON(ret);
3386 }
3387 mutex_unlock(&fs_info->reclaim_bgs_lock);
3388
3389 if (found_key.offset == 0)
3390 break;
3391 key.offset = found_key.offset - 1;
3392 }
3393 ret = 0;
3394 if (failed && !retried) {
3395 failed = 0;
3396 retried = true;
3397 goto again;
3398 } else if (WARN_ON(failed && retried)) {
3399 ret = -ENOSPC;
3400 }
3401 error:
3402 btrfs_free_path(path);
3403 return ret;
3404 }
3405
3406 /*
3407  * Return 1 : allocated a data chunk successfully,
3408  * Return <0: error while allocating a data chunk,
3409  * Return 0 : no need to allocate a data chunk.
3410 */
3411 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3412 u64 chunk_offset)
3413 {
3414 struct btrfs_block_group *cache;
3415 u64 bytes_used;
3416 u64 chunk_type;
3417
3418 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3419 ASSERT(cache);
3420 chunk_type = cache->flags;
3421 btrfs_put_block_group(cache);
3422
3423 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3424 return 0;
3425
3426 spin_lock(&fs_info->data_sinfo->lock);
3427 bytes_used = fs_info->data_sinfo->bytes_used;
3428 spin_unlock(&fs_info->data_sinfo->lock);
3429
3430 if (!bytes_used) {
3431 struct btrfs_trans_handle *trans;
3432 int ret;
3433
3434 trans = btrfs_join_transaction(fs_info->tree_root);
3435 if (IS_ERR(trans))
3436 return PTR_ERR(trans);
3437
3438 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3439 btrfs_end_transaction(trans);
3440 if (ret < 0)
3441 return ret;
3442 return 1;
3443 }
3444
3445 return 0;
3446 }
3447
3448 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3449 struct btrfs_balance_control *bctl)
3450 {
3451 struct btrfs_root *root = fs_info->tree_root;
3452 struct btrfs_trans_handle *trans;
3453 struct btrfs_balance_item *item;
3454 struct btrfs_disk_balance_args disk_bargs;
3455 struct btrfs_path *path;
3456 struct extent_buffer *leaf;
3457 struct btrfs_key key;
3458 int ret, err;
3459
3460 path = btrfs_alloc_path();
3461 if (!path)
3462 return -ENOMEM;
3463
3464 trans = btrfs_start_transaction(root, 0);
3465 if (IS_ERR(trans)) {
3466 btrfs_free_path(path);
3467 return PTR_ERR(trans);
3468 }
3469
3470 key.objectid = BTRFS_BALANCE_OBJECTID;
3471 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3472 key.offset = 0;
3473
3474 ret = btrfs_insert_empty_item(trans, root, path, &key,
3475 sizeof(*item));
3476 if (ret)
3477 goto out;
3478
3479 leaf = path->nodes[0];
3480 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3481
3482 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3483
3484 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3485 btrfs_set_balance_data(leaf, item, &disk_bargs);
3486 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3487 btrfs_set_balance_meta(leaf, item, &disk_bargs);
3488 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3489 btrfs_set_balance_sys(leaf, item, &disk_bargs);
3490
3491 btrfs_set_balance_flags(leaf, item, bctl->flags);
3492
3493 btrfs_mark_buffer_dirty(leaf);
3494 out:
3495 btrfs_free_path(path);
3496 err = btrfs_commit_transaction(trans);
3497 if (err && !ret)
3498 ret = err;
3499 return ret;
3500 }
3501
3502 static int del_balance_item(struct btrfs_fs_info *fs_info)
3503 {
3504 struct btrfs_root *root = fs_info->tree_root;
3505 struct btrfs_trans_handle *trans;
3506 struct btrfs_path *path;
3507 struct btrfs_key key;
3508 int ret, err;
3509
3510 path = btrfs_alloc_path();
3511 if (!path)
3512 return -ENOMEM;
3513
3514 trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3515 if (IS_ERR(trans)) {
3516 btrfs_free_path(path);
3517 return PTR_ERR(trans);
3518 }
3519
3520 key.objectid = BTRFS_BALANCE_OBJECTID;
3521 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3522 key.offset = 0;
3523
3524 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3525 if (ret < 0)
3526 goto out;
3527 if (ret > 0) {
3528 ret = -ENOENT;
3529 goto out;
3530 }
3531
3532 ret = btrfs_del_item(trans, root, path);
3533 out:
3534 btrfs_free_path(path);
3535 err = btrfs_commit_transaction(trans);
3536 if (err && !ret)
3537 ret = err;
3538 return ret;
3539 }
3540
3541 /*
3542 * This is a heuristic used to reduce the number of chunks balanced on
3543 * resume after balance was interrupted.
3544 */
3545 static void update_balance_args(struct btrfs_balance_control *bctl)
3546 {
3547 /*
3548 * Turn on soft mode for chunk types that were being converted.
3549 */
3550 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3551 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3552 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3553 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3554 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3555 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3556
3557 /*
3558 * Turn on usage filter if is not already used. The idea is
3559 * that chunks that we have already balanced should be
3560 * reasonably full. Don't do it for chunks that are being
3561 * converted - that will keep us from relocating unconverted
3562 * (albeit full) chunks.
3563 */
3564 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3565 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3566 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3567 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3568 bctl->data.usage = 90;
3569 }
3570 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3571 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3572 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3573 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3574 bctl->sys.usage = 90;
3575 }
3576 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3577 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3578 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3579 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3580 bctl->meta.usage = 90;
3581 }
3582 }
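/*
 * E.g. resuming an interrupted plain rebalance picks up usage=90 here,
 * so chunks that are already at least 90% full are skipped on the
 * resumed run.
 */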
3583
3584 /*
3585 * Clear the balance status in fs_info and delete the balance item from disk.
3586 */
3587 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3588 {
3589 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3590 int ret;
3591
3592 BUG_ON(!fs_info->balance_ctl);
3593
3594 spin_lock(&fs_info->balance_lock);
3595 fs_info->balance_ctl = NULL;
3596 spin_unlock(&fs_info->balance_lock);
3597
3598 kfree(bctl);
3599 ret = del_balance_item(fs_info);
3600 if (ret)
3601 btrfs_handle_fs_error(fs_info, ret, NULL);
3602 }
3603
3604 /*
3605 * Balance filters. Return 1 if chunk should be filtered out
3606 * (should not be balanced).
3607 */
3608 static int chunk_profiles_filter(u64 chunk_type,
3609 struct btrfs_balance_args *bargs)
3610 {
3611 chunk_type = chunk_to_extended(chunk_type) &
3612 BTRFS_EXTENDED_PROFILE_MASK;
3613
3614 if (bargs->profiles & chunk_type)
3615 return 0;
3616
3617 return 1;
3618 }
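/*
 * Note: chunk_to_extended() maps a chunk type with no profile bits set
 * (i.e. SINGLE) to BTRFS_AVAIL_ALLOC_BIT_SINGLE, so the "single"
 * profile can be matched against bargs->profiles like any other one.
 */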
3619
3620 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3621 struct btrfs_balance_args *bargs)
3622 {
3623 struct btrfs_block_group *cache;
3624 u64 chunk_used;
3625 u64 user_thresh_min;
3626 u64 user_thresh_max;
3627 int ret = 1;
3628
3629 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3630 chunk_used = cache->used;
3631
3632 if (bargs->usage_min == 0)
3633 user_thresh_min = 0;
3634 else
3635 user_thresh_min = div_factor_fine(cache->length,
3636 bargs->usage_min);
3637
3638 if (bargs->usage_max == 0)
3639 user_thresh_max = 1;
3640 else if (bargs->usage_max > 100)
3641 user_thresh_max = cache->length;
3642 else
3643 user_thresh_max = div_factor_fine(cache->length,
3644 bargs->usage_max);
3645
3646 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3647 ret = 0;
3648
3649 btrfs_put_block_group(cache);
3650 return ret;
3651 }
3652
3653 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3654 u64 chunk_offset, struct btrfs_balance_args *bargs)
3655 {
3656 struct btrfs_block_group *cache;
3657 u64 chunk_used, user_thresh;
3658 int ret = 1;
3659
3660 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3661 chunk_used = cache->used;
3662
3663 if (bargs->usage_min == 0)
3664 user_thresh = 1;
3665 else if (bargs->usage > 100)
3666 user_thresh = cache->length;
3667 else
3668 user_thresh = div_factor_fine(cache->length, bargs->usage);
3669
3670 if (chunk_used < user_thresh)
3671 ret = 0;
3672
3673 btrfs_put_block_group(cache);
3674 return ret;
3675 }
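/*
 * Example: with div_factor_fine(len, factor) == len * factor / 100, a
 * usage=50 filter on a 1GiB chunk gives a threshold of 512MiB, so only
 * chunks that are less than half full are relocated.
 */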
3676
3677 static int chunk_devid_filter(struct extent_buffer *leaf,
3678 struct btrfs_chunk *chunk,
3679 struct btrfs_balance_args *bargs)
3680 {
3681 struct btrfs_stripe *stripe;
3682 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3683 int i;
3684
3685 for (i = 0; i < num_stripes; i++) {
3686 stripe = btrfs_stripe_nr(chunk, i);
3687 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3688 return 0;
3689 }
3690
3691 return 1;
3692 }
3693
3694 static u64 calc_data_stripes(u64 type, int num_stripes)
3695 {
3696 const int index = btrfs_bg_flags_to_raid_index(type);
3697 const int ncopies = btrfs_raid_array[index].ncopies;
3698 const int nparity = btrfs_raid_array[index].nparity;
3699
3700 return (num_stripes - nparity) / ncopies;
3701 }
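/*
 * E.g. a 6-stripe RAID6 chunk (ncopies=1, nparity=2) has 6 - 2 = 4
 * data stripes, while a 4-stripe RAID10 chunk (ncopies=2, nparity=0)
 * has 4 / 2 = 2.
 */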
3702
3703 /* [pstart, pend) */
3704 static int chunk_drange_filter(struct extent_buffer *leaf,
3705 struct btrfs_chunk *chunk,
3706 struct btrfs_balance_args *bargs)
3707 {
3708 struct btrfs_stripe *stripe;
3709 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3710 u64 stripe_offset;
3711 u64 stripe_length;
3712 u64 type;
3713 int factor;
3714 int i;
3715
3716 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3717 return 0;
3718
3719 type = btrfs_chunk_type(leaf, chunk);
3720 factor = calc_data_stripes(type, num_stripes);
3721
3722 for (i = 0; i < num_stripes; i++) {
3723 stripe = btrfs_stripe_nr(chunk, i);
3724 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3725 continue;
3726
3727 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3728 stripe_length = btrfs_chunk_length(leaf, chunk);
3729 stripe_length = div_u64(stripe_length, factor);
3730
3731 if (stripe_offset < bargs->pend &&
3732 stripe_offset + stripe_length > bargs->pstart)
3733 return 0;
3734 }
3735
3736 return 1;
3737 }
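/*
 * Each stripe covers chunk_length / data_stripes bytes on its device,
 * e.g. a 1GiB RAID0 chunk over two devices occupies 512MiB per device,
 * and that per-device range is tested against the half-open interval
 * [pstart, pend).
 */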
3738
3739 /* [vstart, vend) */
3740 static int chunk_vrange_filter(struct extent_buffer *leaf,
3741 struct btrfs_chunk *chunk,
3742 u64 chunk_offset,
3743 struct btrfs_balance_args *bargs)
3744 {
3745 if (chunk_offset < bargs->vend &&
3746 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3747 /* at least part of the chunk is inside this vrange */
3748 return 0;
3749
3750 return 1;
3751 }
3752
3753 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3754 struct btrfs_chunk *chunk,
3755 struct btrfs_balance_args *bargs)
3756 {
3757 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3758
3759 if (bargs->stripes_min <= num_stripes
3760 && num_stripes <= bargs->stripes_max)
3761 return 0;
3762
3763 return 1;
3764 }
3765
3766 static int chunk_soft_convert_filter(u64 chunk_type,
3767 struct btrfs_balance_args *bargs)
3768 {
3769 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3770 return 0;
3771
3772 chunk_type = chunk_to_extended(chunk_type) &
3773 BTRFS_EXTENDED_PROFILE_MASK;
3774
3775 if (bargs->target == chunk_type)
3776 return 1;
3777
3778 return 0;
3779 }
3780
3781 static int should_balance_chunk(struct extent_buffer *leaf,
3782 struct btrfs_chunk *chunk, u64 chunk_offset)
3783 {
3784 struct btrfs_fs_info *fs_info = leaf->fs_info;
3785 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3786 struct btrfs_balance_args *bargs = NULL;
3787 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3788
3789 /* type filter */
3790 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3791 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3792 return 0;
3793 }
3794
3795 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3796 bargs = &bctl->data;
3797 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3798 bargs = &bctl->sys;
3799 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3800 bargs = &bctl->meta;
3801
3802 /* profiles filter */
3803 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3804 chunk_profiles_filter(chunk_type, bargs)) {
3805 return 0;
3806 }
3807
3808 /* usage filter */
3809 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3810 chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3811 return 0;
3812 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3813 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3814 return 0;
3815 }
3816
3817 /* devid filter */
3818 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3819 chunk_devid_filter(leaf, chunk, bargs)) {
3820 return 0;
3821 }
3822
3823 /* drange filter, makes sense only with devid filter */
3824 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3825 chunk_drange_filter(leaf, chunk, bargs)) {
3826 return 0;
3827 }
3828
3829 /* vrange filter */
3830 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3831 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3832 return 0;
3833 }
3834
3835 /* stripes filter */
3836 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3837 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3838 return 0;
3839 }
3840
3841 /* soft profile changing mode */
3842 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3843 chunk_soft_convert_filter(chunk_type, bargs)) {
3844 return 0;
3845 }
3846
3847 /*
3848 * limited by count, must be the last filter
3849 */
3850 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3851 if (bargs->limit == 0)
3852 return 0;
3853 else
3854 bargs->limit--;
3855 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3856 /*
3857 * Same logic as the 'limit' filter; the minimum cannot be
3858 * determined here because we do not have the global information
3859 * about the count of all chunks that satisfy the filters.
3860 */
3861 if (bargs->limit_max == 0)
3862 return 0;
3863 else
3864 bargs->limit_max--;
3865 }
3866
3867 return 1;
3868 }
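/*
 * All of the filters above are ANDed together, and the limit filters
 * must stay last because they decrement counters in bargs as a side
 * effect, which should only happen once the chunk passed everything
 * else.
 */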
3869
3870 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3871 {
3872 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3873 struct btrfs_root *chunk_root = fs_info->chunk_root;
3874 u64 chunk_type;
3875 struct btrfs_chunk *chunk;
3876 struct btrfs_path *path = NULL;
3877 struct btrfs_key key;
3878 struct btrfs_key found_key;
3879 struct extent_buffer *leaf;
3880 int slot;
3881 int ret;
3882 int enospc_errors = 0;
3883 bool counting = true;
3884 /* The single value limit and min/max limits use the same bytes in the args union, save them for the second pass */
3885 u64 limit_data = bctl->data.limit;
3886 u64 limit_meta = bctl->meta.limit;
3887 u64 limit_sys = bctl->sys.limit;
3888 u32 count_data = 0;
3889 u32 count_meta = 0;
3890 u32 count_sys = 0;
3891 int chunk_reserved = 0;
3892
3893 path = btrfs_alloc_path();
3894 if (!path) {
3895 ret = -ENOMEM;
3896 goto error;
3897 }
3898
3899 /* zero out stat counters */
3900 spin_lock(&fs_info->balance_lock);
3901 memset(&bctl->stat, 0, sizeof(bctl->stat));
3902 spin_unlock(&fs_info->balance_lock);
3903 again:
3904 if (!counting) {
3905 /*
3906 * The single value limit and min/max limits use the same bytes
3907 * in the args union; restore the saved values for the second pass.
3908 */
3909 bctl->data.limit = limit_data;
3910 bctl->meta.limit = limit_meta;
3911 bctl->sys.limit = limit_sys;
3912 }
3913 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3914 key.offset = (u64)-1;
3915 key.type = BTRFS_CHUNK_ITEM_KEY;
3916
3917 while (1) {
3918 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3919 atomic_read(&fs_info->balance_cancel_req)) {
3920 ret = -ECANCELED;
3921 goto error;
3922 }
3923
3924 mutex_lock(&fs_info->reclaim_bgs_lock);
3925 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3926 if (ret < 0) {
3927 mutex_unlock(&fs_info->reclaim_bgs_lock);
3928 goto error;
3929 }
3930
3931 /*
3932 * this shouldn't happen; it means the last relocation
3933 * failed
3934 */
3935 if (ret == 0)
3936 BUG(); /* FIXME break ? */
3937
3938 ret = btrfs_previous_item(chunk_root, path, 0,
3939 BTRFS_CHUNK_ITEM_KEY);
3940 if (ret) {
3941 mutex_unlock(&fs_info->reclaim_bgs_lock);
3942 ret = 0;
3943 break;
3944 }
3945
3946 leaf = path->nodes[0];
3947 slot = path->slots[0];
3948 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3949
3950 if (found_key.objectid != key.objectid) {
3951 mutex_unlock(&fs_info->reclaim_bgs_lock);
3952 break;
3953 }
3954
3955 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3956 chunk_type = btrfs_chunk_type(leaf, chunk);
3957
3958 if (!counting) {
3959 spin_lock(&fs_info->balance_lock);
3960 bctl->stat.considered++;
3961 spin_unlock(&fs_info->balance_lock);
3962 }
3963
3964 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3965
3966 btrfs_release_path(path);
3967 if (!ret) {
3968 mutex_unlock(&fs_info->reclaim_bgs_lock);
3969 goto loop;
3970 }
3971
3972 if (counting) {
3973 mutex_unlock(&fs_info->reclaim_bgs_lock);
3974 spin_lock(&fs_info->balance_lock);
3975 bctl->stat.expected++;
3976 spin_unlock(&fs_info->balance_lock);
3977
3978 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3979 count_data++;
3980 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3981 count_sys++;
3982 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3983 count_meta++;
3984
3985 goto loop;
3986 }
3987
3988 /*
3989 * Apply limit_min filter, no need to check if the LIMITS
3990 * filter is used, limit_min is 0 by default
3991 */
3992 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3993 count_data < bctl->data.limit_min)
3994 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3995 count_meta < bctl->meta.limit_min)
3996 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3997 count_sys < bctl->sys.limit_min)) {
3998 mutex_unlock(&fs_info->reclaim_bgs_lock);
3999 goto loop;
4000 }
4001
4002 if (!chunk_reserved) {
4003 /*
4004 * We may be relocating the only data chunk we have,
4005 * which could potentially end up losing the data
4006 * raid profile, so let's allocate an empty one in
4007 * advance.
4008 */
4009 ret = btrfs_may_alloc_data_chunk(fs_info,
4010 found_key.offset);
4011 if (ret < 0) {
4012 mutex_unlock(&fs_info->reclaim_bgs_lock);
4013 goto error;
4014 } else if (ret == 1) {
4015 chunk_reserved = 1;
4016 }
4017 }
4018
4019 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
4020 mutex_unlock(&fs_info->reclaim_bgs_lock);
4021 if (ret == -ENOSPC) {
4022 enospc_errors++;
4023 } else if (ret == -ETXTBSY) {
4024 btrfs_info(fs_info,
4025 "skipping relocation of block group %llu due to active swapfile",
4026 found_key.offset);
4027 ret = 0;
4028 } else if (ret) {
4029 goto error;
4030 } else {
4031 spin_lock(&fs_info->balance_lock);
4032 bctl->stat.completed++;
4033 spin_unlock(&fs_info->balance_lock);
4034 }
4035 loop:
4036 if (found_key.offset == 0)
4037 break;
4038 key.offset = found_key.offset - 1;
4039 }
4040
4041 if (counting) {
4042 btrfs_release_path(path);
4043 counting = false;
4044 goto again;
4045 }
4046 error:
4047 btrfs_free_path(path);
4048 if (enospc_errors) {
4049 btrfs_info(fs_info, "%d enospc errors during balance",
4050 enospc_errors);
4051 if (!ret)
4052 ret = -ENOSPC;
4053 }
4054
4055 return ret;
4056 }
4057
4058 /**
4059 * alloc_profile_is_valid - see if a given profile is valid and reduced
4060 * @flags: profile to validate
4061 * @extended: if true @flags is treated as an extended profile
4062 */
4063 static int alloc_profile_is_valid(u64 flags, int extended)
4064 {
4065 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
4066 BTRFS_BLOCK_GROUP_PROFILE_MASK);
4067
4068 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
4069
4070 /* 1) check that all other bits are zeroed */
4071 if (flags & ~mask)
4072 return 0;
4073
4074 /* 2) see if profile is reduced */
4075 if (flags == 0)
4076 return !extended; /* "0" is valid for usual profiles */
4077
4078 return has_single_bit_set(flags);
4079 }
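/*
 * E.g. RAID1|RAID5 is rejected by has_single_bit_set() because it is
 * not reduced to a single profile, while plain RAID1 passes.
 */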
4080
4081 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
4082 {
4083 /* cancel requested || normal exit path */
4084 return atomic_read(&fs_info->balance_cancel_req) ||
4085 (atomic_read(&fs_info->balance_pause_req) == 0 &&
4086 atomic_read(&fs_info->balance_cancel_req) == 0);
4087 }
4088
4089 /*
4090 * Validate target profile against allowed profiles and return true if it's OK.
4091 * Otherwise print the error message and return false.
4092 */
4093 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
4094 const struct btrfs_balance_args *bargs,
4095 u64 allowed, const char *type)
4096 {
4097 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4098 return true;
4099
4100 if (fs_info->sectorsize < PAGE_SIZE &&
4101 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) {
4102 btrfs_err(fs_info,
4103 "RAID56 is not yet supported for sectorsize %u with page size %lu",
4104 fs_info->sectorsize, PAGE_SIZE);
4105 return false;
4106 }
4107 /* Profile is valid and does not have bits outside of the allowed set */
4108 if (alloc_profile_is_valid(bargs->target, 1) &&
4109 (bargs->target & ~allowed) == 0)
4110 return true;
4111
4112 btrfs_err(fs_info, "balance: invalid convert %s profile %s",
4113 type, btrfs_bg_type_to_raid_name(bargs->target));
4114 return false;
4115 }
4116
4117 /*
4118 * Fill @buf with textual description of balance filter flags @bargs, up to
4119 * @size_buf including the terminating null. The output may be trimmed if it
4120 * does not fit into the provided buffer.
4121 */
4122 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4123 u32 size_buf)
4124 {
4125 int ret;
4126 u32 size_bp = size_buf;
4127 char *bp = buf;
4128 u64 flags = bargs->flags;
4129 char tmp_buf[128] = {'\0'};
4130
4131 if (!flags)
4132 return;
4133
4134 #define CHECK_APPEND_NOARG(a) \
4135 do { \
4136 ret = snprintf(bp, size_bp, (a)); \
4137 if (ret < 0 || ret >= size_bp) \
4138 goto out_overflow; \
4139 size_bp -= ret; \
4140 bp += ret; \
4141 } while (0)
4142
4143 #define CHECK_APPEND_1ARG(a, v1) \
4144 do { \
4145 ret = snprintf(bp, size_bp, (a), (v1)); \
4146 if (ret < 0 || ret >= size_bp) \
4147 goto out_overflow; \
4148 size_bp -= ret; \
4149 bp += ret; \
4150 } while (0)
4151
4152 #define CHECK_APPEND_2ARG(a, v1, v2) \
4153 do { \
4154 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
4155 if (ret < 0 || ret >= size_bp) \
4156 goto out_overflow; \
4157 size_bp -= ret; \
4158 bp += ret; \
4159 } while (0)
4160
4161 if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4162 CHECK_APPEND_1ARG("convert=%s,",
4163 btrfs_bg_type_to_raid_name(bargs->target));
4164
4165 if (flags & BTRFS_BALANCE_ARGS_SOFT)
4166 CHECK_APPEND_NOARG("soft,");
4167
4168 if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4169 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4170 sizeof(tmp_buf));
4171 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4172 }
4173
4174 if (flags & BTRFS_BALANCE_ARGS_USAGE)
4175 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4176
4177 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4178 CHECK_APPEND_2ARG("usage=%u..%u,",
4179 bargs->usage_min, bargs->usage_max);
4180
4181 if (flags & BTRFS_BALANCE_ARGS_DEVID)
4182 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4183
4184 if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4185 CHECK_APPEND_2ARG("drange=%llu..%llu,",
4186 bargs->pstart, bargs->pend);
4187
4188 if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4189 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4190 bargs->vstart, bargs->vend);
4191
4192 if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4193 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4194
4195 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4196 CHECK_APPEND_2ARG("limit=%u..%u,",
4197 bargs->limit_min, bargs->limit_max);
4198
4199 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4200 CHECK_APPEND_2ARG("stripes=%u..%u,",
4201 bargs->stripes_min, bargs->stripes_max);
4202
4203 #undef CHECK_APPEND_2ARG
4204 #undef CHECK_APPEND_1ARG
4205 #undef CHECK_APPEND_NOARG
4206
4207 out_overflow:
4208
4209 if (size_bp < size_buf)
4210 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4211 else
4212 buf[0] = '\0';
4213 }
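/*
 * E.g. converting data to raid1 in soft mode with usage=90 would yield
 * "convert=raid1,soft,usage=90" (the trailing comma is stripped above).
 */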
4214
4215 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4216 {
4217 u32 size_buf = 1024;
4218 char tmp_buf[192] = {'\0'};
4219 char *buf;
4220 char *bp;
4221 u32 size_bp = size_buf;
4222 int ret;
4223 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4224
4225 buf = kzalloc(size_buf, GFP_KERNEL);
4226 if (!buf)
4227 return;
4228
4229 bp = buf;
4230
4231 #define CHECK_APPEND_1ARG(a, v1) \
4232 do { \
4233 ret = snprintf(bp, size_bp, (a), (v1)); \
4234 if (ret < 0 || ret >= size_bp) \
4235 goto out_overflow; \
4236 size_bp -= ret; \
4237 bp += ret; \
4238 } while (0)
4239
4240 if (bctl->flags & BTRFS_BALANCE_FORCE)
4241 CHECK_APPEND_1ARG("%s", "-f ");
4242
4243 if (bctl->flags & BTRFS_BALANCE_DATA) {
4244 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4245 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4246 }
4247
4248 if (bctl->flags & BTRFS_BALANCE_METADATA) {
4249 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4250 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4251 }
4252
4253 if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4254 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4255 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4256 }
4257
4258 #undef CHECK_APPEND_1ARG
4259
4260 out_overflow:
4261
4262 if (size_bp < size_buf)
4263 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4264 btrfs_info(fs_info, "balance: %s %s",
4265 (bctl->flags & BTRFS_BALANCE_RESUME) ?
4266 "resume" : "start", buf);
4267
4268 kfree(buf);
4269 }
4270
4271 /*
4272 * Should be called with the balance mutex held
4273 */
4274 int btrfs_balance(struct btrfs_fs_info *fs_info,
4275 struct btrfs_balance_control *bctl,
4276 struct btrfs_ioctl_balance_args *bargs)
4277 {
4278 u64 meta_target, data_target;
4279 u64 allowed;
4280 int mixed = 0;
4281 int ret;
4282 u64 num_devices;
4283 unsigned seq;
4284 bool reducing_redundancy;
4285 int i;
4286
4287 if (btrfs_fs_closing(fs_info) ||
4288 atomic_read(&fs_info->balance_pause_req) ||
4289 btrfs_should_cancel_balance(fs_info)) {
4290 ret = -EINVAL;
4291 goto out;
4292 }
4293
4294 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4295 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4296 mixed = 1;
4297
4298 /*
4299 * In case of mixed groups both data and meta should be picked,
4300 * and identical options should be given for both of them.
4301 */
4302 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4303 if (mixed && (bctl->flags & allowed)) {
4304 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4305 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4306 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4307 btrfs_err(fs_info,
4308 "balance: mixed groups data and metadata options must be the same");
4309 ret = -EINVAL;
4310 goto out;
4311 }
4312 }
4313
4314 /*
4315 * rw_devices will not change at the moment, device add/delete/replace
4316 * are exclusive
4317 */
4318 num_devices = fs_info->fs_devices->rw_devices;
4319
4320 /*
4321 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4322 * special bit for it, to make it easier to distinguish. Thus we need
4323 * to set it manually, or balance would refuse the profile.
4324 */
4325 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4326 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4327 if (num_devices >= btrfs_raid_array[i].devs_min)
4328 allowed |= btrfs_raid_array[i].bg_flag;
4329
4330 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4331 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4332 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
4333 ret = -EINVAL;
4334 goto out;
4335 }
4336
4337 /*
4338 * Allow to reduce metadata or system integrity only if force set for
4339 * profiles with redundancy (copies, parity)
4340 */
4341 allowed = 0;
4342 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4343 if (btrfs_raid_array[i].ncopies >= 2 ||
4344 btrfs_raid_array[i].tolerated_failures >= 1)
4345 allowed |= btrfs_raid_array[i].bg_flag;
4346 }
4347 do {
4348 seq = read_seqbegin(&fs_info->profiles_lock);
4349
4350 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4351 (fs_info->avail_system_alloc_bits & allowed) &&
4352 !(bctl->sys.target & allowed)) ||
4353 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4354 (fs_info->avail_metadata_alloc_bits & allowed) &&
4355 !(bctl->meta.target & allowed)))
4356 reducing_redundancy = true;
4357 else
4358 reducing_redundancy = false;
4359
4360 /* if we're not converting, the target field is uninitialized */
4361 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4362 bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4363 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4364 bctl->data.target : fs_info->avail_data_alloc_bits;
4365 } while (read_seqretry(&fs_info->profiles_lock, seq));
4366
4367 if (reducing_redundancy) {
4368 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4369 btrfs_info(fs_info,
4370 "balance: force reducing metadata redundancy");
4371 } else {
4372 btrfs_err(fs_info,
4373 "balance: reduces metadata redundancy, use --force if you want this");
4374 ret = -EINVAL;
4375 goto out;
4376 }
4377 }
4378
4379 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4380 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4381 btrfs_warn(fs_info,
4382 "balance: metadata profile %s has lower redundancy than data profile %s",
4383 btrfs_bg_type_to_raid_name(meta_target),
4384 btrfs_bg_type_to_raid_name(data_target));
4385 }
4386
4387 ret = insert_balance_item(fs_info, bctl);
4388 if (ret && ret != -EEXIST)
4389 goto out;
4390
4391 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4392 BUG_ON(ret == -EEXIST);
4393 BUG_ON(fs_info->balance_ctl);
4394 spin_lock(&fs_info->balance_lock);
4395 fs_info->balance_ctl = bctl;
4396 spin_unlock(&fs_info->balance_lock);
4397 } else {
4398 BUG_ON(ret != -EEXIST);
4399 spin_lock(&fs_info->balance_lock);
4400 update_balance_args(bctl);
4401 spin_unlock(&fs_info->balance_lock);
4402 }
4403
4404 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4405 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4406 describe_balance_start_or_resume(fs_info);
4407 mutex_unlock(&fs_info->balance_mutex);
4408
4409 ret = __btrfs_balance(fs_info);
4410
4411 mutex_lock(&fs_info->balance_mutex);
4412 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4413 btrfs_info(fs_info, "balance: paused");
4414 /*
4415 * Balance can be canceled by:
4416 *
4417 * - Regular cancel request
4418 * Then ret == -ECANCELED and balance_cancel_req > 0
4419 *
4420 * - Fatal signal to "btrfs" process
4421 * Either the signal is caught by wait_reserve_ticket() and the
4422 * callers get -EINTR, or it is caught by
4423 * btrfs_should_cancel_balance() and they get -ECANCELED.
4424 * Either way, in this case balance_cancel_req = 0, and
4425 * ret == -EINTR or ret == -ECANCELED.
4426 *
4427 * So here we only check the return value to catch canceled balance.
4428 */
4429 else if (ret == -ECANCELED || ret == -EINTR)
4430 btrfs_info(fs_info, "balance: canceled");
4431 else
4432 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4433
4434 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4435
4436 if (bargs) {
4437 memset(bargs, 0, sizeof(*bargs));
4438 btrfs_update_ioctl_balance_args(fs_info, bargs);
4439 }
4440
4441 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4442 balance_need_close(fs_info)) {
4443 reset_balance_state(fs_info);
4444 btrfs_exclop_finish(fs_info);
4445 }
4446
4447 wake_up(&fs_info->balance_wait_q);
4448
4449 return ret;
4450 out:
4451 if (bctl->flags & BTRFS_BALANCE_RESUME)
4452 reset_balance_state(fs_info);
4453 else
4454 kfree(bctl);
4455 btrfs_exclop_finish(fs_info);
4456
4457 return ret;
4458 }
4459
4460 static int balance_kthread(void *data)
4461 {
4462 struct btrfs_fs_info *fs_info = data;
4463 int ret = 0;
4464
4465 sb_start_write(fs_info->sb);
4466 mutex_lock(&fs_info->balance_mutex);
4467 if (fs_info->balance_ctl)
4468 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4469 mutex_unlock(&fs_info->balance_mutex);
4470 sb_end_write(fs_info->sb);
4471
4472 return ret;
4473 }
4474
4475 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4476 {
4477 struct task_struct *tsk;
4478
4479 mutex_lock(&fs_info->balance_mutex);
4480 if (!fs_info->balance_ctl) {
4481 mutex_unlock(&fs_info->balance_mutex);
4482 return 0;
4483 }
4484 mutex_unlock(&fs_info->balance_mutex);
4485
4486 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4487 btrfs_info(fs_info, "balance: resume skipped");
4488 return 0;
4489 }
4490
4491 /*
4492 * A ro->rw remount sequence should continue with the paused balance
4493 * regardless of who paused it, the system or the user, so set
4494 * the resume flag.
4495 */
4496 spin_lock(&fs_info->balance_lock);
4497 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4498 spin_unlock(&fs_info->balance_lock);
4499
4500 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4501 return PTR_ERR_OR_ZERO(tsk);
4502 }
4503
4504 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4505 {
4506 struct btrfs_balance_control *bctl;
4507 struct btrfs_balance_item *item;
4508 struct btrfs_disk_balance_args disk_bargs;
4509 struct btrfs_path *path;
4510 struct extent_buffer *leaf;
4511 struct btrfs_key key;
4512 int ret;
4513
4514 path = btrfs_alloc_path();
4515 if (!path)
4516 return -ENOMEM;
4517
4518 key.objectid = BTRFS_BALANCE_OBJECTID;
4519 key.type = BTRFS_TEMPORARY_ITEM_KEY;
4520 key.offset = 0;
4521
4522 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4523 if (ret < 0)
4524 goto out;
4525 if (ret > 0) { /* ret = -ENOENT; */
4526 ret = 0;
4527 goto out;
4528 }
4529
4530 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4531 if (!bctl) {
4532 ret = -ENOMEM;
4533 goto out;
4534 }
4535
4536 leaf = path->nodes[0];
4537 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4538
4539 bctl->flags = btrfs_balance_flags(leaf, item);
4540 bctl->flags |= BTRFS_BALANCE_RESUME;
4541
4542 btrfs_balance_data(leaf, item, &disk_bargs);
4543 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4544 btrfs_balance_meta(leaf, item, &disk_bargs);
4545 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4546 btrfs_balance_sys(leaf, item, &disk_bargs);
4547 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4548
4549 /*
4550 * This should never happen, as the paused balance state is recovered
4551 * during mount without any chance of other exclusive ops to collide.
4552 *
4553 * This gives the exclusive op status to balance and keeps in paused
4554 * state until user intervention (cancel or umount). If the ownership
4555 * cannot be assigned, show a message but do not fail. The balance
4556 * is in a paused state and must have fs_info::balance_ctl properly
4557 * set up.
4558 */
4559 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4560 btrfs_warn(fs_info,
4561 "balance: cannot set exclusive op status, resume manually");
4562
4563 btrfs_release_path(path);
4564
4565 mutex_lock(&fs_info->balance_mutex);
4566 BUG_ON(fs_info->balance_ctl);
4567 spin_lock(&fs_info->balance_lock);
4568 fs_info->balance_ctl = bctl;
4569 spin_unlock(&fs_info->balance_lock);
4570 mutex_unlock(&fs_info->balance_mutex);
4571 out:
4572 btrfs_free_path(path);
4573 return ret;
4574 }
4575
4576 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4577 {
4578 int ret = 0;
4579
4580 mutex_lock(&fs_info->balance_mutex);
4581 if (!fs_info->balance_ctl) {
4582 mutex_unlock(&fs_info->balance_mutex);
4583 return -ENOTCONN;
4584 }
4585
4586 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4587 atomic_inc(&fs_info->balance_pause_req);
4588 mutex_unlock(&fs_info->balance_mutex);
4589
4590 wait_event(fs_info->balance_wait_q,
4591 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4592
4593 mutex_lock(&fs_info->balance_mutex);
4594 /* we are good with balance_ctl ripped off from under us */
4595 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4596 atomic_dec(&fs_info->balance_pause_req);
4597 } else {
4598 ret = -ENOTCONN;
4599 }
4600
4601 mutex_unlock(&fs_info->balance_mutex);
4602 return ret;
4603 }
4604
4605 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4606 {
4607 mutex_lock(&fs_info->balance_mutex);
4608 if (!fs_info->balance_ctl) {
4609 mutex_unlock(&fs_info->balance_mutex);
4610 return -ENOTCONN;
4611 }
4612
4613 /*
4614 * A paused balance with the item stored on disk can be resumed at
4615 * mount time if the mount is read-write. Otherwise it's still paused
4616 * and we must not allow cancelling as it deletes the item.
4617 */
4618 if (sb_rdonly(fs_info->sb)) {
4619 mutex_unlock(&fs_info->balance_mutex);
4620 return -EROFS;
4621 }
4622
4623 atomic_inc(&fs_info->balance_cancel_req);
4624 /*
4625 * if we are running just wait and return, balance item is
4626 * deleted in btrfs_balance in this case
4627 */
4628 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4629 mutex_unlock(&fs_info->balance_mutex);
4630 wait_event(fs_info->balance_wait_q,
4631 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4632 mutex_lock(&fs_info->balance_mutex);
4633 } else {
4634 mutex_unlock(&fs_info->balance_mutex);
4635 /*
4636 * Lock released to allow other waiters to continue; we'll
4637 * reexamine the status once we reacquire it.
4638 */
4639 mutex_lock(&fs_info->balance_mutex);
4640
4641 if (fs_info->balance_ctl) {
4642 reset_balance_state(fs_info);
4643 btrfs_exclop_finish(fs_info);
4644 btrfs_info(fs_info, "balance: canceled");
4645 }
4646 }
4647
4648 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4649 atomic_dec(&fs_info->balance_cancel_req);
4650 mutex_unlock(&fs_info->balance_mutex);
4651 return 0;
4652 }
4653
4654 int btrfs_uuid_scan_kthread(void *data)
4655 {
4656 struct btrfs_fs_info *fs_info = data;
4657 struct btrfs_root *root = fs_info->tree_root;
4658 struct btrfs_key key;
4659 struct btrfs_path *path = NULL;
4660 int ret = 0;
4661 struct extent_buffer *eb;
4662 int slot;
4663 struct btrfs_root_item root_item;
4664 u32 item_size;
4665 struct btrfs_trans_handle *trans = NULL;
4666 bool closing = false;
4667
4668 path = btrfs_alloc_path();
4669 if (!path) {
4670 ret = -ENOMEM;
4671 goto out;
4672 }
4673
4674 key.objectid = 0;
4675 key.type = BTRFS_ROOT_ITEM_KEY;
4676 key.offset = 0;
4677
4678 while (1) {
4679 if (btrfs_fs_closing(fs_info)) {
4680 closing = true;
4681 break;
4682 }
4683 ret = btrfs_search_forward(root, &key, path,
4684 BTRFS_OLDEST_GENERATION);
4685 if (ret) {
4686 if (ret > 0)
4687 ret = 0;
4688 break;
4689 }
4690
4691 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4692 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4693 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4694 key.objectid > BTRFS_LAST_FREE_OBJECTID)
4695 goto skip;
4696
4697 eb = path->nodes[0];
4698 slot = path->slots[0];
4699 item_size = btrfs_item_size_nr(eb, slot);
4700 if (item_size < sizeof(root_item))
4701 goto skip;
4702
4703 read_extent_buffer(eb, &root_item,
4704 btrfs_item_ptr_offset(eb, slot),
4705 (int)sizeof(root_item));
4706 if (btrfs_root_refs(&root_item) == 0)
4707 goto skip;
4708
4709 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4710 !btrfs_is_empty_uuid(root_item.received_uuid)) {
4711 if (trans)
4712 goto update_tree;
4713
4714 btrfs_release_path(path);
4715 /*
4716 * 1 - subvol uuid item
4717 * 1 - received_subvol uuid item
4718 */
4719 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4720 if (IS_ERR(trans)) {
4721 ret = PTR_ERR(trans);
4722 break;
4723 }
4724 continue;
4725 } else {
4726 goto skip;
4727 }
4728 update_tree:
4729 btrfs_release_path(path);
4730 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4731 ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4732 BTRFS_UUID_KEY_SUBVOL,
4733 key.objectid);
4734 if (ret < 0) {
4735 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4736 ret);
4737 break;
4738 }
4739 }
4740
4741 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4742 ret = btrfs_uuid_tree_add(trans,
4743 root_item.received_uuid,
4744 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4745 key.objectid);
4746 if (ret < 0) {
4747 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4748 ret);
4749 break;
4750 }
4751 }
4752
4753 skip:
4754 btrfs_release_path(path);
4755 if (trans) {
4756 ret = btrfs_end_transaction(trans);
4757 trans = NULL;
4758 if (ret)
4759 break;
4760 }
4761
4762 if (key.offset < (u64)-1) {
4763 key.offset++;
4764 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4765 key.offset = 0;
4766 key.type = BTRFS_ROOT_ITEM_KEY;
4767 } else if (key.objectid < (u64)-1) {
4768 key.offset = 0;
4769 key.type = BTRFS_ROOT_ITEM_KEY;
4770 key.objectid++;
4771 } else {
4772 break;
4773 }
4774 cond_resched();
4775 }
4776
4777 out:
4778 btrfs_free_path(path);
4779 if (trans && !IS_ERR(trans))
4780 btrfs_end_transaction(trans);
4781 if (ret)
4782 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4783 else if (!closing)
4784 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4785 up(&fs_info->uuid_tree_rescan_sem);
4786 return 0;
4787 }
4788
4789 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4790 {
4791 struct btrfs_trans_handle *trans;
4792 struct btrfs_root *tree_root = fs_info->tree_root;
4793 struct btrfs_root *uuid_root;
4794 struct task_struct *task;
4795 int ret;
4796
4797 /*
4798 * 1 - root node
4799 * 1 - root item
4800 */
4801 trans = btrfs_start_transaction(tree_root, 2);
4802 if (IS_ERR(trans))
4803 return PTR_ERR(trans);
4804
4805 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4806 if (IS_ERR(uuid_root)) {
4807 ret = PTR_ERR(uuid_root);
4808 btrfs_abort_transaction(trans, ret);
4809 btrfs_end_transaction(trans);
4810 return ret;
4811 }
4812
4813 fs_info->uuid_root = uuid_root;
4814
4815 ret = btrfs_commit_transaction(trans);
4816 if (ret)
4817 return ret;
4818
4819 down(&fs_info->uuid_tree_rescan_sem);
4820 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4821 if (IS_ERR(task)) {
4822 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4823 btrfs_warn(fs_info, "failed to start uuid_scan task");
4824 up(&fs_info->uuid_tree_rescan_sem);
4825 return PTR_ERR(task);
4826 }
4827
4828 return 0;
4829 }
4830
4831 /*
4832 * shrinking a device means finding all of the device extents past
4833 * the new size, and then following the back refs to the chunks.
4834 * The chunk relocation code actually frees the device extent
4835 */
4836 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4837 {
4838 struct btrfs_fs_info *fs_info = device->fs_info;
4839 struct btrfs_root *root = fs_info->dev_root;
4840 struct btrfs_trans_handle *trans;
4841 struct btrfs_dev_extent *dev_extent = NULL;
4842 struct btrfs_path *path;
4843 u64 length;
4844 u64 chunk_offset;
4845 int ret;
4846 int slot;
4847 int failed = 0;
4848 bool retried = false;
4849 struct extent_buffer *l;
4850 struct btrfs_key key;
4851 struct btrfs_super_block *super_copy = fs_info->super_copy;
4852 u64 old_total = btrfs_super_total_bytes(super_copy);
4853 u64 old_size = btrfs_device_get_total_bytes(device);
4854 u64 diff;
4855 u64 start;
4856
4857 new_size = round_down(new_size, fs_info->sectorsize);
4858 start = new_size;
4859 diff = round_down(old_size - new_size, fs_info->sectorsize);
4860
4861 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4862 return -EINVAL;
4863
4864 path = btrfs_alloc_path();
4865 if (!path)
4866 return -ENOMEM;
4867
4868 path->reada = READA_BACK;
4869
4870 trans = btrfs_start_transaction(root, 0);
4871 if (IS_ERR(trans)) {
4872 btrfs_free_path(path);
4873 return PTR_ERR(trans);
4874 }
4875
4876 mutex_lock(&fs_info->chunk_mutex);
4877
4878 btrfs_device_set_total_bytes(device, new_size);
4879 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4880 device->fs_devices->total_rw_bytes -= diff;
4881 atomic64_sub(diff, &fs_info->free_chunk_space);
4882 }
4883
4884 /*
4885 * Once the device's size has been set to the new size, ensure all
4886 * in-memory chunks are synced to disk so that the loop below sees them
4887 * and relocates them accordingly.
4888 */
4889 if (contains_pending_extent(device, &start, diff)) {
4890 mutex_unlock(&fs_info->chunk_mutex);
4891 ret = btrfs_commit_transaction(trans);
4892 if (ret)
4893 goto done;
4894 } else {
4895 mutex_unlock(&fs_info->chunk_mutex);
4896 btrfs_end_transaction(trans);
4897 }
4898
4899 again:
4900 key.objectid = device->devid;
4901 key.offset = (u64)-1;
4902 key.type = BTRFS_DEV_EXTENT_KEY;
4903
4904 do {
4905 mutex_lock(&fs_info->reclaim_bgs_lock);
4906 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4907 if (ret < 0) {
4908 mutex_unlock(&fs_info->reclaim_bgs_lock);
4909 goto done;
4910 }
4911
4912 ret = btrfs_previous_item(root, path, 0, key.type);
4913 if (ret) {
4914 mutex_unlock(&fs_info->reclaim_bgs_lock);
4915 if (ret < 0)
4916 goto done;
4917 ret = 0;
4918 btrfs_release_path(path);
4919 break;
4920 }
4921
4922 l = path->nodes[0];
4923 slot = path->slots[0];
4924 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4925
4926 if (key.objectid != device->devid) {
4927 mutex_unlock(&fs_info->reclaim_bgs_lock);
4928 btrfs_release_path(path);
4929 break;
4930 }
4931
4932 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4933 length = btrfs_dev_extent_length(l, dev_extent);
4934
4935 if (key.offset + length <= new_size) {
4936 mutex_unlock(&fs_info->reclaim_bgs_lock);
4937 btrfs_release_path(path);
4938 break;
4939 }
4940
4941 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4942 btrfs_release_path(path);
4943
4944 /*
4945 * We may be relocating the only data chunk we have,
4946 * which could potentially end up losing the data
4947 * raid profile, so let's allocate an empty one in
4948 * advance.
4949 */
4950 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4951 if (ret < 0) {
4952 mutex_unlock(&fs_info->reclaim_bgs_lock);
4953 goto done;
4954 }
4955
4956 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4957 mutex_unlock(&fs_info->reclaim_bgs_lock);
4958 if (ret == -ENOSPC) {
4959 failed++;
4960 } else if (ret) {
4961 if (ret == -ETXTBSY) {
4962 btrfs_warn(fs_info,
4963 "could not shrink block group %llu due to active swapfile",
4964 chunk_offset);
4965 }
4966 goto done;
4967 }
4968 } while (key.offset-- > 0);
4969
4970 if (failed && !retried) {
4971 failed = 0;
4972 retried = true;
4973 goto again;
4974 } else if (failed && retried) {
4975 ret = -ENOSPC;
4976 goto done;
4977 }
4978
4979 /* Shrinking succeeded, else we would be at "done". */
4980 trans = btrfs_start_transaction(root, 0);
4981 if (IS_ERR(trans)) {
4982 ret = PTR_ERR(trans);
4983 goto done;
4984 }
4985
4986 mutex_lock(&fs_info->chunk_mutex);
4987 /* Clear all state bits beyond the shrunk device size */
4988 clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4989 CHUNK_STATE_MASK);
4990
4991 btrfs_device_set_disk_total_bytes(device, new_size);
4992 if (list_empty(&device->post_commit_list))
4993 list_add_tail(&device->post_commit_list,
4994 &trans->transaction->dev_update_list);
4995
4996 WARN_ON(diff > old_total);
4997 btrfs_set_super_total_bytes(super_copy,
4998 round_down(old_total - diff, fs_info->sectorsize));
4999 mutex_unlock(&fs_info->chunk_mutex);
5000
5001 btrfs_reserve_chunk_metadata(trans, false);
5002 /* Now btrfs_update_device() will change the on-disk size. */
5003 ret = btrfs_update_device(trans, device);
5004 btrfs_trans_release_chunk_metadata(trans);
5005 if (ret < 0) {
5006 btrfs_abort_transaction(trans, ret);
5007 btrfs_end_transaction(trans);
5008 } else {
5009 ret = btrfs_commit_transaction(trans);
5010 }
5011 done:
5012 btrfs_free_path(path);
5013 if (ret) {
5014 mutex_lock(&fs_info->chunk_mutex);
5015 btrfs_device_set_total_bytes(device, old_size);
5016 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
5017 device->fs_devices->total_rw_bytes += diff;
5018 atomic64_add(diff, &fs_info->free_chunk_space);
5019 mutex_unlock(&fs_info->chunk_mutex);
5020 }
5021 return ret;
5022 }
5023
5024 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
5025 struct btrfs_key *key,
5026 struct btrfs_chunk *chunk, int item_size)
5027 {
5028 struct btrfs_super_block *super_copy = fs_info->super_copy;
5029 struct btrfs_disk_key disk_key;
5030 u32 array_size;
5031 u8 *ptr;
5032
5033 lockdep_assert_held(&fs_info->chunk_mutex);
5034
5035 array_size = btrfs_super_sys_array_size(super_copy);
5036 if (array_size + item_size + sizeof(disk_key)
5037 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
5038 return -EFBIG;
5039
5040 ptr = super_copy->sys_chunk_array + array_size;
5041 btrfs_cpu_key_to_disk(&disk_key, key);
5042 memcpy(ptr, &disk_key, sizeof(disk_key));
5043 ptr += sizeof(disk_key);
5044 memcpy(ptr, chunk, item_size);
5045 item_size += sizeof(disk_key);
5046 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
5047
5048 return 0;
5049 }
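/*
 * The sys_chunk_array is a packed sequence of (struct btrfs_disk_key,
 * chunk item) pairs, so the new entry is appended as the key followed
 * by the chunk item, and the array size grows by both lengths.
 */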
5050
5051 /*
5052 * sort the devices in descending order by max_avail, total_avail
5053 */
5054 static int btrfs_cmp_device_info(const void *a, const void *b)
5055 {
5056 const struct btrfs_device_info *di_a = a;
5057 const struct btrfs_device_info *di_b = b;
5058
5059 if (di_a->max_avail > di_b->max_avail)
5060 return -1;
5061 if (di_a->max_avail < di_b->max_avail)
5062 return 1;
5063 if (di_a->total_avail > di_b->total_avail)
5064 return -1;
5065 if (di_a->total_avail < di_b->total_avail)
5066 return 1;
5067 return 0;
5068 }
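/*
 * Used with sort() in gather_device_info(), so after sorting
 * devices_info[0] has the largest hole and devices_info[ndevs - 1]
 * the smallest, which bounds the stripe size chosen later.
 */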
5069
5070 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
5071 {
5072 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5073 return;
5074
5075 btrfs_set_fs_incompat(info, RAID56);
5076 }
5077
5078 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
5079 {
5080 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
5081 return;
5082
5083 btrfs_set_fs_incompat(info, RAID1C34);
5084 }
5085
5086 /*
5087 * Structure used internally by btrfs_create_chunk().
5088 * Wraps the needed parameters.
5089 */
5090 struct alloc_chunk_ctl {
5091 u64 start;
5092 u64 type;
5093 /* Total number of stripes to allocate */
5094 int num_stripes;
5095 /* sub_stripes info for map */
5096 int sub_stripes;
5097 /* Stripes per device */
5098 int dev_stripes;
5099 /* Maximum number of devices to use */
5100 int devs_max;
5101 /* Minimum number of devices to use */
5102 int devs_min;
5103 /* ndevs has to be a multiple of this */
5104 int devs_increment;
5105 /* Number of copies */
5106 int ncopies;
5107 /* Number of stripes worth of bytes to store parity information */
5108 int nparity;
5109 u64 max_stripe_size;
5110 u64 max_chunk_size;
5111 u64 dev_extent_min;
5112 u64 stripe_size;
5113 u64 chunk_size;
5114 int ndevs;
5115 };
5116
5117 static void init_alloc_chunk_ctl_policy_regular(
5118 struct btrfs_fs_devices *fs_devices,
5119 struct alloc_chunk_ctl *ctl)
5120 {
5121 u64 type = ctl->type;
5122
5123 if (type & BTRFS_BLOCK_GROUP_DATA) {
5124 ctl->max_stripe_size = SZ_1G;
5125 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
5126 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5127 /* For larger filesystems, use larger metadata chunks */
5128 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
5129 ctl->max_stripe_size = SZ_1G;
5130 else
5131 ctl->max_stripe_size = SZ_256M;
5132 ctl->max_chunk_size = ctl->max_stripe_size;
5133 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5134 ctl->max_stripe_size = SZ_32M;
5135 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5136 ctl->devs_max = min_t(int, ctl->devs_max,
5137 BTRFS_MAX_DEVS_SYS_CHUNK);
5138 } else {
5139 BUG();
5140 }
5141
5142 /* We don't want a chunk larger than 10% of writable space */
5143 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
5144 ctl->max_chunk_size);
5145 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
5146 }
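/*
 * Summary of the regular policy limits: data uses 1G stripes with a
 * BTRFS_MAX_DATA_CHUNK_SIZE (10G) chunk cap, metadata uses 1G stripes
 * on filesystems over 50G and 256M otherwise, system uses 32M stripes
 * with a 64M chunk cap. All are further capped to 10% of the writable
 * space, since div_factor(x, 1) is roughly x / 10.
 */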
5147
5148 static void init_alloc_chunk_ctl_policy_zoned(
5149 struct btrfs_fs_devices *fs_devices,
5150 struct alloc_chunk_ctl *ctl)
5151 {
5152 u64 zone_size = fs_devices->fs_info->zone_size;
5153 u64 limit;
5154 int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5155 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5156 u64 min_chunk_size = min_data_stripes * zone_size;
5157 u64 type = ctl->type;
5158
5159 ctl->max_stripe_size = zone_size;
5160 if (type & BTRFS_BLOCK_GROUP_DATA) {
5161 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5162 zone_size);
5163 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5164 ctl->max_chunk_size = ctl->max_stripe_size;
5165 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5166 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5167 ctl->devs_max = min_t(int, ctl->devs_max,
5168 BTRFS_MAX_DEVS_SYS_CHUNK);
5169 } else {
5170 BUG();
5171 }
5172
5173 /* We don't want a chunk larger than 10% of writable space */
5174 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
5175 zone_size),
5176 min_chunk_size);
5177 ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5178 ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5179 }
5180
5181 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5182 struct alloc_chunk_ctl *ctl)
5183 {
5184 int index = btrfs_bg_flags_to_raid_index(ctl->type);
5185
5186 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5187 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5188 ctl->devs_max = btrfs_raid_array[index].devs_max;
5189 if (!ctl->devs_max)
5190 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5191 ctl->devs_min = btrfs_raid_array[index].devs_min;
5192 ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5193 ctl->ncopies = btrfs_raid_array[index].ncopies;
5194 ctl->nparity = btrfs_raid_array[index].nparity;
5195 ctl->ndevs = 0;
5196
5197 switch (fs_devices->chunk_alloc_policy) {
5198 case BTRFS_CHUNK_ALLOC_REGULAR:
5199 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5200 break;
5201 case BTRFS_CHUNK_ALLOC_ZONED:
5202 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5203 break;
5204 default:
5205 BUG();
5206 }
5207 }
5208
5209 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5210 struct alloc_chunk_ctl *ctl,
5211 struct btrfs_device_info *devices_info)
5212 {
5213 struct btrfs_fs_info *info = fs_devices->fs_info;
5214 struct btrfs_device *device;
5215 u64 total_avail;
5216 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5217 int ret;
5218 int ndevs = 0;
5219 u64 max_avail;
5220 u64 dev_offset;
5221
5222 /*
5223 * in the first pass through the devices list, we gather information
5224 * about the available holes on each device.
5225 */
5226 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5227 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5228 WARN(1, KERN_ERR
5229 "BTRFS: read-only device in alloc_list\n");
5230 continue;
5231 }
5232
5233 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5234 &device->dev_state) ||
5235 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5236 continue;
5237
5238 if (device->total_bytes > device->bytes_used)
5239 total_avail = device->total_bytes - device->bytes_used;
5240 else
5241 total_avail = 0;
5242
5243 /* If there is no space on this device, skip it. */
5244 if (total_avail < ctl->dev_extent_min)
5245 continue;
5246
5247 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5248 &max_avail);
5249 if (ret && ret != -ENOSPC)
5250 return ret;
5251
5252 if (ret == 0)
5253 max_avail = dev_extent_want;
5254
5255 if (max_avail < ctl->dev_extent_min) {
5256 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5257 btrfs_debug(info,
5258 "%s: devid %llu has no free space, have=%llu want=%llu",
5259 __func__, device->devid, max_avail,
5260 ctl->dev_extent_min);
5261 continue;
5262 }
5263
5264 if (ndevs == fs_devices->rw_devices) {
5265 WARN(1, "%s: found more than %llu devices\n",
5266 __func__, fs_devices->rw_devices);
5267 break;
5268 }
5269 devices_info[ndevs].dev_offset = dev_offset;
5270 devices_info[ndevs].max_avail = max_avail;
5271 devices_info[ndevs].total_avail = total_avail;
5272 devices_info[ndevs].dev = device;
5273 ++ndevs;
5274 }
5275 ctl->ndevs = ndevs;
5276
5277 /*
5278 * now sort the devices by hole size / available space
5279 */
5280 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5281 btrfs_cmp_device_info, NULL);
5282
5283 return 0;
5284 }
5285
5286 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5287 struct btrfs_device_info *devices_info)
5288 {
5289 /* Number of stripes that count for block group size */
5290 int data_stripes;
5291
5292 /*
5293 * The primary goal is to maximize the number of stripes, so use as
5294 * many devices as possible, even if the stripes are not maximum sized.
5295 *
5296 * The DUP profile stores more than one stripe per device; its
5297 * max_avail is the total size, so we have to adjust.
5298 */
5299 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5300 ctl->dev_stripes);
5301 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5302
5303 /* This will have to be fixed for RAID1 and RAID10 over more drives */
5304 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5305
5306 /*
5307 * Use the number of data stripes to figure out how big this chunk is
5308 * really going to be in terms of logical address space, and compare
5309 * that answer with the max chunk size. If it's higher, we try to
5310 * reduce stripe_size.
5311 */
5312 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5313 /*
5314 * Reduce stripe_size, round it up to a 16MB boundary again and
5315 * then use it, unless it ends up being even bigger than the
5316 * previous value we had already.
5317 */
5318 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5319 data_stripes), SZ_16M),
5320 ctl->stripe_size);
5321 }
5322
5323 /* Align to BTRFS_STRIPE_LEN */
5324 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5325 ctl->chunk_size = ctl->stripe_size * data_stripes;
5326
5327 return 0;
5328 }
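/*
 * Example: a 2-device RAID1 metadata chunk (dev_stripes=1, ncopies=2)
 * with a 256M smallest hole gives stripe_size=256M, num_stripes=2 and
 * data_stripes=1, hence a 256M chunk of logical address space.
 */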
5329
5330 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5331 struct btrfs_device_info *devices_info)
5332 {
5333 u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5334 /* Number of stripes that count for block group size */
5335 int data_stripes;
5336
5337 /*
5338 * It should hold because:
5339 * dev_extent_min == dev_extent_want == zone_size * dev_stripes
5340 */
5341 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5342
5343 ctl->stripe_size = zone_size;
5344 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5345 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5346
5347 /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
5348 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5349 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5350 ctl->stripe_size) + ctl->nparity,
5351 ctl->dev_stripes);
5352 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5353 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5354 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5355 }
5356
5357 ctl->chunk_size = ctl->stripe_size * data_stripes;
5358
5359 return 0;
5360 }
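/*
 * Example: a data chunk on zoned devices with zone_size=256M, RAID0
 * (ncopies=1, nparity=0, dev_stripes=1) and max_chunk_size=10G: if
 * more than 40 devices qualify, ndevs is reduced to 10G / 256M = 40
 * so the chunk does not exceed the size cap.
 */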
5361
5362 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5363 struct alloc_chunk_ctl *ctl,
5364 struct btrfs_device_info *devices_info)
5365 {
5366 struct btrfs_fs_info *info = fs_devices->fs_info;
5367
5368 /*
5369 * Round down to the number of usable stripes; devs_increment can be
5370 * any number, so we can't use round_down(), which requires a power of
5371 * 2, while rounddown() is safe.
5372 */
5373 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
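/* E.g. for RAID10 (devs_increment == 2), 5 usable devices round down to 4. */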
5374
5375 if (ctl->ndevs < ctl->devs_min) {
5376 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5377 btrfs_debug(info,
5378 "%s: not enough devices with free space: have=%d minimum required=%d",
5379 __func__, ctl->ndevs, ctl->devs_min);
5380 }
5381 return -ENOSPC;
5382 }
5383
5384 ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5385
5386 switch (fs_devices->chunk_alloc_policy) {
5387 case BTRFS_CHUNK_ALLOC_REGULAR:
5388 return decide_stripe_size_regular(ctl, devices_info);
5389 case BTRFS_CHUNK_ALLOC_ZONED:
5390 return decide_stripe_size_zoned(ctl, devices_info);
5391 default:
5392 BUG();
5393 }
5394 }
5395
5396 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5397 struct alloc_chunk_ctl *ctl,
5398 struct btrfs_device_info *devices_info)
5399 {
5400 struct btrfs_fs_info *info = trans->fs_info;
5401 struct map_lookup *map = NULL;
5402 struct extent_map_tree *em_tree;
5403 struct btrfs_block_group *block_group;
5404 struct extent_map *em;
5405 u64 start = ctl->start;
5406 u64 type = ctl->type;
5407 int ret;
5408 int i;
5409 int j;
5410
5411 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5412 if (!map)
5413 return ERR_PTR(-ENOMEM);
5414 map->num_stripes = ctl->num_stripes;
5415
5416 for (i = 0; i < ctl->ndevs; ++i) {
5417 for (j = 0; j < ctl->dev_stripes; ++j) {
5418 int s = i * ctl->dev_stripes + j;
5419 map->stripes[s].dev = devices_info[i].dev;
5420 map->stripes[s].physical = devices_info[i].dev_offset +
5421 j * ctl->stripe_size;
5422 }
5423 }
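/*
 * E.g. for DUP (dev_stripes == 2) on a single device, stripes 0 and 1
 * land at dev_offset and dev_offset + stripe_size on the same device;
 * for single-stripe-per-device profiles, s is simply i.
 */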
5424 map->stripe_len = BTRFS_STRIPE_LEN;
5425 map->io_align = BTRFS_STRIPE_LEN;
5426 map->io_width = BTRFS_STRIPE_LEN;
5427 map->type = type;
5428 map->sub_stripes = ctl->sub_stripes;
5429
5430 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5431
5432 em = alloc_extent_map();
5433 if (!em) {
5434 kfree(map);
5435 return ERR_PTR(-ENOMEM);
5436 }
5437 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5438 em->map_lookup = map;
5439 em->start = start;
5440 em->len = ctl->chunk_size;
5441 em->block_start = 0;
5442 em->block_len = em->len;
5443 em->orig_block_len = ctl->stripe_size;
5444
5445 em_tree = &info->mapping_tree;
5446 write_lock(&em_tree->lock);
5447 ret = add_extent_mapping(em_tree, em, 0);
5448 if (ret) {
5449 write_unlock(&em_tree->lock);
5450 free_extent_map(em);
5451 return ERR_PTR(ret);
5452 }
5453 write_unlock(&em_tree->lock);
5454
5455 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5456 if (IS_ERR(block_group))
5457 goto error_del_extent;
5458
5459 for (i = 0; i < map->num_stripes; i++) {
5460 struct btrfs_device *dev = map->stripes[i].dev;
5461
5462 btrfs_device_set_bytes_used(dev,
5463 dev->bytes_used + ctl->stripe_size);
5464 if (list_empty(&dev->post_commit_list))
5465 list_add_tail(&dev->post_commit_list,
5466 &trans->transaction->dev_update_list);
5467 }
5468
5469 atomic64_sub(ctl->stripe_size * map->num_stripes,
5470 &info->free_chunk_space);
5471
5472 free_extent_map(em);
5473 check_raid56_incompat_flag(info, type);
5474 check_raid1c34_incompat_flag(info, type);
5475
5476 return block_group;
5477
5478 error_del_extent:
5479 write_lock(&em_tree->lock);
5480 remove_extent_mapping(em_tree, em);
5481 write_unlock(&em_tree->lock);
5482
5483 /* One for our allocation */
5484 free_extent_map(em);
5485 /* One for the tree reference */
5486 free_extent_map(em);
5487
5488 return block_group;
5489 }
5490
5491 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
5492 u64 type)
5493 {
5494 struct btrfs_fs_info *info = trans->fs_info;
5495 struct btrfs_fs_devices *fs_devices = info->fs_devices;
5496 struct btrfs_device_info *devices_info = NULL;
5497 struct alloc_chunk_ctl ctl;
5498 struct btrfs_block_group *block_group;
5499 int ret;
5500
5501 lockdep_assert_held(&info->chunk_mutex);
5502
5503 if (!alloc_profile_is_valid(type, 0)) {
5504 ASSERT(0);
5505 return ERR_PTR(-EINVAL);
5506 }
5507
5508 if (list_empty(&fs_devices->alloc_list)) {
5509 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5510 btrfs_debug(info, "%s: no writable device", __func__);
5511 return ERR_PTR(-ENOSPC);
5512 }
5513
5514 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5515 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5516 ASSERT(0);
5517 return ERR_PTR(-EINVAL);
5518 }
5519
5520 ctl.start = find_next_chunk(info);
5521 ctl.type = type;
5522 init_alloc_chunk_ctl(fs_devices, &ctl);
5523
5524 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5525 GFP_NOFS);
5526 if (!devices_info)
5527 return ERR_PTR(-ENOMEM);
5528
5529 ret = gather_device_info(fs_devices, &ctl, devices_info);
5530 if (ret < 0) {
5531 block_group = ERR_PTR(ret);
5532 goto out;
5533 }
5534
5535 ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5536 if (ret < 0) {
5537 block_group = ERR_PTR(ret);
5538 goto out;
5539 }
5540
5541 block_group = create_chunk(trans, &ctl, devices_info);
5542
5543 out:
5544 kfree(devices_info);
5545 return block_group;
5546 }
5547
5548 /*
5549 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5550 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
5551 * system chunks.
5552 *
5553 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5554 * phases.
5555 */
5556 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5557 struct btrfs_block_group *bg)
5558 {
5559 struct btrfs_fs_info *fs_info = trans->fs_info;
5560 struct btrfs_root *extent_root = fs_info->extent_root;
5561 struct btrfs_root *chunk_root = fs_info->chunk_root;
5562 struct btrfs_key key;
5563 struct btrfs_chunk *chunk;
5564 struct btrfs_stripe *stripe;
5565 struct extent_map *em;
5566 struct map_lookup *map;
5567 size_t item_size;
5568 int i;
5569 int ret;
5570
5571 /*
5572 * We take the chunk_mutex for 2 reasons:
5573 *
5574 * 1) Updates and insertions in the chunk btree must be done while holding
5575 * the chunk_mutex, as well as updating the system chunk array in the
5576 * superblock. See the comment on top of btrfs_chunk_alloc() for the
5577 * details;
5578 *
5579 * 2) To prevent races with the final phase of a device replace operation
5580 * that replaces the device object associated with the map's stripes,
5581 * because the device object's id can change at any time during that
5582 * final phase of the device replace operation
5583 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5584 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5585 * which would cause a failure when updating the device item, which does
5586 * not exist, or persisting a stripe of the chunk item with such ID.
5587 * Here we can't use the device_list_mutex because our caller already
5588 * has locked the chunk_mutex, and the final phase of device replace
5589 * acquires both mutexes - first the device_list_mutex and then the
5590 * chunk_mutex. Using any of those two mutexes protects us from a
5591 * concurrent device replace.
5592 */
5593 lockdep_assert_held(&fs_info->chunk_mutex);
5594
5595 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5596 if (IS_ERR(em)) {
5597 ret = PTR_ERR(em);
5598 btrfs_abort_transaction(trans, ret);
5599 return ret;
5600 }
5601
5602 map = em->map_lookup;
5603 item_size = btrfs_chunk_item_size(map->num_stripes);
5604
5605 chunk = kzalloc(item_size, GFP_NOFS);
5606 if (!chunk) {
5607 ret = -ENOMEM;
5608 btrfs_abort_transaction(trans, ret);
5609 goto out;
5610 }
5611
5612 for (i = 0; i < map->num_stripes; i++) {
5613 struct btrfs_device *device = map->stripes[i].dev;
5614
5615 ret = btrfs_update_device(trans, device);
5616 if (ret)
5617 goto out;
5618 }
5619
5620 stripe = &chunk->stripe;
5621 for (i = 0; i < map->num_stripes; i++) {
5622 struct btrfs_device *device = map->stripes[i].dev;
5623 const u64 dev_offset = map->stripes[i].physical;
5624
5625 btrfs_set_stack_stripe_devid(stripe, device->devid);
5626 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5627 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5628 stripe++;
5629 }
5630
5631 btrfs_set_stack_chunk_length(chunk, bg->length);
5632 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5633 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5634 btrfs_set_stack_chunk_type(chunk, map->type);
5635 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5636 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5637 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5638 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5639 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5640
5641 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5642 key.type = BTRFS_CHUNK_ITEM_KEY;
5643 key.offset = bg->start;
5644
5645 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5646 if (ret)
5647 goto out;
5648
5649 bg->chunk_item_inserted = 1;
5650
5651 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5652 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5653 if (ret)
5654 goto out;
5655 }
5656
5657 out:
5658 kfree(chunk);
5659 free_extent_map(em);
5660 return ret;
5661 }
5662
5663 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5664 {
5665 struct btrfs_fs_info *fs_info = trans->fs_info;
5666 u64 alloc_profile;
5667 struct btrfs_block_group *meta_bg;
5668 struct btrfs_block_group *sys_bg;
5669
5670 /*
5671 * When adding a new device for sprouting, the seed device is read-only
5672 * so we must first allocate a metadata and a system chunk. But before
5673 * adding the block group items to the extent, device and chunk btrees,
5674 * we must first:
5675 *
5676 * 1) Create both chunks without doing any changes to the btrees, as
5677 * otherwise we would get -ENOSPC since the block groups from the
5678 * seed device are read-only;
5679 *
5680 * 2) Add the device item for the new sprout device - finishing the setup
5681 * of a new block group requires updating the device item in the chunk
5682 * btree, so it must exist when we attempt to do it. The previous step
5683 * ensures this does not fail with -ENOSPC.
5684 *
5685 * After that we can add the block group items to their btrees:
5686 * update existing device item in the chunk btree, add a new block group
5687 * item to the extent btree, add a new chunk item to the chunk btree and
5688 * finally add the new device extent items to the devices btree.
5689 */
5690
5691 alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5692 meta_bg = btrfs_create_chunk(trans, alloc_profile);
5693 if (IS_ERR(meta_bg))
5694 return PTR_ERR(meta_bg);
5695
5696 alloc_profile = btrfs_system_alloc_profile(fs_info);
5697 sys_bg = btrfs_create_chunk(trans, alloc_profile);
5698 if (IS_ERR(sys_bg))
5699 return PTR_ERR(sys_bg);
5700
5701 return 0;
5702 }
5703
5704 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5705 {
5706 const int index = btrfs_bg_flags_to_raid_index(map->type);
5707
5708 return btrfs_raid_array[index].tolerated_failures;
5709 }
5710
5711 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5712 {
5713 struct extent_map *em;
5714 struct map_lookup *map;
5715 int readonly = 0;
5716 int miss_ndevs = 0;
5717 int i;
5718
5719 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5720 if (IS_ERR(em))
5721 return 1;
5722
5723 map = em->map_lookup;
5724 for (i = 0; i < map->num_stripes; i++) {
5725 if (test_bit(BTRFS_DEV_STATE_MISSING,
5726 &map->stripes[i].dev->dev_state)) {
5727 miss_ndevs++;
5728 continue;
5729 }
5730 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5731 &map->stripes[i].dev->dev_state)) {
5732 readonly = 1;
5733 goto end;
5734 }
5735 }
5736
5737 /*
5738 * If the number of missing devices is larger than max errors,
5739 * we cannot write the data into that chunk successfully, so
5740 * set it readonly.
5741 */
5742 if (miss_ndevs > btrfs_chunk_max_errors(map))
5743 readonly = 1;
5744 end:
5745 free_extent_map(em);
5746 return readonly;
5747 }
5748
5749 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5750 {
5751 struct extent_map *em;
5752
5753 while (1) {
5754 write_lock(&tree->lock);
5755 em = lookup_extent_mapping(tree, 0, (u64)-1);
5756 if (em)
5757 remove_extent_mapping(tree, em);
5758 write_unlock(&tree->lock);
5759 if (!em)
5760 break;
5761 /* once for us */
5762 free_extent_map(em);
5763 /* once for the tree */
5764 free_extent_map(em);
5765 }
5766 }
5767
5768 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5769 {
5770 struct extent_map *em;
5771 struct map_lookup *map;
5772 int ret;
5773
5774 em = btrfs_get_chunk_map(fs_info, logical, len);
5775 if (IS_ERR(em))
5776 /*
5777 * We could return errors for these cases, but that could get
5778 * ugly and we'd probably end up doing the same thing anyway: do
5779 * nothing else and exit, so return 1 so the callers don't try
5780 * to use other copies.
5781 */
5782 return 1;
5783
5784 map = em->map_lookup;
5785 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5786 ret = map->num_stripes;
5787 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5788 ret = map->sub_stripes;
5789 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5790 ret = 2;
5791 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5792 /*
5793 * There could be two corrupted data stripes, so we need
5794 * to retry in a loop in order to rebuild the correct data.
5795 *
5796 * Fail a stripe at a time on every retry except the
5797 * stripe under reconstruction.
5798 */
5799 ret = map->num_stripes;
5800 else
5801 ret = 1;
5802 free_extent_map(em);
5803
5804 down_read(&fs_info->dev_replace.rwsem);
5805 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5806 fs_info->dev_replace.tgtdev)
5807 ret++;
5808 up_read(&fs_info->dev_replace.rwsem);
5809
5810 return ret;
5811 }
5812
5813 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5814 u64 logical)
5815 {
5816 struct extent_map *em;
5817 struct map_lookup *map;
5818 unsigned long len = fs_info->sectorsize;
5819
5820 em = btrfs_get_chunk_map(fs_info, logical, len);
5821
5822 if (!WARN_ON(IS_ERR(em))) {
5823 map = em->map_lookup;
5824 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5825 len = map->stripe_len * nr_data_stripes(map);
5826 free_extent_map(em);
5827 }
5828 return len;
5829 }
5830
5831 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5832 {
5833 struct extent_map *em;
5834 struct map_lookup *map;
5835 int ret = 0;
5836
5837 em = btrfs_get_chunk_map(fs_info, logical, len);
5838
5839 if (!WARN_ON(IS_ERR(em))) {
5840 map = em->map_lookup;
5841 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5842 ret = 1;
5843 free_extent_map(em);
5844 }
5845 return ret;
5846 }
5847
5848 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5849 struct map_lookup *map, int first,
5850 int dev_replace_is_ongoing)
5851 {
5852 int i;
5853 int num_stripes;
5854 int preferred_mirror;
5855 int tolerance;
5856 struct btrfs_device *srcdev;
5857
5858 ASSERT((map->type &
5859 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5860
5861 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5862 num_stripes = map->sub_stripes;
5863 else
5864 num_stripes = map->num_stripes;
5865
5866 switch (fs_info->fs_devices->read_policy) {
5867 default:
5868 /* Shouldn't happen, just warn and use pid instead of failing */
5869 btrfs_warn_rl(fs_info,
5870 "unknown read_policy type %u, reset to pid",
5871 fs_info->fs_devices->read_policy);
5872 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5873 fallthrough;
5874 case BTRFS_READ_POLICY_PID:
5875 preferred_mirror = first + (current->pid % num_stripes);
5876 break;
5877 }
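/* E.g. with RAID1 (num_stripes == 2), a task with pid 7 prefers mirror first + 1. */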
5878
5879 if (dev_replace_is_ongoing &&
5880 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5881 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5882 srcdev = fs_info->dev_replace.srcdev;
5883 else
5884 srcdev = NULL;
5885
5886 /*
5887 * Try to avoid the drive that is the source drive for a
5888 * dev-replace procedure; only choose it if no other non-missing
5889 * mirror is available.
5890 */
5891 for (tolerance = 0; tolerance < 2; tolerance++) {
5892 if (map->stripes[preferred_mirror].dev->bdev &&
5893 (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5894 return preferred_mirror;
5895 for (i = first; i < first + num_stripes; i++) {
5896 if (map->stripes[i].dev->bdev &&
5897 (tolerance || map->stripes[i].dev != srcdev))
5898 return i;
5899 }
5900 }
5901
5902 /* We couldn't find one that doesn't fail. Just return something
5903 * and the I/O error handling code will clean up eventually.
5904 */
5905 return preferred_mirror;
5906 }
5907
5908 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5909 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
5910 {
5911 int i;
5912 int again = 1;
5913
5914 while (again) {
5915 again = 0;
5916 for (i = 0; i < num_stripes - 1; i++) {
5917 /* Swap if parity is on a smaller index */
5918 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
5919 swap(bioc->stripes[i], bioc->stripes[i + 1]);
5920 swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
5921 again = 1;
5922 }
5923 }
5924 }
5925 }
5926
5927 static struct btrfs_io_context *alloc_btrfs_io_context(int total_stripes,
5928 int real_stripes)
5929 {
5930 struct btrfs_io_context *bioc = kzalloc(
5931 /* The size of btrfs_io_context */
5932 sizeof(struct btrfs_io_context) +
5933 /* Plus the variable array for the stripes */
5934 sizeof(struct btrfs_io_stripe) * (total_stripes) +
5935 /* Plus the variable array for the tgt dev */
5936 sizeof(int) * (real_stripes) +
5937 /*
5938 * Plus the raid_map, which includes both the tgt dev
5939 * and the stripes.
5940 */
5941 sizeof(u64) * (total_stripes),
5942 GFP_NOFS|__GFP_NOFAIL);
5943
5944 atomic_set(&bioc->error, 0);
5945 refcount_set(&bioc->refs, 1);
5946
5947 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
5948 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);
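/*
 * The single allocation above is laid out as:
 *   [struct btrfs_io_context][total_stripes * btrfs_io_stripe]
 *   [real_stripes * int (tgtdev_map)][total_stripes * u64 (raid_map)]
 */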
5949
5950 return bioc;
5951 }
5952
5953 void btrfs_get_bioc(struct btrfs_io_context *bioc)
5954 {
5955 WARN_ON(!refcount_read(&bioc->refs));
5956 refcount_inc(&bioc->refs);
5957 }
5958
5959 void btrfs_put_bioc(struct btrfs_io_context *bioc)
5960 {
5961 if (!bioc)
5962 return;
5963 if (refcount_dec_and_test(&bioc->refs))
5964 kfree(bioc);
5965 }
5966
5967 /* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5968 /*
5969 * Note that discard won't be sent to the target device of a device
5970 * replace.
5971 */
5972 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5973 u64 logical, u64 *length_ret,
5974 struct btrfs_io_context **bioc_ret)
5975 {
5976 struct extent_map *em;
5977 struct map_lookup *map;
5978 struct btrfs_io_context *bioc;
5979 u64 length = *length_ret;
5980 u64 offset;
5981 u64 stripe_nr;
5982 u64 stripe_nr_end;
5983 u64 stripe_end_offset;
5984 u64 stripe_cnt;
5985 u64 stripe_len;
5986 u64 stripe_offset;
5987 u64 num_stripes;
5988 u32 stripe_index;
5989 u32 factor = 0;
5990 u32 sub_stripes = 0;
5991 u64 stripes_per_dev = 0;
5992 u32 remaining_stripes = 0;
5993 u32 last_stripe = 0;
5994 int ret = 0;
5995 int i;
5996
5997 /* Discard always returns a bioc. */
5998 ASSERT(bioc_ret);
5999
6000 em = btrfs_get_chunk_map(fs_info, logical, length);
6001 if (IS_ERR(em))
6002 return PTR_ERR(em);
6003
6004 map = em->map_lookup;
6005 /* we don't discard raid56 yet */
6006 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6007 ret = -EOPNOTSUPP;
6008 goto out;
6009 }
6010
6011 offset = logical - em->start;
6012 length = min_t(u64, em->start + em->len - logical, length);
6013 *length_ret = length;
6014
6015 stripe_len = map->stripe_len;
6016 /*
6017 * stripe_nr counts the total number of stripes we have to stride
6018 * to get to this block
6019 */
6020 stripe_nr = div64_u64(offset, stripe_len);
6021
6022 /* stripe_offset is the offset of this block in its stripe */
6023 stripe_offset = offset - stripe_nr * stripe_len;
6024
6025 stripe_nr_end = round_up(offset + length, map->stripe_len);
6026 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
6027 stripe_cnt = stripe_nr_end - stripe_nr;
6028 stripe_end_offset = stripe_nr_end * map->stripe_len -
6029 (offset + length);
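/*
 * A worked example, assuming stripe_len == 64KiB: for offset == 96KiB
 * and length == 150KiB, stripe_nr == 1, stripe_offset == 32KiB,
 * stripe_nr_end == 4, stripe_cnt == 3 and stripe_end_offset == 10KiB
 * (the unused tail of the last stripe).
 */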
6030 /*
6031 * after this, stripe_nr is the number of stripes on this
6032 * device we have to walk to find the data, and stripe_index is
6033 * the number of our device in the stripe array
6034 */
6035 num_stripes = 1;
6036 stripe_index = 0;
6037 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6038 BTRFS_BLOCK_GROUP_RAID10)) {
6039 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6040 sub_stripes = 1;
6041 else
6042 sub_stripes = map->sub_stripes;
6043
6044 factor = map->num_stripes / sub_stripes;
6045 num_stripes = min_t(u64, map->num_stripes,
6046 sub_stripes * stripe_cnt);
6047 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6048 stripe_index *= sub_stripes;
6049 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
6050 &remaining_stripes);
6051 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
6052 last_stripe *= sub_stripes;
6053 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6054 BTRFS_BLOCK_GROUP_DUP)) {
6055 num_stripes = map->num_stripes;
6056 } else {
6057 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6058 &stripe_index);
6059 }
6060
6061 bioc = alloc_btrfs_io_context(num_stripes, 0);
6062 if (!bioc) {
6063 ret = -ENOMEM;
6064 goto out;
6065 }
6066
6067 for (i = 0; i < num_stripes; i++) {
6068 bioc->stripes[i].physical =
6069 map->stripes[stripe_index].physical +
6070 stripe_offset + stripe_nr * map->stripe_len;
6071 bioc->stripes[i].dev = map->stripes[stripe_index].dev;
6072
6073 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6074 BTRFS_BLOCK_GROUP_RAID10)) {
6075 bioc->stripes[i].length = stripes_per_dev *
6076 map->stripe_len;
6077
6078 if (i / sub_stripes < remaining_stripes)
6079 bioc->stripes[i].length += map->stripe_len;
6080
6081 /*
6082 * Special for the first stripe and
6083 * the last stripe:
6084 *
6085 * |-------|...|-------|
6086 * |----------|
6087 * off end_off
6088 */
6089 if (i < sub_stripes)
6090 bioc->stripes[i].length -= stripe_offset;
6091
6092 if (stripe_index >= last_stripe &&
6093 stripe_index <= (last_stripe +
6094 sub_stripes - 1))
6095 bioc->stripes[i].length -= stripe_end_offset;
6096
6097 if (i == sub_stripes - 1)
6098 stripe_offset = 0;
6099 } else {
6100 bioc->stripes[i].length = length;
6101 }
6102
6103 stripe_index++;
6104 if (stripe_index == map->num_stripes) {
6105 stripe_index = 0;
6106 stripe_nr++;
6107 }
6108 }
6109
6110 *bioc_ret = bioc;
6111 bioc->map_type = map->type;
6112 bioc->num_stripes = num_stripes;
6113 out:
6114 free_extent_map(em);
6115 return ret;
6116 }
6117
6118 /*
6119 * In dev-replace case, for repair case (that's the only case where the mirror
6120 * is selected explicitly when calling btrfs_map_block), blocks left of the
6121 * left cursor can also be read from the target drive.
6122 *
6123 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
6124 * array of stripes.
6125 * For READ, it also needs to be supported using the same mirror number.
6126 *
6127 * If the requested block is not left of the left cursor, EIO is returned. This
6128 * can happen because btrfs_num_copies() returns one more in the dev-replace
6129 * case.
6130 */
6131 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
6132 u64 logical, u64 length,
6133 u64 srcdev_devid, int *mirror_num,
6134 u64 *physical)
6135 {
6136 struct btrfs_io_context *bioc = NULL;
6137 int num_stripes;
6138 int index_srcdev = 0;
6139 int found = 0;
6140 u64 physical_of_found = 0;
6141 int i;
6142 int ret = 0;
6143
6144 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6145 logical, &length, &bioc, 0, 0);
6146 if (ret) {
6147 ASSERT(bioc == NULL);
6148 return ret;
6149 }
6150
6151 num_stripes = bioc->num_stripes;
6152 if (*mirror_num > num_stripes) {
6153 /*
6154 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6155 * which means that the requested area is not left of the left
6156 * cursor.
6157 */
6158 btrfs_put_bioc(bioc);
6159 return -EIO;
6160 }
6161
6162 /*
6163 * process the rest of the function using the mirror_num of the source
6164 * drive. Therefore look it up first. At the end, patch the device
6165 * pointer to the one of the target drive.
6166 */
6167 for (i = 0; i < num_stripes; i++) {
6168 if (bioc->stripes[i].dev->devid != srcdev_devid)
6169 continue;
6170
6171 /*
6172 * In case of DUP, in order to keep it simple, only add the
6173 * mirror with the lowest physical address
6174 */
6175 if (found &&
6176 physical_of_found <= bioc->stripes[i].physical)
6177 continue;
6178
6179 index_srcdev = i;
6180 found = 1;
6181 physical_of_found = bioc->stripes[i].physical;
6182 }
6183
6184 btrfs_put_bioc(bioc);
6185
6186 ASSERT(found);
6187 if (!found)
6188 return -EIO;
6189
6190 *mirror_num = index_srcdev + 1;
6191 *physical = physical_of_found;
6192 return ret;
6193 }
6194
6195 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6196 {
6197 struct btrfs_block_group *cache;
6198 bool ret;
6199
6200 /* Non-zoned filesystems do not use the "to_copy" flag */
6201 if (!btrfs_is_zoned(fs_info))
6202 return false;
6203
6204 cache = btrfs_lookup_block_group(fs_info, logical);
6205
6206 spin_lock(&cache->lock);
6207 ret = cache->to_copy;
6208 spin_unlock(&cache->lock);
6209
6210 btrfs_put_block_group(cache);
6211 return ret;
6212 }
6213
6214 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6215 struct btrfs_io_context **bioc_ret,
6216 struct btrfs_dev_replace *dev_replace,
6217 u64 logical,
6218 int *num_stripes_ret, int *max_errors_ret)
6219 {
6220 struct btrfs_io_context *bioc = *bioc_ret;
6221 u64 srcdev_devid = dev_replace->srcdev->devid;
6222 int tgtdev_indexes = 0;
6223 int num_stripes = *num_stripes_ret;
6224 int max_errors = *max_errors_ret;
6225 int i;
6226
6227 if (op == BTRFS_MAP_WRITE) {
6228 int index_where_to_add;
6229
6230 /*
6231 * A block group which has "to_copy" set will eventually be
6232 * copied by the dev-replace process. We can avoid cloning the IO here.
6233 */
6234 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6235 return;
6236
6237 /*
6238 * duplicate the write operations while the dev replace
6239 * procedure is running. Since the copying of the old disk to
6240 * the new disk takes place at run time while the filesystem is
6241 * mounted writable, the regular write operations to the old
6242 * disk have to be duplicated to go to the new disk as well.
6243 *
6244 * Note that device->missing is handled by the caller, and that
6245 * the write to the old disk is already set up in the stripes
6246 * array.
6247 */
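/*
 * E.g. a hypothetical DUP write with both stripes on the replaced
 * device grows from 2 to 4 stripes: the two originals plus two
 * clones pointing at dev_replace->tgtdev, and max_errors rises by
 * 2 accordingly.
 */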
6248 index_where_to_add = num_stripes;
6249 for (i = 0; i < num_stripes; i++) {
6250 if (bioc->stripes[i].dev->devid == srcdev_devid) {
6251 /* write to new disk, too */
6252 struct btrfs_io_stripe *new =
6253 bioc->stripes + index_where_to_add;
6254 struct btrfs_io_stripe *old =
6255 bioc->stripes + i;
6256
6257 new->physical = old->physical;
6258 new->length = old->length;
6259 new->dev = dev_replace->tgtdev;
6260 bioc->tgtdev_map[i] = index_where_to_add;
6261 index_where_to_add++;
6262 max_errors++;
6263 tgtdev_indexes++;
6264 }
6265 }
6266 num_stripes = index_where_to_add;
6267 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
6268 int index_srcdev = 0;
6269 int found = 0;
6270 u64 physical_of_found = 0;
6271
6272 /*
6273 * During the dev-replace procedure, the target drive can also
6274 * be used to read data in case it is needed to repair a corrupt
6275 * block elsewhere. This is possible if the requested area is
6276 * left of the left cursor. In this area, the target drive is a
6277 * full copy of the source drive.
6278 */
6279 for (i = 0; i < num_stripes; i++) {
6280 if (bioc->stripes[i].dev->devid == srcdev_devid) {
6281 /*
6282 * In case of DUP, in order to keep it simple,
6283 * only add the mirror with the lowest physical
6284 * address
6285 */
6286 if (found &&
6287 physical_of_found <= bioc->stripes[i].physical)
6288 continue;
6289 index_srcdev = i;
6290 found = 1;
6291 physical_of_found = bioc->stripes[i].physical;
6292 }
6293 }
6294 if (found) {
6295 struct btrfs_io_stripe *tgtdev_stripe =
6296 bioc->stripes + num_stripes;
6297
6298 tgtdev_stripe->physical = physical_of_found;
6299 tgtdev_stripe->length =
6300 bioc->stripes[index_srcdev].length;
6301 tgtdev_stripe->dev = dev_replace->tgtdev;
6302 bioc->tgtdev_map[index_srcdev] = num_stripes;
6303
6304 tgtdev_indexes++;
6305 num_stripes++;
6306 }
6307 }
6308
6309 *num_stripes_ret = num_stripes;
6310 *max_errors_ret = max_errors;
6311 bioc->num_tgtdevs = tgtdev_indexes;
6312 *bioc_ret = bioc;
6313 }
6314
6315 static bool need_full_stripe(enum btrfs_map_op op)
6316 {
6317 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6318 }
6319
6320 /*
6321 * Calculate the geometry of a particular (address, len) tuple. This
6322 * information is used to calculate how big a particular bio can get before it
6323 * straddles a stripe.
6324 *
6325 * @fs_info: the filesystem
6326 * @em: mapping containing the logical extent
6327 * @op: type of operation - write or read
6328 * @logical: address that we want to figure out the geometry of
6329 * @io_geom: pointer used to return values
6330 *
6331 * Returns < 0 in case a chunk for the given logical address cannot be found,
6332 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
6333 */
6334 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6335 enum btrfs_map_op op, u64 logical,
6336 struct btrfs_io_geometry *io_geom)
6337 {
6338 struct map_lookup *map;
6339 u64 len;
6340 u64 offset;
6341 u64 stripe_offset;
6342 u64 stripe_nr;
6343 u64 stripe_len;
6344 u64 raid56_full_stripe_start = (u64)-1;
6345 int data_stripes;
6346
6347 ASSERT(op != BTRFS_MAP_DISCARD);
6348
6349 map = em->map_lookup;
6350 /* Offset of this logical address in the chunk */
6351 offset = logical - em->start;
6352 /* Len of a stripe in a chunk */
6353 stripe_len = map->stripe_len;
6354 /* Stripe where this block falls in */
6355 stripe_nr = div64_u64(offset, stripe_len);
6356 /* Offset of stripe in the chunk */
6357 stripe_offset = stripe_nr * stripe_len;
6358 if (offset < stripe_offset) {
6359 btrfs_crit(fs_info,
6360 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6361 stripe_offset, offset, em->start, logical, stripe_len);
6362 return -EINVAL;
6363 }
6364
6365 /* stripe_offset is the offset of this block in its stripe */
6366 stripe_offset = offset - stripe_offset;
6367 data_stripes = nr_data_stripes(map);
6368
6369 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6370 u64 max_len = stripe_len - stripe_offset;
6371
6372 /*
6373 * In case of raid56, we need to know the stripe aligned start
6374 */
6375 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6376 unsigned long full_stripe_len = stripe_len * data_stripes;
6377 raid56_full_stripe_start = offset;
6378
6379 /*
6380 * Allow a write of a full stripe, but make sure we
6381 * don't allow straddling of stripes
6382 */
6383 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6384 full_stripe_len);
6385 raid56_full_stripe_start *= full_stripe_len;
6386
6387 /*
6388 * For writes to RAID[56], allow a full stripeset across
6389 * all disks. For other RAID types and for RAID[56]
6390 * reads, just allow a single stripe (on a single disk).
6391 */
6392 if (op == BTRFS_MAP_WRITE) {
6393 max_len = stripe_len * data_stripes -
6394 (offset - raid56_full_stripe_start);
6395 }
6396 }
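/*
 * E.g. for RAID5 over 3 devices (data_stripes == 2, stripe_len ==
 * 64KiB) the full stripe is 128KiB wide; an offset of 200KiB maps to
 * raid56_full_stripe_start == 128KiB, and a write may span at most
 * 128KiB - 72KiB == 56KiB before straddling the next full stripe.
 */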
6397 len = min_t(u64, em->len - offset, max_len);
6398 } else {
6399 len = em->len - offset;
6400 }
6401
6402 io_geom->len = len;
6403 io_geom->offset = offset;
6404 io_geom->stripe_len = stripe_len;
6405 io_geom->stripe_nr = stripe_nr;
6406 io_geom->stripe_offset = stripe_offset;
6407 io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6408
6409 return 0;
6410 }
6411
6412 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6413 enum btrfs_map_op op,
6414 u64 logical, u64 *length,
6415 struct btrfs_io_context **bioc_ret,
6416 int mirror_num, int need_raid_map)
6417 {
6418 struct extent_map *em;
6419 struct map_lookup *map;
6420 u64 stripe_offset;
6421 u64 stripe_nr;
6422 u64 stripe_len;
6423 u32 stripe_index;
6424 int data_stripes;
6425 int i;
6426 int ret = 0;
6427 int num_stripes;
6428 int max_errors = 0;
6429 int tgtdev_indexes = 0;
6430 struct btrfs_io_context *bioc = NULL;
6431 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6432 int dev_replace_is_ongoing = 0;
6433 int num_alloc_stripes;
6434 int patch_the_first_stripe_for_dev_replace = 0;
6435 u64 physical_to_patch_in_first_stripe = 0;
6436 u64 raid56_full_stripe_start = (u64)-1;
6437 struct btrfs_io_geometry geom;
6438
6439 ASSERT(bioc_ret);
6440 ASSERT(op != BTRFS_MAP_DISCARD);
6441
6442 em = btrfs_get_chunk_map(fs_info, logical, *length);
6443 ASSERT(!IS_ERR(em));
6444
6445 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
6446 if (ret < 0)
6447 return ret;
6448
6449 map = em->map_lookup;
6450
6451 *length = geom.len;
6452 stripe_len = geom.stripe_len;
6453 stripe_nr = geom.stripe_nr;
6454 stripe_offset = geom.stripe_offset;
6455 raid56_full_stripe_start = geom.raid56_stripe_offset;
6456 data_stripes = nr_data_stripes(map);
6457
6458 down_read(&dev_replace->rwsem);
6459 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6460 /*
6461 * Hold the semaphore for read during the whole operation, write is
6462 * requested at commit time but must wait.
6463 */
6464 if (!dev_replace_is_ongoing)
6465 up_read(&dev_replace->rwsem);
6466
6467 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6468 !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6469 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6470 dev_replace->srcdev->devid,
6471 &mirror_num,
6472 &physical_to_patch_in_first_stripe);
6473 if (ret)
6474 goto out;
6475 else
6476 patch_the_first_stripe_for_dev_replace = 1;
6477 } else if (mirror_num > map->num_stripes) {
6478 mirror_num = 0;
6479 }
6480
6481 num_stripes = 1;
6482 stripe_index = 0;
6483 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6484 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6485 &stripe_index);
6486 if (!need_full_stripe(op))
6487 mirror_num = 1;
6488 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6489 if (need_full_stripe(op))
6490 num_stripes = map->num_stripes;
6491 else if (mirror_num)
6492 stripe_index = mirror_num - 1;
6493 else {
6494 stripe_index = find_live_mirror(fs_info, map, 0,
6495 dev_replace_is_ongoing);
6496 mirror_num = stripe_index + 1;
6497 }
6498
6499 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6500 if (need_full_stripe(op)) {
6501 num_stripes = map->num_stripes;
6502 } else if (mirror_num) {
6503 stripe_index = mirror_num - 1;
6504 } else {
6505 mirror_num = 1;
6506 }
6507
6508 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6509 u32 factor = map->num_stripes / map->sub_stripes;
6510
6511 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6512 stripe_index *= map->sub_stripes;
6513
6514 if (need_full_stripe(op))
6515 num_stripes = map->sub_stripes;
6516 else if (mirror_num)
6517 stripe_index += mirror_num - 1;
6518 else {
6519 int old_stripe_index = stripe_index;
6520 stripe_index = find_live_mirror(fs_info, map,
6521 stripe_index,
6522 dev_replace_is_ongoing);
6523 mirror_num = stripe_index - old_stripe_index + 1;
6524 }
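/*
 * E.g. RAID10 over 4 devices (sub_stripes == 2, factor == 2):
 * stripe_nr == 3 selects copy set 3 % 2 == 1, i.e. stripe_index == 2,
 * and a full-stripe op returns both mirrors at indices 2 and 3.
 */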
6525
6526 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6527 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6528 /* push stripe_nr back to the start of the full stripe */
6529 stripe_nr = div64_u64(raid56_full_stripe_start,
6530 stripe_len * data_stripes);
6531
6532 /* RAID[56] write or recovery. Return all stripes */
6533 num_stripes = map->num_stripes;
6534 max_errors = nr_parity_stripes(map);
6535
6536 *length = map->stripe_len;
6537 stripe_index = 0;
6538 stripe_offset = 0;
6539 } else {
6540 /*
6541 * Mirror #0 or #1 means the original data block.
6542 * Mirror #2 is the RAID5 parity block.
6543 * Mirror #3 is the RAID6 Q block.
6544 */
6545 stripe_nr = div_u64_rem(stripe_nr,
6546 data_stripes, &stripe_index);
6547 if (mirror_num > 1)
6548 stripe_index = data_stripes + mirror_num - 2;
6549
6550 /* We distribute the parity blocks across stripes */
6551 div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6552 &stripe_index);
6553 if (!need_full_stripe(op) && mirror_num <= 1)
6554 mirror_num = 1;
6555 }
6556 } else {
6557 /*
6558 * after this, stripe_nr is the number of stripes on this
6559 * device we have to walk to find the data, and stripe_index is
6560 * the number of our device in the stripe array
6561 */
6562 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6563 &stripe_index);
6564 mirror_num = stripe_index + 1;
6565 }
6566 if (stripe_index >= map->num_stripes) {
6567 btrfs_crit(fs_info,
6568 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6569 stripe_index, map->num_stripes);
6570 ret = -EINVAL;
6571 goto out;
6572 }
6573
6574 num_alloc_stripes = num_stripes;
6575 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6576 if (op == BTRFS_MAP_WRITE)
6577 num_alloc_stripes <<= 1;
6578 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6579 num_alloc_stripes++;
6580 tgtdev_indexes = num_stripes;
6581 }
6582
6583 bioc = alloc_btrfs_io_context(num_alloc_stripes, tgtdev_indexes);
6584 if (!bioc) {
6585 ret = -ENOMEM;
6586 goto out;
6587 }
6588
6589 for (i = 0; i < num_stripes; i++) {
6590 bioc->stripes[i].physical = map->stripes[stripe_index].physical +
6591 stripe_offset + stripe_nr * map->stripe_len;
6592 bioc->stripes[i].dev = map->stripes[stripe_index].dev;
6593 stripe_index++;
6594 }
6595
6596 /* Build raid_map */
6597 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6598 (need_full_stripe(op) || mirror_num > 1)) {
6599 u64 tmp;
6600 unsigned rot;
6601
6602 /* Work out the disk rotation on this stripe-set */
6603 div_u64_rem(stripe_nr, num_stripes, &rot);
6604
6605 /* Fill in the logical address of each stripe */
6606 tmp = stripe_nr * data_stripes;
6607 for (i = 0; i < data_stripes; i++)
6608 bioc->raid_map[(i + rot) % num_stripes] =
6609 em->start + (tmp + i) * map->stripe_len;
6610
6611 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE;
6612 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6613 bioc->raid_map[(i + rot + 1) % num_stripes] =
6614 RAID6_Q_STRIPE;
6615
6616 sort_parity_stripes(bioc, num_stripes);
6617 }
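/*
 * E.g. for RAID5 over 3 devices (data_stripes == 2) with stripe_nr == 1,
 * rot == 1: the two data stripes fill raid_map slots 1 and 2, the
 * parity stripe lands in slot 0, and sort_parity_stripes() then moves
 * the parity entry (RAID5_P_STRIPE, the largest value) to the end.
 */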
6618
6619 if (need_full_stripe(op))
6620 max_errors = btrfs_chunk_max_errors(map);
6621
6622 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6623 need_full_stripe(op)) {
6624 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
6625 &num_stripes, &max_errors);
6626 }
6627
6628 *bioc_ret = bioc;
6629 bioc->map_type = map->type;
6630 bioc->num_stripes = num_stripes;
6631 bioc->max_errors = max_errors;
6632 bioc->mirror_num = mirror_num;
6633
6634 /*
6635 * This is the case that REQ_READ && dev_replace_is_ongoing &&
6636 * mirror_num == num_stripes + 1 && the dev_replace target drive is
6637 * available as a mirror.
6638 */
6639 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6640 WARN_ON(num_stripes > 1);
6641 bioc->stripes[0].dev = dev_replace->tgtdev;
6642 bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
6643 bioc->mirror_num = map->num_stripes + 1;
6644 }
6645 out:
6646 if (dev_replace_is_ongoing) {
6647 lockdep_assert_held(&dev_replace->rwsem);
6648 /* Unlock and let waiting writers proceed */
6649 up_read(&dev_replace->rwsem);
6650 }
6651 free_extent_map(em);
6652 return ret;
6653 }
6654
6655 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6656 u64 logical, u64 *length,
6657 struct btrfs_io_context **bioc_ret, int mirror_num)
6658 {
6659 if (op == BTRFS_MAP_DISCARD)
6660 return __btrfs_map_block_for_discard(fs_info, logical,
6661 length, bioc_ret);
6662
6663 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6664 mirror_num, 0);
6665 }
6666
6667 /* For Scrub/replace */
6668 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6669 u64 logical, u64 *length,
6670 struct btrfs_io_context **bioc_ret)
6671 {
6672 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
6673 }
6674
6675 static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
6676 {
6677 bio->bi_private = bioc->private;
6678 bio->bi_end_io = bioc->end_io;
6679 bio_endio(bio);
6680
6681 btrfs_put_bioc(bioc);
6682 }
6683
6684 static void btrfs_end_bio(struct bio *bio)
6685 {
6686 struct btrfs_io_context *bioc = bio->bi_private;
6687 int is_orig_bio = 0;
6688
6689 if (bio->bi_status) {
6690 atomic_inc(&bioc->error);
6691 if (bio->bi_status == BLK_STS_IOERR ||
6692 bio->bi_status == BLK_STS_TARGET) {
6693 struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6694
6695 ASSERT(dev->bdev);
6696 if (btrfs_op(bio) == BTRFS_MAP_WRITE)
6697 btrfs_dev_stat_inc_and_print(dev,
6698 BTRFS_DEV_STAT_WRITE_ERRS);
6699 else if (!(bio->bi_opf & REQ_RAHEAD))
6700 btrfs_dev_stat_inc_and_print(dev,
6701 BTRFS_DEV_STAT_READ_ERRS);
6702 if (bio->bi_opf & REQ_PREFLUSH)
6703 btrfs_dev_stat_inc_and_print(dev,
6704 BTRFS_DEV_STAT_FLUSH_ERRS);
6705 }
6706 }
6707
6708 if (bio == bioc->orig_bio)
6709 is_orig_bio = 1;
6710
6711 btrfs_bio_counter_dec(bioc->fs_info);
6712
6713 if (atomic_dec_and_test(&bioc->stripes_pending)) {
6714 if (!is_orig_bio) {
6715 bio_put(bio);
6716 bio = bioc->orig_bio;
6717 }
6718
6719 btrfs_io_bio(bio)->mirror_num = bioc->mirror_num;
6720 /* Only send an error to the higher layers if it is
6721 * beyond the tolerance of the btrfs bio.
6722 */
6723 if (atomic_read(&bioc->error) > bioc->max_errors) {
6724 bio->bi_status = BLK_STS_IOERR;
6725 } else {
6726 /*
6727 * this bio is actually up to date, we didn't
6728 * go over the max number of errors
6729 */
6730 bio->bi_status = BLK_STS_OK;
6731 }
6732
6733 btrfs_end_bioc(bioc, bio);
6734 } else if (!is_orig_bio) {
6735 bio_put(bio);
6736 }
6737 }
6738
6739 static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
6740 u64 physical, struct btrfs_device *dev)
6741 {
6742 struct btrfs_fs_info *fs_info = bioc->fs_info;
6743
6744 bio->bi_private = bioc;
6745 btrfs_io_bio(bio)->device = dev;
6746 bio->bi_end_io = btrfs_end_bio;
6747 bio->bi_iter.bi_sector = physical >> 9;
6748 /*
6749 * For zone append writing, bi_sector must point to the beginning of
6750 * the zone
6751 */
6752 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
6753 if (btrfs_dev_is_sequential(dev, physical)) {
6754 u64 zone_start = round_down(physical, fs_info->zone_size);
6755
6756 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
6757 } else {
6758 bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
6759 bio->bi_opf |= REQ_OP_WRITE;
6760 }
6761 }
6762 btrfs_debug_in_rcu(fs_info,
6763 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6764 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
6765 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6766 dev->devid, bio->bi_iter.bi_size);
6767 bio_set_dev(bio, dev->bdev);
6768
6769 btrfs_bio_counter_inc_noblocked(fs_info);
6770
6771 btrfsic_submit_bio(bio);
6772 }
6773
6774 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical)
6775 {
6776 atomic_inc(&bioc->error);
6777 if (atomic_dec_and_test(&bioc->stripes_pending)) {
6778 /* Should be the original bio. */
6779 WARN_ON(bio != bioc->orig_bio);
6780
6781 btrfs_io_bio(bio)->mirror_num = bioc->mirror_num;
6782 bio->bi_iter.bi_sector = logical >> 9;
6783 if (atomic_read(&bioc->error) > bioc->max_errors)
6784 bio->bi_status = BLK_STS_IOERR;
6785 else
6786 bio->bi_status = BLK_STS_OK;
6787 btrfs_end_bioc(bioc, bio);
6788 }
6789 }
6790
6791 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6792 int mirror_num)
6793 {
6794 struct btrfs_device *dev;
6795 struct bio *first_bio = bio;
6796 u64 logical = bio->bi_iter.bi_sector << 9;
6797 u64 length = 0;
6798 u64 map_length;
6799 int ret;
6800 int dev_nr;
6801 int total_devs;
6802 struct btrfs_io_context *bioc = NULL;
6803
6804 length = bio->bi_iter.bi_size;
6805 map_length = length;
6806
6807 btrfs_bio_counter_inc_blocked(fs_info);
6808 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6809 &map_length, &bioc, mirror_num, 1);
6810 if (ret) {
6811 btrfs_bio_counter_dec(fs_info);
6812 return errno_to_blk_status(ret);
6813 }
6814
6815 total_devs = bioc->num_stripes;
6816 bioc->orig_bio = first_bio;
6817 bioc->private = first_bio->bi_private;
6818 bioc->end_io = first_bio->bi_end_io;
6819 bioc->fs_info = fs_info;
6820 atomic_set(&bioc->stripes_pending, bioc->num_stripes);
6821
6822 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6823 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
6824 /* In this case, map_length has been set to the length of
6825 * a single stripe, not the whole write. */
6826 if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
6827 ret = raid56_parity_write(fs_info, bio, bioc,
6828 map_length);
6829 } else {
6830 ret = raid56_parity_recover(fs_info, bio, bioc,
6831 map_length, mirror_num, 1);
6832 }
6833
6834 btrfs_bio_counter_dec(fs_info);
6835 return errno_to_blk_status(ret);
6836 }
6837
6838 if (map_length < length) {
6839 btrfs_crit(fs_info,
6840 "mapping failed logical %llu bio len %llu len %llu",
6841 logical, length, map_length);
6842 BUG();
6843 }
6844
6845 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6846 dev = bioc->stripes[dev_nr].dev;
6847 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6848 &dev->dev_state) ||
6849 (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
6850 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6851 bioc_error(bioc, first_bio, logical);
6852 continue;
6853 }
6854
6855 if (dev_nr < total_devs - 1)
6856 bio = btrfs_bio_clone(first_bio);
6857 else
6858 bio = first_bio;
6859
6860 submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev);
6861 }
6862 btrfs_bio_counter_dec(fs_info);
6863 return BLK_STS_OK;
6864 }
6865
6866 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
6867 const struct btrfs_fs_devices *fs_devices)
6868 {
6869 if (args->fsid == NULL)
6870 return true;
6871 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
6872 return true;
6873 return false;
6874 }
6875
6876 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
6877 const struct btrfs_device *device)
6878 {
6879 if (args->missing) {
6880 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
6881 !device->bdev)
6882 return true;
6883 return false;
6884 }
6885
6886 if (device->devid != args->devid)
6887 return false;
6888 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
6889 return false;
6890 return true;
6891 }
6892
6893 /*
6894 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6895 * return NULL.
6896 *
6897 * If devid and uuid are both specified, the match must be exact, otherwise
6898 * only devid is used.
6899 */
6900 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
6901 const struct btrfs_dev_lookup_args *args)
6902 {
6903 struct btrfs_device *device;
6904 struct btrfs_fs_devices *seed_devs;
6905
6906 if (dev_args_match_fs_devices(args, fs_devices)) {
6907 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6908 if (dev_args_match_device(args, device))
6909 return device;
6910 }
6911 }
6912
6913 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6914 if (!dev_args_match_fs_devices(args, seed_devs))
6915 continue;
6916 list_for_each_entry(device, &seed_devs->devices, dev_list) {
6917 if (dev_args_match_device(args, device))
6918 return device;
6919 }
6920 }
6921
6922 return NULL;
6923 }
6924
6925 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6926 u64 devid, u8 *dev_uuid)
6927 {
6928 struct btrfs_device *device;
6929 unsigned int nofs_flag;
6930
6931 /*
6932 * We call this under the chunk_mutex, so we want to use NOFS for this
6933 * allocation, however we don't want to change btrfs_alloc_device() to
6934 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6935 * places.
6936 */
6937 nofs_flag = memalloc_nofs_save();
6938 device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6939 memalloc_nofs_restore(nofs_flag);
6940 if (IS_ERR(device))
6941 return device;
6942
6943 list_add(&device->dev_list, &fs_devices->devices);
6944 device->fs_devices = fs_devices;
6945 fs_devices->num_devices++;
6946
6947 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6948 fs_devices->missing_devices++;
6949
6950 return device;
6951 }
6952
6953 /**
6954 * btrfs_alloc_device - allocate struct btrfs_device
6955 * @fs_info: used only for generating a new devid, can be NULL if
6956 * devid is provided (i.e. @devid != NULL).
6957 * @devid: a pointer to devid for this device. If NULL a new devid
6958 * is generated.
6959 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6960 * is generated.
6961 *
6962 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6963 * on error. Returned struct is not linked onto any lists and must be
6964 * destroyed with btrfs_free_device.
6965 */
6966 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6967 const u64 *devid,
6968 const u8 *uuid)
6969 {
6970 struct btrfs_device *dev;
6971 u64 tmp;
6972
6973 if (WARN_ON(!devid && !fs_info))
6974 return ERR_PTR(-EINVAL);
6975
6976 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6977 if (!dev)
6978 return ERR_PTR(-ENOMEM);
6979
6980 /*
6981 * Preallocate a bio that's always going to be used for flushing device
6982 * barriers and matches the device lifespan
6983 */
6984 dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
6985 if (!dev->flush_bio) {
6986 kfree(dev);
6987 return ERR_PTR(-ENOMEM);
6988 }
6989
6990 INIT_LIST_HEAD(&dev->dev_list);
6991 INIT_LIST_HEAD(&dev->dev_alloc_list);
6992 INIT_LIST_HEAD(&dev->post_commit_list);
6993
6994 atomic_set(&dev->reada_in_flight, 0);
6995 atomic_set(&dev->dev_stats_ccnt, 0);
6996 btrfs_device_data_ordered_init(dev);
6997 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
6998 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
6999 extent_io_tree_init(fs_info, &dev->alloc_state,
7000 IO_TREE_DEVICE_ALLOC_STATE, NULL);
7001
7002 if (devid)
7003 tmp = *devid;
7004 else {
7005 int ret;
7006
7007 ret = find_next_devid(fs_info, &tmp);
7008 if (ret) {
7009 btrfs_free_device(dev);
7010 return ERR_PTR(ret);
7011 }
7012 }
7013 dev->devid = tmp;
7014
7015 if (uuid)
7016 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
7017 else
7018 generate_random_uuid(dev->uuid);
7019
7020 return dev;
7021 }
7022
7023 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
7024 u64 devid, u8 *uuid, bool error)
7025 {
7026 if (error)
7027 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
7028 devid, uuid);
7029 else
7030 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
7031 devid, uuid);
7032 }
7033
7034 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
7035 {
7036 const int data_stripes = calc_data_stripes(type, num_stripes);
7037
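/*
 * E.g. a 3GiB RAID5 chunk over 4 stripes has 3 data stripes, so each
 * device extent backing it is 1GiB long.
 */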
7038 return div_u64(chunk_len, data_stripes);
7039 }
7040
7041 #if BITS_PER_LONG == 32
7042 /*
7043 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
7044 * can't be accessed on 32bit systems.
7045 *
7046 * This function does a mount time check to reject the fs if it already
7047 * has a metadata chunk beyond that limit.
7048 */
7049 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7050 u64 logical, u64 length, u64 type)
7051 {
7052 if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7053 return 0;
7054
7055 if (logical + length < MAX_LFS_FILESIZE)
7056 return 0;
7057
7058 btrfs_err_32bit_limit(fs_info);
7059 return -EOVERFLOW;
7060 }
7061
7062 /*
7063 * This is to give an early warning for any metadata chunk reaching
7064 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
7065 * Although we can still access the metadata, it's not going to be possible
7066 * once the limit is reached.
7067 */
7068 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7069 u64 logical, u64 length, u64 type)
7070 {
7071 if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7072 return;
7073
7074 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
7075 return;
7076
7077 btrfs_warn_32bit_limit(fs_info);
7078 }
7079 #endif
7080
7081 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
7082 u64 devid, u8 *uuid)
7083 {
7084 struct btrfs_device *dev;
7085
7086 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7087 btrfs_report_missing_device(fs_info, devid, uuid, true);
7088 return ERR_PTR(-ENOENT);
7089 }
7090
7091 dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
7092 if (IS_ERR(dev)) {
7093 btrfs_err(fs_info, "failed to init missing device %llu: %ld",
7094 devid, PTR_ERR(dev));
7095 return dev;
7096 }
7097 btrfs_report_missing_device(fs_info, devid, uuid, false);
7098
7099 return dev;
7100 }
7101
7102 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
7103 struct btrfs_chunk *chunk)
7104 {
7105 BTRFS_DEV_LOOKUP_ARGS(args);
7106 struct btrfs_fs_info *fs_info = leaf->fs_info;
7107 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7108 struct map_lookup *map;
7109 struct extent_map *em;
7110 u64 logical;
7111 u64 length;
7112 u64 devid;
7113 u64 type;
7114 u8 uuid[BTRFS_UUID_SIZE];
7115 int num_stripes;
7116 int ret;
7117 int i;
7118
7119 logical = key->offset;
7120 length = btrfs_chunk_length(leaf, chunk);
7121 type = btrfs_chunk_type(leaf, chunk);
7122 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
7123
7124 #if BITS_PER_LONG == 32
7125 ret = check_32bit_meta_chunk(fs_info, logical, length, type);
7126 if (ret < 0)
7127 return ret;
7128 warn_32bit_meta_chunk(fs_info, logical, length, type);
7129 #endif
7130
7131 /*
7132 * Only need to verify the chunk item if we're reading from the sys chunk
7133 * array, as chunk items in tree blocks are already verified by the tree-checker.
7134 */
7135 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
7136 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
7137 if (ret)
7138 return ret;
7139 }
7140
7141 read_lock(&map_tree->lock);
7142 em = lookup_extent_mapping(map_tree, logical, 1);
7143 read_unlock(&map_tree->lock);
7144
7145 /* already mapped? */
7146 if (em && em->start <= logical && em->start + em->len > logical) {
7147 free_extent_map(em);
7148 return 0;
7149 } else if (em) {
7150 free_extent_map(em);
7151 }
7152
7153 em = alloc_extent_map();
7154 if (!em)
7155 return -ENOMEM;
7156 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
7157 if (!map) {
7158 free_extent_map(em);
7159 return -ENOMEM;
7160 }
7161
7162 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
7163 em->map_lookup = map;
7164 em->start = logical;
7165 em->len = length;
7166 em->orig_start = 0;
7167 em->block_start = 0;
7168 em->block_len = em->len;
7169
7170 map->num_stripes = num_stripes;
7171 map->io_width = btrfs_chunk_io_width(leaf, chunk);
7172 map->io_align = btrfs_chunk_io_align(leaf, chunk);
7173 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
7174 map->type = type;
7175 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
7176 map->verified_stripes = 0;
7177 em->orig_block_len = calc_stripe_length(type, em->len,
7178 map->num_stripes);
7179 for (i = 0; i < num_stripes; i++) {
7180 map->stripes[i].physical =
7181 btrfs_stripe_offset_nr(leaf, chunk, i);
7182 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
7183 args.devid = devid;
7184 read_extent_buffer(leaf, uuid, (unsigned long)
7185 btrfs_stripe_dev_uuid_nr(chunk, i),
7186 BTRFS_UUID_SIZE);
7187 args.uuid = uuid;
7188 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
7189 if (!map->stripes[i].dev) {
7190 map->stripes[i].dev = handle_missing_device(fs_info,
7191 devid, uuid);
7192 if (IS_ERR(map->stripes[i].dev)) {
7193 ret = PTR_ERR(map->stripes[i].dev);
7194 free_extent_map(em);
7195 return ret;
7196 }
7197 }
7198
7199 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7200 &(map->stripes[i].dev->dev_state));
7201 }
7202
7203 write_lock(&map_tree->lock);
7204 ret = add_extent_mapping(map_tree, em, 0);
7205 write_unlock(&map_tree->lock);
7206 if (ret < 0) {
7207 btrfs_err(fs_info,
7208 "failed to add chunk map, start=%llu len=%llu: %d",
7209 em->start, em->len, ret);
7210 }
7211 free_extent_map(em);
7212
7213 return ret;
7214 }
7215
7216 static void fill_device_from_item(struct extent_buffer *leaf,
7217 struct btrfs_dev_item *dev_item,
7218 struct btrfs_device *device)
7219 {
7220 unsigned long ptr;
7221
7222 device->devid = btrfs_device_id(leaf, dev_item);
7223 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7224 device->total_bytes = device->disk_total_bytes;
7225 device->commit_total_bytes = device->disk_total_bytes;
7226 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7227 device->commit_bytes_used = device->bytes_used;
7228 device->type = btrfs_device_type(leaf, dev_item);
7229 device->io_align = btrfs_device_io_align(leaf, dev_item);
7230 device->io_width = btrfs_device_io_width(leaf, dev_item);
7231 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7232 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7233 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7234
7235 ptr = btrfs_device_uuid(dev_item);
7236 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7237 }
7238
7239 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7240 u8 *fsid)
7241 {
7242 struct btrfs_fs_devices *fs_devices;
7243 int ret;
7244
7245 lockdep_assert_held(&uuid_mutex);
7246 ASSERT(fsid);
7247
7248 /* This will match only for multi-device seed fs */
7249 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7250 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7251 return fs_devices;
7252
7254 fs_devices = find_fsid(fsid, NULL);
7255 if (!fs_devices) {
7256 if (!btrfs_test_opt(fs_info, DEGRADED))
7257 return ERR_PTR(-ENOENT);
7258
7259 fs_devices = alloc_fs_devices(fsid, NULL);
7260 if (IS_ERR(fs_devices))
7261 return fs_devices;
7262
7263 fs_devices->seeding = true;
7264 fs_devices->opened = 1;
7265 return fs_devices;
7266 }
7267
7268 /*
7269 * Upon first call for a seed fs fsid, just create a private copy of the
7270 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
7271 */
7272 fs_devices = clone_fs_devices(fs_devices);
7273 if (IS_ERR(fs_devices))
7274 return fs_devices;
7275
7276 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
7277 if (ret) {
7278 free_fs_devices(fs_devices);
7279 return ERR_PTR(ret);
7280 }
7281
7282 if (!fs_devices->seeding) {
7283 close_fs_devices(fs_devices);
7284 free_fs_devices(fs_devices);
7285 return ERR_PTR(-EINVAL);
7286 }
7287
7288 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7289
7290 return fs_devices;
7291 }
7292
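/*
 * Illustrative seed flow: when the dev item read below carries an fsid
 * different from the mounted fs' metadata_uuid, the device belongs to a
 * seed fs, and open_seed_devices() above finds (or clones and chains on
 * seed_list) the matching fs_devices before the device is looked up.
 */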
7293 static int read_one_dev(struct extent_buffer *leaf,
7294 struct btrfs_dev_item *dev_item)
7295 {
7296 BTRFS_DEV_LOOKUP_ARGS(args);
7297 struct btrfs_fs_info *fs_info = leaf->fs_info;
7298 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7299 struct btrfs_device *device;
7300 u64 devid;
7301 int ret;
7302 u8 fs_uuid[BTRFS_FSID_SIZE];
7303 u8 dev_uuid[BTRFS_UUID_SIZE];
7304
7305 devid = args.devid = btrfs_device_id(leaf, dev_item);
7306 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7307 BTRFS_UUID_SIZE);
7308 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7309 BTRFS_FSID_SIZE);
7310 args.uuid = dev_uuid;
7311 args.fsid = fs_uuid;
7312
7313 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7314 fs_devices = open_seed_devices(fs_info, fs_uuid);
7315 if (IS_ERR(fs_devices))
7316 return PTR_ERR(fs_devices);
7317 }
7318
7319 device = btrfs_find_device(fs_info->fs_devices, &args);
7320 if (!device) {
7321 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7322 btrfs_report_missing_device(fs_info, devid,
7323 dev_uuid, true);
7324 return -ENOENT;
7325 }
7326
7327 device = add_missing_dev(fs_devices, devid, dev_uuid);
7328 if (IS_ERR(device)) {
7329 btrfs_err(fs_info,
7330 "failed to add missing dev %llu: %ld",
7331 devid, PTR_ERR(device));
7332 return PTR_ERR(device);
7333 }
7334 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7335 } else {
7336 if (!device->bdev) {
7337 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7338 btrfs_report_missing_device(fs_info,
7339 devid, dev_uuid, true);
7340 return -ENOENT;
7341 }
7342 btrfs_report_missing_device(fs_info, devid,
7343 dev_uuid, false);
7344 }
7345
7346 if (!device->bdev &&
7347 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7348 /*
7349 * This happens when a device that was properly set up
7350 * in the device info lists suddenly goes bad.
7351 * device->bdev is NULL, so we have to set the
7352 * BTRFS_DEV_STATE_MISSING bit here.
7353 */
7354 device->fs_devices->missing_devices++;
7355 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7356 }
7357
7358 /* Move the device to its own fs_devices */
7359 if (device->fs_devices != fs_devices) {
7360 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7361 &device->dev_state));
7362
7363 list_move(&device->dev_list, &fs_devices->devices);
7364 device->fs_devices->num_devices--;
7365 fs_devices->num_devices++;
7366
7367 device->fs_devices->missing_devices--;
7368 fs_devices->missing_devices++;
7369
7370 device->fs_devices = fs_devices;
7371 }
7372 }
7373
7374 if (device->fs_devices != fs_info->fs_devices) {
7375 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7376 if (device->generation !=
7377 btrfs_device_generation(leaf, dev_item))
7378 return -EINVAL;
7379 }
7380
7381 fill_device_from_item(leaf, dev_item, device);
7382 if (device->bdev) {
7383 u64 max_total_bytes = i_size_read(device->bdev->bd_inode);
7384
7385 if (device->total_bytes > max_total_bytes) {
7386 btrfs_err(fs_info,
7387 "device total_bytes should be at most %llu but found %llu",
7388 max_total_bytes, device->total_bytes);
7389 return -EINVAL;
7390 }
7391 }
7392 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7393 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7394 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7395 device->fs_devices->total_rw_bytes += device->total_bytes;
7396 atomic64_add(device->total_bytes - device->bytes_used,
7397 &fs_info->free_chunk_space);
7398 }
7399 return 0;
7401 }
7402
7403 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7404 {
7405 struct btrfs_root *root = fs_info->tree_root;
7406 struct btrfs_super_block *super_copy = fs_info->super_copy;
7407 struct extent_buffer *sb;
7408 struct btrfs_disk_key *disk_key;
7409 struct btrfs_chunk *chunk;
7410 u8 *array_ptr;
7411 unsigned long sb_array_offset;
7412 int ret = 0;
7413 u32 num_stripes;
7414 u32 array_size;
7415 u32 len = 0;
7416 u32 cur_offset;
7417 u64 type;
7418 struct btrfs_key key;
7419
7420 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7421 /*
7422 * This will create an extent buffer of nodesize; the superblock size is
7423 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7424 * overallocate, but we can keep it as-is since only the first page is used.
7425 */
7426 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
7427 root->root_key.objectid, 0);
7428 if (IS_ERR(sb))
7429 return PTR_ERR(sb);
7430 set_extent_buffer_uptodate(sb);
7431 /*
7432 * The sb extent buffer is artificial and just used to read the system array.
7433 * The set_extent_buffer_uptodate() call does not properly mark all its
7434 * pages up-to-date when the page is larger: the extent does not cover the
7435 * whole page and consequently check_page_uptodate does not find all
7436 * the page's extents up-to-date (the hole beyond sb), so
7437 * write_extent_buffer then triggers a WARN_ON.
7438 *
7439 * Regular short extents go through the mark_extent_buffer_dirty/writeback
7440 * cycle, but sb spans only this function. Add an explicit SetPageUptodate
7441 * call to silence the warning, e.g. on PowerPC 64.
7442 */
7443 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7444 SetPageUptodate(sb->pages[0]);
7445
7446 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7447 array_size = btrfs_super_sys_array_size(super_copy);
7448
7449 array_ptr = super_copy->sys_chunk_array;
7450 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7451 cur_offset = 0;
7452
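/*
 * Layout of sys_chunk_array (illustrative):
 *
 *   [disk_key 0][chunk 0 + stripes][disk_key 1][chunk 1 + stripes]...
 *
 * Each iteration below consumes one btrfs_disk_key and then one
 * btrfs_chunk whose size depends on its stripe count, so the cursor
 * advances by sizeof(*disk_key) + btrfs_chunk_item_size(num_stripes).
 */
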
7453 while (cur_offset < array_size) {
7454 disk_key = (struct btrfs_disk_key *)array_ptr;
7455 len = sizeof(*disk_key);
7456 if (cur_offset + len > array_size)
7457 goto out_short_read;
7458
7459 btrfs_disk_key_to_cpu(&key, disk_key);
7460
7461 array_ptr += len;
7462 sb_array_offset += len;
7463 cur_offset += len;
7464
7465 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7466 btrfs_err(fs_info,
7467 "unexpected item type %u in sys_array at offset %u",
7468 (u32)key.type, cur_offset);
7469 ret = -EIO;
7470 break;
7471 }
7472
7473 chunk = (struct btrfs_chunk *)sb_array_offset;
7474 /*
7475 * At least one btrfs_chunk with one stripe must be present;
7476 * the exact stripe count check comes afterwards.
7477 */
7478 len = btrfs_chunk_item_size(1);
7479 if (cur_offset + len > array_size)
7480 goto out_short_read;
7481
7482 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7483 if (!num_stripes) {
7484 btrfs_err(fs_info,
7485 "invalid number of stripes %u in sys_array at offset %u",
7486 num_stripes, cur_offset);
7487 ret = -EIO;
7488 break;
7489 }
7490
7491 type = btrfs_chunk_type(sb, chunk);
7492 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7493 btrfs_err(fs_info,
7494 "invalid chunk type %llu in sys_array at offset %u",
7495 type, cur_offset);
7496 ret = -EIO;
7497 break;
7498 }
7499
7500 len = btrfs_chunk_item_size(num_stripes);
7501 if (cur_offset + len > array_size)
7502 goto out_short_read;
7503
7504 ret = read_one_chunk(&key, sb, chunk);
7505 if (ret)
7506 break;
7507
7508 array_ptr += len;
7509 sb_array_offset += len;
7510 cur_offset += len;
7511 }
7512 clear_extent_buffer_uptodate(sb);
7513 free_extent_buffer_stale(sb);
7514 return ret;
7515
7516 out_short_read:
7517 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7518 len, cur_offset);
7519 clear_extent_buffer_uptodate(sb);
7520 free_extent_buffer_stale(sb);
7521 return -EIO;
7522 }
7523
7524 /*
7525 * Check if all chunks in the fs are OK for read-write degraded mount
7526 *
7527 * If the @failing_dev is specified, it's accounted as missing.
7528 *
7529 * Return true if all chunks meet the minimal RW mount requirements.
7530 * Return false if any chunk doesn't meet the minimal RW mount requirements.
7531 */
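/*
 * Example (illustrative, per btrfs_raid_array): with RAID1 metadata and
 * RAID0 data, one missing device is tolerable for the RAID1 chunks
 * (tolerated_failures == 1) but not for any RAID0 chunk touching that
 * device (tolerated_failures == 0), so this function returns false.
 */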
7532 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7533 struct btrfs_device *failing_dev)
7534 {
7535 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7536 struct extent_map *em;
7537 u64 next_start = 0;
7538 bool ret = true;
7539
7540 read_lock(&map_tree->lock);
7541 em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7542 read_unlock(&map_tree->lock);
7543 /* No chunk at all? Return false anyway */
7544 if (!em) {
7545 ret = false;
7546 goto out;
7547 }
7548 while (em) {
7549 struct map_lookup *map;
7550 int missing = 0;
7551 int max_tolerated;
7552 int i;
7553
7554 map = em->map_lookup;
7555 max_tolerated =
7556 btrfs_get_num_tolerated_disk_barrier_failures(
7557 map->type);
7558 for (i = 0; i < map->num_stripes; i++) {
7559 struct btrfs_device *dev = map->stripes[i].dev;
7560
7561 if (!dev || !dev->bdev ||
7562 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7563 dev->last_flush_error)
7564 missing++;
7565 else if (failing_dev && failing_dev == dev)
7566 missing++;
7567 }
7568 if (missing > max_tolerated) {
7569 if (!failing_dev)
7570 btrfs_warn(fs_info,
7571 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7572 em->start, missing, max_tolerated);
7573 free_extent_map(em);
7574 ret = false;
7575 goto out;
7576 }
7577 next_start = extent_map_end(em);
7578 free_extent_map(em);
7579
7580 read_lock(&map_tree->lock);
7581 em = lookup_extent_mapping(map_tree, next_start,
7582 (u64)(-1) - next_start);
7583 read_unlock(&map_tree->lock);
7584 }
7585 out:
7586 return ret;
7587 }
7588
7589 static void readahead_tree_node_children(struct extent_buffer *node)
7590 {
7591 int i;
7592 const int nr_items = btrfs_header_nritems(node);
7593
7594 for (i = 0; i < nr_items; i++)
7595 btrfs_readahead_node_child(node, i);
7596 }
7597
7598 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7599 {
7600 struct btrfs_root *root = fs_info->chunk_root;
7601 struct btrfs_path *path;
7602 struct extent_buffer *leaf;
7603 struct btrfs_key key;
7604 struct btrfs_key found_key;
7605 int ret;
7606 int slot;
7607 u64 total_dev = 0;
7608 u64 last_ra_node = 0;
7609
7610 path = btrfs_alloc_path();
7611 if (!path)
7612 return -ENOMEM;
7613
7614 /*
7615 * uuid_mutex is needed only when we are mounting a sprout FS.
7617 */
7618 mutex_lock(&uuid_mutex);
7619
7620 /*
7621 * It is possible for mount and umount to race in such a way that
7622 * we execute this code path, but open_fs_devices failed to clear
7623 * total_rw_bytes. We certainly want it cleared before reading the
7624 * device items, so clear it here.
7625 */
7626 fs_info->fs_devices->total_rw_bytes = 0;
7627
7628 /*
7629 * Lockdep complains about possible circular locking dependency between
7630 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7631 * used for freeze protection of a fs (struct super_block.s_writers),
7632 * which we take when starting a transaction, and extent buffers of the
7633 * chunk tree if we call read_one_dev() while holding a lock on an
7634 * extent buffer of the chunk tree. Since we are mounting the filesystem
7635 * and at this point there can't be any concurrent task modifying the
7636 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7637 */
7638 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7639 path->skip_locking = 1;
7640
7641 /*
7642 * Read all device items, and then all the chunk items. All
7643 * device items are found before any chunk item (their object id
7644 * is smaller than the lowest possible object id for a chunk
7645 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7646 */
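/*
 * (BTRFS_DEV_ITEMS_OBJECTID is 1 while chunk items live at
 * BTRFS_FIRST_CHUNK_TREE_OBJECTID == 256, hence the ordering.)
 */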
7647 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7648 key.offset = 0;
7649 key.type = 0;
7650 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7651 if (ret < 0)
7652 goto error;
7653 while (1) {
7654 struct extent_buffer *node;
7655
7656 leaf = path->nodes[0];
7657 slot = path->slots[0];
7658 if (slot >= btrfs_header_nritems(leaf)) {
7659 ret = btrfs_next_leaf(root, path);
7660 if (ret == 0)
7661 continue;
7662 if (ret < 0)
7663 goto error;
7664 break;
7665 }
7666 node = path->nodes[1];
7667 if (node) {
7668 if (last_ra_node != node->start) {
7669 readahead_tree_node_children(node);
7670 last_ra_node = node->start;
7671 }
7672 }
7673 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7674 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7675 struct btrfs_dev_item *dev_item;
7676 dev_item = btrfs_item_ptr(leaf, slot,
7677 struct btrfs_dev_item);
7678 ret = read_one_dev(leaf, dev_item);
7679 if (ret)
7680 goto error;
7681 total_dev++;
7682 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7683 struct btrfs_chunk *chunk;
7684
7685 /*
7686 * We are only called at mount time, so no need to take
7687 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7688 * we always lock fs_info->chunk_mutex first, before
7689 * acquiring any locks on the chunk tree. This is a
7690 * requirement for chunk allocation, see the comment on
7691 * top of btrfs_chunk_alloc() for details.
7692 */
7693 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7694 ret = read_one_chunk(&found_key, leaf, chunk);
7695 if (ret)
7696 goto error;
7697 }
7698 path->slots[0]++;
7699 }
7700
7701 /*
7702 * After loading the chunk tree we have all the device information,
7703 * so do another round of validation checks.
7704 */
7705 if (total_dev != fs_info->fs_devices->total_devices) {
7706 btrfs_warn(fs_info,
7707 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7708 btrfs_super_num_devices(fs_info->super_copy),
7709 total_dev);
7710 fs_info->fs_devices->total_devices = total_dev;
7711 btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7712 }
7713 if (btrfs_super_total_bytes(fs_info->super_copy) <
7714 fs_info->fs_devices->total_rw_bytes) {
7715 btrfs_err(fs_info,
7716 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7717 btrfs_super_total_bytes(fs_info->super_copy),
7718 fs_info->fs_devices->total_rw_bytes);
7719 ret = -EINVAL;
7720 goto error;
7721 }
7722 ret = 0;
7723 error:
7724 mutex_unlock(&uuid_mutex);
7725
7726 btrfs_free_path(path);
7727 return ret;
7728 }
7729
7730 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7731 {
7732 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7733 struct btrfs_device *device;
7734 int ret = 0;
7735
7736 fs_devices->fs_info = fs_info;
7737
7738 mutex_lock(&fs_devices->device_list_mutex);
7739 list_for_each_entry(device, &fs_devices->devices, dev_list)
7740 device->fs_info = fs_info;
7741
7742 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7743 list_for_each_entry(device, &seed_devs->devices, dev_list) {
7744 device->fs_info = fs_info;
7745 ret = btrfs_get_dev_zone_info(device, false);
7746 if (ret)
7747 break;
7748 }
7749
7750 seed_devs->fs_info = fs_info;
7751 }
7752 mutex_unlock(&fs_devices->device_list_mutex);
7753
7754 return ret;
7755 }
7756
7757 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7758 const struct btrfs_dev_stats_item *ptr,
7759 int index)
7760 {
7761 u64 val;
7762
7763 read_extent_buffer(eb, &val,
7764 offsetof(struct btrfs_dev_stats_item, values) +
7765 ((unsigned long)ptr) + (index * sizeof(u64)),
7766 sizeof(val));
7767 return val;
7768 }
7769
7770 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7771 struct btrfs_dev_stats_item *ptr,
7772 int index, u64 val)
7773 {
7774 write_extent_buffer(eb, &val,
7775 offsetof(struct btrfs_dev_stats_item, values) +
7776 ((unsigned long)ptr) + (index * sizeof(u64)),
7777 sizeof(val));
7778 }
7779
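/*
 * Note: struct btrfs_dev_stats_item is just an array of __le64 counters,
 * so values[index] sits at byte offset index * sizeof(__le64) within the
 * item; the helpers above merely add that to the item's start. Shorter
 * items written by older kernels are handled in
 * btrfs_device_init_dev_stats() by treating the missing counters as 0.
 */
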
7780 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7781 struct btrfs_path *path)
7782 {
7783 struct btrfs_dev_stats_item *ptr;
7784 struct extent_buffer *eb;
7785 struct btrfs_key key;
7786 int item_size;
7787 int i, ret, slot;
7788
7789 if (!device->fs_info->dev_root)
7790 return 0;
7791
7792 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7793 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7794 key.offset = device->devid;
7795 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7796 if (ret) {
7797 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7798 btrfs_dev_stat_set(device, i, 0);
7799 device->dev_stats_valid = 1;
7800 btrfs_release_path(path);
7801 return ret < 0 ? ret : 0;
7802 }
7803 slot = path->slots[0];
7804 eb = path->nodes[0];
7805 item_size = btrfs_item_size_nr(eb, slot);
7806
7807 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7808
7809 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7810 if (item_size >= (1 + i) * sizeof(__le64))
7811 btrfs_dev_stat_set(device, i,
7812 btrfs_dev_stats_value(eb, ptr, i));
7813 else
7814 btrfs_dev_stat_set(device, i, 0);
7815 }
7816
7817 device->dev_stats_valid = 1;
7818 btrfs_dev_stat_print_on_load(device);
7819 btrfs_release_path(path);
7820
7821 return 0;
7822 }
7823
7824 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7825 {
7826 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7827 struct btrfs_device *device;
7828 struct btrfs_path *path = NULL;
7829 int ret = 0;
7830
7831 path = btrfs_alloc_path();
7832 if (!path)
7833 return -ENOMEM;
7834
7835 mutex_lock(&fs_devices->device_list_mutex);
7836 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7837 ret = btrfs_device_init_dev_stats(device, path);
7838 if (ret)
7839 goto out;
7840 }
7841 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7842 list_for_each_entry(device, &seed_devs->devices, dev_list) {
7843 ret = btrfs_device_init_dev_stats(device, path);
7844 if (ret)
7845 goto out;
7846 }
7847 }
7848 out:
7849 mutex_unlock(&fs_devices->device_list_mutex);
7850
7851 btrfs_free_path(path);
7852 return ret;
7853 }
7854
7855 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7856 struct btrfs_device *device)
7857 {
7858 struct btrfs_fs_info *fs_info = trans->fs_info;
7859 struct btrfs_root *dev_root = fs_info->dev_root;
7860 struct btrfs_path *path;
7861 struct btrfs_key key;
7862 struct extent_buffer *eb;
7863 struct btrfs_dev_stats_item *ptr;
7864 int ret;
7865 int i;
7866
7867 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7868 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7869 key.offset = device->devid;
7870
7871 path = btrfs_alloc_path();
7872 if (!path)
7873 return -ENOMEM;
7874 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7875 if (ret < 0) {
7876 btrfs_warn_in_rcu(fs_info,
7877 "error %d while searching for dev_stats item for device %s",
7878 ret, rcu_str_deref(device->name));
7879 goto out;
7880 }
7881
7882 if (ret == 0 &&
7883 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7884 /* need to delete old one and insert a new one */
7885 ret = btrfs_del_item(trans, dev_root, path);
7886 if (ret != 0) {
7887 btrfs_warn_in_rcu(fs_info,
7888 "delete too small dev_stats item for device %s failed %d",
7889 rcu_str_deref(device->name), ret);
7890 goto out;
7891 }
7892 ret = 1;
7893 }
7894
7895 if (ret == 1) {
7896 /* need to insert a new item */
7897 btrfs_release_path(path);
7898 ret = btrfs_insert_empty_item(trans, dev_root, path,
7899 &key, sizeof(*ptr));
7900 if (ret < 0) {
7901 btrfs_warn_in_rcu(fs_info,
7902 "insert dev_stats item for device %s failed %d",
7903 rcu_str_deref(device->name), ret);
7904 goto out;
7905 }
7906 }
7907
7908 eb = path->nodes[0];
7909 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7910 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7911 btrfs_set_dev_stats_value(eb, ptr, i,
7912 btrfs_dev_stat_read(device, i));
7913 btrfs_mark_buffer_dirty(eb);
7914
7915 out:
7916 btrfs_free_path(path);
7917 return ret;
7918 }
7919
7920 /*
7921 * Called from commit_transaction(). Writes all changed device stats to disk.
7922 */
7923 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7924 {
7925 struct btrfs_fs_info *fs_info = trans->fs_info;
7926 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7927 struct btrfs_device *device;
7928 int stats_cnt;
7929 int ret = 0;
7930
7931 mutex_lock(&fs_devices->device_list_mutex);
7932 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7933 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7934 if (!device->dev_stats_valid || stats_cnt == 0)
7935 continue;
7936
7938 /*
7939 * There is a LOAD-LOAD control dependency between the value of
7940 * dev_stats_ccnt and updating the on-disk values which requires
7941 * reading the in-memory counters. Such control dependencies
7942 * require explicit read memory barriers.
7943 *
7944 * This memory barrier pairs with smp_mb__before_atomic in
7945 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7946 * barrier implied by atomic_xchg in
7947 * btrfs_dev_stats_read_and_reset
7948 */
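/*
 * For reference (sketch of the increment side, btrfs_dev_stat_inc() in
 * volumes.h): it bumps the per-stat counter first, then issues
 * smp_mb__before_atomic() before atomic_inc() of dev_stats_ccnt, so a
 * reader that sees the new dev_stats_ccnt value after the smp_rmb()
 * below also sees the updated counters.
 */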
7949 smp_rmb();
7950
7951 ret = update_dev_stat_item(trans, device);
7952 if (!ret)
7953 atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7954 }
7955 mutex_unlock(&fs_devices->device_list_mutex);
7956
7957 return ret;
7958 }
7959
7960 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7961 {
7962 btrfs_dev_stat_inc(dev, index);
7963 btrfs_dev_stat_print_on_error(dev);
7964 }
7965
7966 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7967 {
7968 if (!dev->dev_stats_valid)
7969 return;
7970 btrfs_err_rl_in_rcu(dev->fs_info,
7971 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7972 rcu_str_deref(dev->name),
7973 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7974 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7975 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7976 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7977 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7978 }
7979
7980 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7981 {
7982 int i;
7983
7984 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7985 if (btrfs_dev_stat_read(dev, i) != 0)
7986 break;
7987 if (i == BTRFS_DEV_STAT_VALUES_MAX)
7988 return; /* all values == 0, suppress message */
7989
7990 btrfs_info_in_rcu(dev->fs_info,
7991 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7992 rcu_str_deref(dev->name),
7993 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7994 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7995 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7996 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7997 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7998 }
7999
8000 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
8001 struct btrfs_ioctl_get_dev_stats *stats)
8002 {
8003 BTRFS_DEV_LOOKUP_ARGS(args);
8004 struct btrfs_device *dev;
8005 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
8006 int i;
8007
8008 mutex_lock(&fs_devices->device_list_mutex);
8009 args.devid = stats->devid;
8010 dev = btrfs_find_device(fs_info->fs_devices, &args);
8011 mutex_unlock(&fs_devices->device_list_mutex);
8012
8013 if (!dev) {
8014 btrfs_warn(fs_info, "get dev_stats failed, device not found");
8015 return -ENODEV;
8016 } else if (!dev->dev_stats_valid) {
8017 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
8018 return -ENODEV;
8019 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
8020 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
8021 if (stats->nr_items > i)
8022 stats->values[i] =
8023 btrfs_dev_stat_read_and_reset(dev, i);
8024 else
8025 btrfs_dev_stat_set(dev, i, 0);
8026 }
8027 btrfs_info(fs_info, "device stats zeroed by %s (%d)",
8028 current->comm, task_pid_nr(current));
8029 } else {
8030 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
8031 if (stats->nr_items > i)
8032 stats->values[i] = btrfs_dev_stat_read(dev, i);
8033 }
8034 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
8035 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
8036 return 0;
8037 }
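
/*
 * Userspace sketch (illustrative, error handling omitted, assumes the
 * <linux/btrfs.h> uapi definitions):
 *
 *	struct btrfs_ioctl_get_dev_stats s = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *	ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &s);
 *	printf("write errs: %llu\n", s.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 *
 * The BTRFS_IOC_GET_AND_RESET_DEV_STATS variant reaches this function
 * with BTRFS_DEV_STATS_RESET set in stats->flags.
 */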
8038
8039 /*
8040 * Update the size and bytes used for each device where it changed. This is
8041 * delayed since we would otherwise get errors while writing out the
8042 * superblocks.
8043 *
8044 * Must be invoked during transaction commit.
8045 */
8046 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
8047 {
8048 struct btrfs_device *curr, *next;
8049
8050 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
8051
8052 if (list_empty(&trans->dev_update_list))
8053 return;
8054
8055 /*
8056 * We don't need the device_list_mutex here. This list is owned by the
8057 * transaction and the transaction must complete before the device is
8058 * released.
8059 */
8060 mutex_lock(&trans->fs_info->chunk_mutex);
8061 list_for_each_entry_safe(curr, next, &trans->dev_update_list,
8062 post_commit_list) {
8063 list_del_init(&curr->post_commit_list);
8064 curr->commit_total_bytes = curr->disk_total_bytes;
8065 curr->commit_bytes_used = curr->bytes_used;
8066 }
8067 mutex_unlock(&trans->fs_info->chunk_mutex);
8068 }
8069
8070 /*
8071 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
8072 */
8073 int btrfs_bg_type_to_factor(u64 flags)
8074 {
8075 const int index = btrfs_bg_flags_to_raid_index(flags);
8076
8077 return btrfs_raid_array[index].ncopies;
8078 }
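
/*
 * E.g. (from btrfs_raid_array): DUP, RAID1 and RAID10 map to ncopies == 2
 * (RAID1C3/C4 to 3 and 4), while SINGLE, RAID0, RAID5 and RAID6 yield 1,
 * as the redundancy of RAID5/6 comes from parity rather than extra copies.
 */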
8079
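/*
 * Worked example (illustrative): a 2GiB RAID10 chunk with 4 stripes has
 * 2 data stripes, so calc_stripe_length() gives 1GiB and every dev extent
 * recorded for this chunk must be exactly 1GiB long, sitting at one of
 * the four (devid, physical) positions stored in the chunk's map_lookup.
 */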
8082 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
8083 u64 chunk_offset, u64 devid,
8084 u64 physical_offset, u64 physical_len)
8085 {
8086 struct btrfs_dev_lookup_args args = { .devid = devid };
8087 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
8088 struct extent_map *em;
8089 struct map_lookup *map;
8090 struct btrfs_device *dev;
8091 u64 stripe_len;
8092 bool found = false;
8093 int ret = 0;
8094 int i;
8095
8096 read_lock(&em_tree->lock);
8097 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
8098 read_unlock(&em_tree->lock);
8099
8100 if (!em) {
8101 btrfs_err(fs_info,
8102 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
8103 physical_offset, devid);
8104 ret = -EUCLEAN;
8105 goto out;
8106 }
8107
8108 map = em->map_lookup;
8109 stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
8110 if (physical_len != stripe_len) {
8111 btrfs_err(fs_info,
8112 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
8113 physical_offset, devid, em->start, physical_len,
8114 stripe_len);
8115 ret = -EUCLEAN;
8116 goto out;
8117 }
8118
8119 for (i = 0; i < map->num_stripes; i++) {
8120 if (map->stripes[i].dev->devid == devid &&
8121 map->stripes[i].physical == physical_offset) {
8122 found = true;
8123 if (map->verified_stripes >= map->num_stripes) {
8124 btrfs_err(fs_info,
8125 "too many dev extents for chunk %llu found",
8126 em->start);
8127 ret = -EUCLEAN;
8128 goto out;
8129 }
8130 map->verified_stripes++;
8131 break;
8132 }
8133 }
8134 if (!found) {
8135 btrfs_err(fs_info,
8136 "dev extent physical offset %llu devid %llu has no corresponding chunk",
8137 physical_offset, devid);
8138 ret = -EUCLEAN;
8139 }
8140
8141 /* Make sure no dev extent is beyond device boundary */
8142 dev = btrfs_find_device(fs_info->fs_devices, &args);
8143 if (!dev) {
8144 btrfs_err(fs_info, "failed to find devid %llu", devid);
8145 ret = -EUCLEAN;
8146 goto out;
8147 }
8148
8149 if (physical_offset + physical_len > dev->disk_total_bytes) {
8150 btrfs_err(fs_info,
8151 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
8152 devid, physical_offset, physical_len,
8153 dev->disk_total_bytes);
8154 ret = -EUCLEAN;
8155 goto out;
8156 }
8157
8158 if (dev->zone_info) {
8159 u64 zone_size = dev->zone_info->zone_size;
8160
8161 if (!IS_ALIGNED(physical_offset, zone_size) ||
8162 !IS_ALIGNED(physical_len, zone_size)) {
8163 btrfs_err(fs_info,
8164 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
8165 devid, physical_offset, physical_len);
8166 ret = -EUCLEAN;
8167 goto out;
8168 }
8169 }
8170
8171 out:
8172 free_extent_map(em);
8173 return ret;
8174 }
8175
8176 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
8177 {
8178 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
8179 struct extent_map *em;
8180 struct rb_node *node;
8181 int ret = 0;
8182
8183 read_lock(&em_tree->lock);
8184 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
8185 em = rb_entry(node, struct extent_map, rb_node);
8186 if (em->map_lookup->num_stripes !=
8187 em->map_lookup->verified_stripes) {
8188 btrfs_err(fs_info,
8189 "chunk %llu has missing dev extent, have %d expect %d",
8190 em->start, em->map_lookup->verified_stripes,
8191 em->map_lookup->num_stripes);
8192 ret = -EUCLEAN;
8193 goto out;
8194 }
8195 }
8196 out:
8197 read_unlock(&em_tree->lock);
8198 return ret;
8199 }
8200
8201 /*
8202 * Ensure that all dev extents are mapped to correct chunk, otherwise
8203 * later chunk allocation/free would cause unexpected behavior.
8204 *
8205 * NOTE: This will iterate through the whole device tree, which should be
8206 * about the same size as the chunk tree. This slightly increases mount time.
8207 */
8208 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8209 {
8210 struct btrfs_path *path;
8211 struct btrfs_root *root = fs_info->dev_root;
8212 struct btrfs_key key;
8213 u64 prev_devid = 0;
8214 u64 prev_dev_ext_end = 0;
8215 int ret = 0;
8216
8217 /*
8218 * We don't have a dev_root because we mounted with ignorebadroots and
8219 * failed to load the root, so we want to skip the verification in this
8220 * case for sure.
8221 *
8222 * However if the dev root is fine, but the tree itself is corrupted
8223 * we'd still fail to mount. This verification is only to make sure
8224 * writes can happen safely, so instead just bypass this check
8225 * completely in the case of IGNOREBADROOTS.
8226 */
8227 if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8228 return 0;
8229
8230 key.objectid = 1;
8231 key.type = BTRFS_DEV_EXTENT_KEY;
8232 key.offset = 0;
8233
8234 path = btrfs_alloc_path();
8235 if (!path)
8236 return -ENOMEM;
8237
8238 path->reada = READA_FORWARD;
8239 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8240 if (ret < 0)
8241 goto out;
8242
8243 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8244 ret = btrfs_next_leaf(root, path);
8245 if (ret < 0)
8246 goto out;
8247 /* No dev extents at all? Not good */
8248 if (ret > 0) {
8249 ret = -EUCLEAN;
8250 goto out;
8251 }
8252 }
8253 while (1) {
8254 struct extent_buffer *leaf = path->nodes[0];
8255 struct btrfs_dev_extent *dext;
8256 int slot = path->slots[0];
8257 u64 chunk_offset;
8258 u64 physical_offset;
8259 u64 physical_len;
8260 u64 devid;
8261
8262 btrfs_item_key_to_cpu(leaf, &key, slot);
8263 if (key.type != BTRFS_DEV_EXTENT_KEY)
8264 break;
8265 devid = key.objectid;
8266 physical_offset = key.offset;
8267
8268 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8269 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8270 physical_len = btrfs_dev_extent_length(leaf, dext);
8271
8272 /* Check if this dev extent overlaps with the previous one */
8273 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8274 btrfs_err(fs_info,
8275 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8276 devid, physical_offset, prev_dev_ext_end);
8277 ret = -EUCLEAN;
8278 goto out;
8279 }
8280
8281 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8282 physical_offset, physical_len);
8283 if (ret < 0)
8284 goto out;
8285 prev_devid = devid;
8286 prev_dev_ext_end = physical_offset + physical_len;
8287
8288 ret = btrfs_next_item(root, path);
8289 if (ret < 0)
8290 goto out;
8291 if (ret > 0) {
8292 ret = 0;
8293 break;
8294 }
8295 }
8296
8297 /* Ensure all chunks have corresponding dev extents */
8298 ret = verify_chunk_dev_extent_mapping(fs_info);
8299 out:
8300 btrfs_free_path(path);
8301 return ret;
8302 }
8303
8304 /*
8305 * Check whether the given block group or device is pinned by any inode being
8306 * used as a swapfile.
8307 */
8308 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8309 {
8310 struct btrfs_swapfile_pin *sp;
8311 struct rb_node *node;
8312
8313 spin_lock(&fs_info->swapfile_pins_lock);
8314 node = fs_info->swapfile_pins.rb_node;
8315 while (node) {
8316 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8317 if (ptr < sp->ptr)
8318 node = node->rb_left;
8319 else if (ptr > sp->ptr)
8320 node = node->rb_right;
8321 else
8322 break;
8323 }
8324 spin_unlock(&fs_info->swapfile_pins_lock);
8325 return node != NULL;
8326 }
8327
8328 static int relocating_repair_kthread(void *data)
8329 {
8330 struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
8331 struct btrfs_fs_info *fs_info = cache->fs_info;
8332 u64 target;
8333 int ret = 0;
8334
8335 target = cache->start;
8336 btrfs_put_block_group(cache);
8337
8338 sb_start_write(fs_info->sb);
8339 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8340 btrfs_info(fs_info,
8341 "zoned: skip relocating block group %llu to repair: EBUSY",
8342 target);
8343 sb_end_write(fs_info->sb);
8344 return -EBUSY;
8345 }
8346
8347 mutex_lock(&fs_info->reclaim_bgs_lock);
8348
8349 /* Ensure block group still exists */
8350 cache = btrfs_lookup_block_group(fs_info, target);
8351 if (!cache)
8352 goto out;
8353
8354 if (!cache->relocating_repair)
8355 goto out;
8356
8357 ret = btrfs_may_alloc_data_chunk(fs_info, target);
8358 if (ret < 0)
8359 goto out;
8360
8361 btrfs_info(fs_info,
8362 "zoned: relocating block group %llu to repair IO failure",
8363 target);
8364 ret = btrfs_relocate_chunk(fs_info, target);
8365
8366 out:
8367 if (cache)
8368 btrfs_put_block_group(cache);
8369 mutex_unlock(&fs_info->reclaim_bgs_lock);
8370 btrfs_exclop_finish(fs_info);
8371 sb_end_write(fs_info->sb);
8372
8373 return ret;
8374 }
8375
8376 int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8377 {
8378 struct btrfs_block_group *cache;
8379
8380 /* Do not attempt to repair in degraded state */
8381 if (btrfs_test_opt(fs_info, DEGRADED))
8382 return 0;
8383
8384 cache = btrfs_lookup_block_group(fs_info, logical);
8385 if (!cache)
8386 return 0;
8387
8388 spin_lock(&cache->lock);
8389 if (cache->relocating_repair) {
8390 spin_unlock(&cache->lock);
8391 btrfs_put_block_group(cache);
8392 return 0;
8393 }
8394 cache->relocating_repair = 1;
8395 spin_unlock(&cache->lock);
8396
8397 kthread_run(relocating_repair_kthread, cache,
8398 "btrfs-relocating-repair");
8399
8400 return 0;
8401 }
8402