// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0,	/* 0 == as many as possible */
		.devs_min = 4,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid10",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid1",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 3,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 3,
		.ncopies = 3,
		.nparity = 0,
		.raid_name = "raid1c3",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 4,
		.devs_min = 4,
		.tolerated_failures = 3,
		.devs_increment = 4,
		.ncopies = 4,
		.nparity = 0,
		.raid_name = "raid1c4",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "dup",
		.bg_flag = BTRFS_BLOCK_GROUP_DUP,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "raid0",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "single",
		.bg_flag = 0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 1,
		.raid_name = "raid5",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 2,
		.raid_name = "raid6",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
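
/*
 * Illustrative sketch only, not used by btrfs itself: a rough estimate of
 * the usable bytes a profile from the table above yields on @nr_devices
 * equally sized devices. Real chunk allocation also honors dev_stripes,
 * sub_stripes, stripe rounding and per-device holes, so treat this as an
 * approximation of the attribute semantics, not allocator policy. The
 * helper name is made up for this example.
 */
static inline u64 btrfs_raid_usable_bytes_sketch(enum btrfs_raid_types type,
						 int nr_devices, u64 dev_bytes)
{
	const struct btrfs_raid_attr *attr = &btrfs_raid_array[type];

	if (nr_devices < attr->devs_min)
		return 0;
	/*
	 * Parity profiles lose nparity devices worth of space, mirrored
	 * profiles divide the rest by ncopies. E.g. raid6 on 6x 1TiB:
	 * (6 - 2) * 1TiB / 1 = 4TiB; raid1 on 2x 1TiB: 2 * 1TiB / 2 = 1TiB.
	 */
	return (u64)(nr_devices - attr->nparity) * dev_bytes / attr->ncopies;
}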

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
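
/*
 * Usage sketch (illustrative): for
 * bg_flags == (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1),
 *
 *	char buf[64];
 *
 *	btrfs_describe_block_groups(bg_flags, buf, sizeof(buf));
 *
 * leaves "data|raid1" in @buf; any bits without a known name would be
 * appended as a trailing "0x%llx" hex value instead.
 */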

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
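
/*
 * Illustrative sketch of the nesting order above, not a real call path:
 * when several of these locks are needed together they must be acquired
 * top-down and released bottom-up, e.g.:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */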

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid: if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(fs_info, &dev->alloc_state,
			    IO_TREE_DEVICE_ALLOC_STATE, NULL);

	return dev;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

/*
 * Check if the device at @path matches the given btrfs_device.
 *
 * Returns:
 *   true  if it is the same device.
 *   false if it is not the same device or on error.
 */
static bool device_matched(const struct btrfs_device *device, const char *path)
{
	char *device_name;
	struct block_device *bdev_old;
	struct block_device *bdev_new;

	/*
	 * If we are looking for a device with the matching dev_t, then skip
	 * a device without a name (a missing device).
	 */
	if (!device->name)
		return false;

	device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
	if (!device_name)
		return false;

	rcu_read_lock();
	scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
	rcu_read_unlock();

	bdev_old = lookup_bdev(device_name);
	kfree(device_name);
	if (IS_ERR(bdev_old))
		return false;

	bdev_new = lookup_bdev(path);
	if (IS_ERR(bdev_new))
		return false;

	if (bdev_old == bdev_new)
		return true;

	return false;
}

/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * @path:	 Optional. When provided, it will release only unmounted
 *		 devices matching this path.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	 0 for success or if @path is NULL.
 *		 -EBUSY if @path is a mounted device.
 *		 -ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device_matched(device, path))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
				 struct btrfs_device *device, fmode_t flags,
				 void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle a scanned device having its CHANGING_FSID_V2 flag set and the
 * fs_devices being created with a disk that has already completed its fsid
 * change. Such a disk can belong to an fs which has its FSID changed or to
 * one which doesn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently device didn't
	 * observe it. Meaning our fsid will be different from theirs. We need
	 * to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 *     are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
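
/*
 * Informal summary of how device_list_add() below picks among the fsid
 * lookup helpers, based on the flags in the scanned super block:
 *
 *	CHANGING_FSID_V2 set, no METADATA_UUID  -> find_fsid_inprogress()
 *	CHANGING_FSID_V2 set, METADATA_UUID set -> find_fsid_changed()
 *	only METADATA_UUID set                  -> find_fsid_with_metadata_uuid()
 *	neither set                             -> find_fsid_reverted_metadata(),
 *	                                           falling back to find_fsid()
 */
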
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
					   disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						      fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      int step, struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, step, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, step, &latest_dev);

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like bd_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);

	/*
	 * Avoid using flag |= FMODE_EXCL here, as the systemd-udev may
	 * initiate the device scan which may race with the user's mount
	 * or mkfs command, resulting in failure.
	 * Since the device scan is solely for reading purposes, there is
	 * no need for FMODE_EXCL. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	disk_super = btrfs_read_disk_super(bdev, bytenr);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
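
/*
 * Worked example (illustrative): with a CHUNK_ALLOCATED range covering
 * device bytes [100, 199] and *start == 150, len == 64, the second
 * in_range() check above hits, *start is advanced to 200 and true is
 * returned, so the caller retries its search past the pending chunk.
 */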

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	default:
		BUG();
	}
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device on which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	/*
	 * Check before we set max_hole_start, otherwise we could end up
	 * sending back this offset anyway.
	 */
	if (contains_pending_extent(device, hole_start, *hole_size)) {
		if (hole_end >= *hole_start)
			*hole_size = hole_end - *hole_start;
		else
			*hole_size = 0;
		changed = true;
	}

	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/* No extra check */
		break;
	default:
		BUG();
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size of the
 *		  max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start,
				      u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
1808
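/*
 * Find the logical offset where the next chunk can be placed: the end of the
 * last (highest) mapping in the chunk mapping tree, or 0 if the tree is
 * empty.
 */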
1809 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1810 {
1811 struct extent_map_tree *em_tree;
1812 struct extent_map *em;
1813 struct rb_node *n;
1814 u64 ret = 0;
1815
1816 em_tree = &fs_info->mapping_tree;
1817 read_lock(&em_tree->lock);
1818 n = rb_last(&em_tree->map.rb_root);
1819 if (n) {
1820 em = rb_entry(n, struct extent_map, rb_node);
1821 ret = em->start + em->len;
1822 }
1823 read_unlock(&em_tree->lock);
1824
1825 return ret;
1826 }
1827
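/*
 * Find the smallest unused devid by looking up the last dev item in the
 * chunk tree and adding 1 to its key offset.
 */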
1828 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1829 u64 *devid_ret)
1830 {
1831 int ret;
1832 struct btrfs_key key;
1833 struct btrfs_key found_key;
1834 struct btrfs_path *path;
1835
1836 path = btrfs_alloc_path();
1837 if (!path)
1838 return -ENOMEM;
1839
1840 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1841 key.type = BTRFS_DEV_ITEM_KEY;
1842 key.offset = (u64)-1;
1843
1844 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1845 if (ret < 0)
1846 goto error;
1847
1848 if (ret == 0) {
1849 /* Corruption */
1850 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1851 ret = -EUCLEAN;
1852 goto error;
1853 }
1854
1855 ret = btrfs_previous_item(fs_info->chunk_root, path,
1856 BTRFS_DEV_ITEMS_OBJECTID,
1857 BTRFS_DEV_ITEM_KEY);
1858 if (ret) {
1859 *devid_ret = 1;
1860 } else {
1861 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1862 path->slots[0]);
1863 *devid_ret = found_key.offset + 1;
1864 }
1865 ret = 0;
1866 error:
1867 btrfs_free_path(path);
1868 return ret;
1869 }
1870
1871 /*
1872 * The device information is stored in the chunk root.
1873 * The btrfs_device struct should be fully filled in.
1874 */
1875 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1876 struct btrfs_device *device)
1877 {
1878 int ret;
1879 struct btrfs_path *path;
1880 struct btrfs_dev_item *dev_item;
1881 struct extent_buffer *leaf;
1882 struct btrfs_key key;
1883 unsigned long ptr;
1884
1885 path = btrfs_alloc_path();
1886 if (!path)
1887 return -ENOMEM;
1888
1889 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1890 key.type = BTRFS_DEV_ITEM_KEY;
1891 key.offset = device->devid;
1892
1893 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1894 &key, sizeof(*dev_item));
1895 if (ret)
1896 goto out;
1897
1898 leaf = path->nodes[0];
1899 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1900
1901 btrfs_set_device_id(leaf, dev_item, device->devid);
1902 btrfs_set_device_generation(leaf, dev_item, 0);
1903 btrfs_set_device_type(leaf, dev_item, device->type);
1904 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1905 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1906 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1907 btrfs_set_device_total_bytes(leaf, dev_item,
1908 btrfs_device_get_disk_total_bytes(device));
1909 btrfs_set_device_bytes_used(leaf, dev_item,
1910 btrfs_device_get_bytes_used(device));
1911 btrfs_set_device_group(leaf, dev_item, 0);
1912 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1913 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1914 btrfs_set_device_start_offset(leaf, dev_item, 0);
1915
1916 ptr = btrfs_device_uuid(dev_item);
1917 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1918 ptr = btrfs_device_fsid(dev_item);
1919 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1920 ptr, BTRFS_FSID_SIZE);
1921 btrfs_mark_buffer_dirty(leaf);
1922
1923 ret = 0;
1924 out:
1925 btrfs_free_path(path);
1926 return ret;
1927 }
1928
1929 /*
1930 * Function to update ctime/mtime for a given device path.
1931 * Mainly used for ctime/mtime based probe like libblkid.
1932 *
1933 * We don't care about errors here, this is just to be kind to userspace.
1934 */
1935 static void update_dev_time(const char *device_path)
1936 {
1937 struct path path;
1938 struct timespec64 now;
1939 int ret;
1940
1941 ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1942 if (ret)
1943 return;
1944
1945 now = current_time(d_inode(path.dentry));
1946 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1947 path_put(&path);
1948 }
1949
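/*
 * Delete the dev item for @device from the chunk tree in its own transaction
 * and commit it.
 */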
1950 static int btrfs_rm_dev_item(struct btrfs_device *device)
1951 {
1952 struct btrfs_root *root = device->fs_info->chunk_root;
1953 int ret;
1954 struct btrfs_path *path;
1955 struct btrfs_key key;
1956 struct btrfs_trans_handle *trans;
1957
1958 path = btrfs_alloc_path();
1959 if (!path)
1960 return -ENOMEM;
1961
1962 trans = btrfs_start_transaction(root, 0);
1963 if (IS_ERR(trans)) {
1964 btrfs_free_path(path);
1965 return PTR_ERR(trans);
1966 }
1967 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1968 key.type = BTRFS_DEV_ITEM_KEY;
1969 key.offset = device->devid;
1970
1971 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1972 if (ret) {
1973 if (ret > 0)
1974 ret = -ENOENT;
1975 btrfs_abort_transaction(trans, ret);
1976 btrfs_end_transaction(trans);
1977 goto out;
1978 }
1979
1980 ret = btrfs_del_item(trans, root, path);
1981 if (ret) {
1982 btrfs_abort_transaction(trans, ret);
1983 btrfs_end_transaction(trans);
1984 }
1985
1986 out:
1987 btrfs_free_path(path);
1988 if (!ret)
1989 ret = btrfs_commit_transaction(trans);
1990 return ret;
1991 }
1992
1993 /*
1994 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1995 * filesystem. It's up to the caller to adjust that number regarding eg. device
1996 * replace.
1997 */
1998 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1999 u64 num_devices)
2000 {
2001 u64 all_avail;
2002 unsigned seq;
2003 int i;
2004
2005 do {
2006 seq = read_seqbegin(&fs_info->profiles_lock);
2007
2008 all_avail = fs_info->avail_data_alloc_bits |
2009 fs_info->avail_system_alloc_bits |
2010 fs_info->avail_metadata_alloc_bits;
2011 } while (read_seqretry(&fs_info->profiles_lock, seq));
2012
2013 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2014 if (!(all_avail & btrfs_raid_array[i].bg_flag))
2015 continue;
2016
2017 if (num_devices < btrfs_raid_array[i].devs_min) {
2018 int ret = btrfs_raid_array[i].mindev_error;
2019
2020 if (ret)
2021 return ret;
2022 }
2023 }
2024
2025 return 0;
2026 }
2027
2028 static struct btrfs_device * btrfs_find_next_active_device(
2029 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2030 {
2031 struct btrfs_device *next_device;
2032
2033 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2034 if (next_device != device &&
2035 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2036 && next_device->bdev)
2037 return next_device;
2038 }
2039
2040 return NULL;
2041 }
2042
2043 /*
2044 * Helper function to check if the given device is part of s_bdev / latest_bdev
2045 * and replace it with the provided or the next active device. In the context
2046 * where this function is called, there should always be another device (or
2047 * this_dev) which is active.
2048 */
2049 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2050 struct btrfs_device *next_device)
2051 {
2052 struct btrfs_fs_info *fs_info = device->fs_info;
2053
2054 if (!next_device)
2055 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2056 device);
2057 ASSERT(next_device);
2058
2059 if (fs_info->sb->s_bdev &&
2060 (fs_info->sb->s_bdev == device->bdev))
2061 fs_info->sb->s_bdev = next_device->bdev;
2062
2063 if (fs_info->fs_devices->latest_bdev == device->bdev)
2064 fs_info->fs_devices->latest_bdev = next_device->bdev;
2065 }
2066
2067 /*
2068 * Return btrfs_fs_devices::num_devices excluding the device that's being
2069 * currently replaced.
2070 */
2071 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2072 {
2073 u64 num_devices = fs_info->fs_devices->num_devices;
2074
2075 down_read(&fs_info->dev_replace.rwsem);
2076 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2077 ASSERT(num_devices > 1);
2078 num_devices--;
2079 }
2080 up_read(&fs_info->dev_replace.rwsem);
2081
2082 return num_devices;
2083 }
2084
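/*
 * Wipe the magic from every superblock copy on @bdev so the device is no
 * longer recognized as a btrfs member, then send a udev change event and
 * update the device path's timestamps for blkid.
 */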
2085 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2086 struct block_device *bdev,
2087 const char *device_path)
2088 {
2089 struct btrfs_super_block *disk_super;
2090 int copy_num;
2091
2092 if (!bdev)
2093 return;
2094
2095 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2096 struct page *page;
2097 int ret;
2098
2099 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2100 if (IS_ERR(disk_super))
2101 continue;
2102
2103 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2104
2105 page = virt_to_page(disk_super);
2106 set_page_dirty(page);
2107 lock_page(page);
2108 /* write_one_page() unlocks the page */
2109 ret = write_one_page(page);
2110 if (ret)
2111 btrfs_warn(fs_info,
2112 "error clearing superblock number %d (%d)",
2113 copy_num, ret);
2114 btrfs_release_disk_super(disk_super);
2115
2116 }
2117
2118 /* Notify udev that device has changed */
2119 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2120
2121 /* Update ctime/mtime for device path for libblkid */
2122 update_dev_time(device_path);
2123 }
2124
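/*
 * Remove a device from the filesystem: verify the RAID constraints still
 * hold with one device less, shrink the device to zero, delete its dev item,
 * detach it from the device lists and scratch its superblocks.
 */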
2125 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2126 u64 devid)
2127 {
2128 struct btrfs_device *device;
2129 struct btrfs_fs_devices *cur_devices;
2130 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2131 u64 num_devices;
2132 int ret = 0;
2133
2134 /*
2135 * The device list in fs_devices is accessed without locks (neither
2136 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2137 * filesystem and another device rm cannot run.
2138 */
2139 num_devices = btrfs_num_devices(fs_info);
2140
2141 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2142 if (ret)
2143 goto out;
2144
2145 device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2146
2147 if (IS_ERR(device)) {
2148 if (PTR_ERR(device) == -ENOENT &&
2149 device_path && strcmp(device_path, "missing") == 0)
2150 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2151 else
2152 ret = PTR_ERR(device);
2153 goto out;
2154 }
2155
2156 if (btrfs_pinned_by_swapfile(fs_info, device)) {
2157 btrfs_warn_in_rcu(fs_info,
2158 "cannot remove device %s (devid %llu) due to active swapfile",
2159 rcu_str_deref(device->name), device->devid);
2160 ret = -ETXTBSY;
2161 goto out;
2162 }
2163
2164 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2165 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2166 goto out;
2167 }
2168
2169 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2170 fs_info->fs_devices->rw_devices == 1) {
2171 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2172 goto out;
2173 }
2174
2175 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2176 mutex_lock(&fs_info->chunk_mutex);
2177 list_del_init(&device->dev_alloc_list);
2178 device->fs_devices->rw_devices--;
2179 mutex_unlock(&fs_info->chunk_mutex);
2180 }
2181
2182 ret = btrfs_shrink_device(device, 0);
2183 if (!ret)
2184 btrfs_reada_remove_dev(device);
2185 if (ret)
2186 goto error_undo;
2187
2188 /*
2189 * TODO: the superblock still includes this device in its num_devices
2190 * counter although write_all_supers() is not locked out. This
2191 * could give a filesystem state which requires a degraded mount.
2192 */
2193 ret = btrfs_rm_dev_item(device);
2194 if (ret)
2195 goto error_undo;
2196
2197 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2198 btrfs_scrub_cancel_dev(device);
2199
2200 /*
2201 * the device list mutex makes sure that we don't change
2202 * the device list while someone else is writing out all
2203 * the device supers. Whoever is writing all supers should
2204 * lock the device list mutex before getting the number of
2205 * devices in the super block (super_copy). Conversely,
2206 * whoever updates the number of devices in the super block
2207 * (super_copy) should hold the device list mutex.
2208 */
2209
2210 /*
2211 * In normal cases cur_devices == fs_devices. But when deleting
2212 * a seed device, cur_devices should point to the seed's own
2213 * fs_devices, listed under fs_devices->seed_list.
2214 */
2215 cur_devices = device->fs_devices;
2216 mutex_lock(&fs_devices->device_list_mutex);
2217 list_del_rcu(&device->dev_list);
2218
2219 cur_devices->num_devices--;
2220 cur_devices->total_devices--;
2221 /* Update total_devices of the parent fs_devices if it's seed */
2222 if (cur_devices != fs_devices)
2223 fs_devices->total_devices--;
2224
2225 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2226 cur_devices->missing_devices--;
2227
2228 btrfs_assign_next_active_device(device, NULL);
2229
2230 if (device->bdev) {
2231 cur_devices->open_devices--;
2232 /* remove sysfs entry */
2233 btrfs_sysfs_remove_device(device);
2234 }
2235
2236 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2237 btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2238 mutex_unlock(&fs_devices->device_list_mutex);
2239
2240 /*
2241 * at this point, the device is zero sized and detached from
2242 * the devices list. All that's left is to zero out the old
2243 * supers and free the device.
2244 */
2245 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2246 btrfs_scratch_superblocks(fs_info, device->bdev,
2247 device->name->str);
2248
2249 btrfs_close_bdev(device);
2250 synchronize_rcu();
2251 btrfs_free_device(device);
2252
2253 if (cur_devices->open_devices == 0) {
2254 list_del_init(&cur_devices->seed_list);
2255 close_fs_devices(cur_devices);
2256 free_fs_devices(cur_devices);
2257 }
2258
2259 out:
2260 return ret;
2261
2262 error_undo:
2263 btrfs_reada_undo_remove_dev(device);
2264 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2265 mutex_lock(&fs_info->chunk_mutex);
2266 list_add(&device->dev_alloc_list,
2267 &fs_devices->alloc_list);
2268 device->fs_devices->rw_devices++;
2269 mutex_unlock(&fs_info->chunk_mutex);
2270 }
2271 goto out;
2272 }
2273
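/*
 * Unlist the source device of a finished device replace and adjust the
 * counters of the fs_devices it belongs to.
 */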
2274 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2275 {
2276 struct btrfs_fs_devices *fs_devices;
2277
2278 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2279
2280 /*
2281 * In case of an fs with no seed, srcdev->fs_devices will point
2282 * to the fs_devices of fs_info. However, when the dev being replaced
2283 * is a seed dev it will point to the seed's local fs_devices. In short,
2284 * srcdev will have its correct fs_devices in both cases.
2285 */
2286 fs_devices = srcdev->fs_devices;
2287
2288 list_del_rcu(&srcdev->dev_list);
2289 list_del(&srcdev->dev_alloc_list);
2290 fs_devices->num_devices--;
2291 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2292 fs_devices->missing_devices--;
2293
2294 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2295 fs_devices->rw_devices--;
2296
2297 if (srcdev->bdev)
2298 fs_devices->open_devices--;
2299 }
2300
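/*
 * Release the source device of a finished replace: close its bdev, free the
 * btrfs_device and, if it was the last device of a seed fs_devices, free
 * that fs_devices too.
 */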
2301 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2302 {
2303 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2304
2305 mutex_lock(&uuid_mutex);
2306
2307 btrfs_close_bdev(srcdev);
2308 synchronize_rcu();
2309 btrfs_free_device(srcdev);
2310
2311 /* if there are no devs left we'd rather delete the fs_devices */
2312 if (!fs_devices->num_devices) {
2313 /*
2314 * On a mounted FS, num_devices can't be zero unless it's a
2315 * seed. In case of a seed device being replaced, the replace
2316 * target is added to the sprout FS, so there will be no more
2317 * devices left under the seed FS.
2318 */
2319 ASSERT(fs_devices->seeding);
2320
2321 list_del_init(&fs_devices->seed_list);
2322 close_fs_devices(fs_devices);
2323 free_fs_devices(fs_devices);
2324 }
2325 mutex_unlock(&uuid_mutex);
2326 }
2327
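/*
 * Tear down the target device of a device replace: drop it from the device
 * list, scratch its superblocks, close and free it.
 */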
2328 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2329 {
2330 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2331
2332 mutex_lock(&fs_devices->device_list_mutex);
2333
2334 btrfs_sysfs_remove_device(tgtdev);
2335
2336 if (tgtdev->bdev)
2337 fs_devices->open_devices--;
2338
2339 fs_devices->num_devices--;
2340
2341 btrfs_assign_next_active_device(tgtdev, NULL);
2342
2343 list_del_rcu(&tgtdev->dev_list);
2344
2345 mutex_unlock(&fs_devices->device_list_mutex);
2346
2347 /*
2348 * The update_dev_time() within btrfs_scratch_superblocks()
2349 * may lead to a call to btrfs_show_devname() which will try
2350 * to hold device_list_mutex. And here this device
2351 * is already out of the device list, so we don't have to hold
2352 * the device_list_mutex lock.
2353 */
2354 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2355 tgtdev->name->str);
2356
2357 btrfs_close_bdev(tgtdev);
2358 synchronize_rcu();
2359 btrfs_free_device(tgtdev);
2360 }
2361
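/*
 * Open the block device at @device_path read-only, read its superblock and
 * look up the matching btrfs_device by devid and UUIDs.
 */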
2362 static struct btrfs_device *btrfs_find_device_by_path(
2363 struct btrfs_fs_info *fs_info, const char *device_path)
2364 {
2365 int ret = 0;
2366 struct btrfs_super_block *disk_super;
2367 u64 devid;
2368 u8 *dev_uuid;
2369 struct block_device *bdev;
2370 struct btrfs_device *device;
2371
2372 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2373 fs_info->bdev_holder, 0, &bdev, &disk_super);
2374 if (ret)
2375 return ERR_PTR(ret);
2376
2377 devid = btrfs_stack_device_id(&disk_super->dev_item);
2378 dev_uuid = disk_super->dev_item.uuid;
2379 if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2380 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2381 disk_super->metadata_uuid, true);
2382 else
2383 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2384 disk_super->fsid, true);
2385
2386 btrfs_release_disk_super(disk_super);
2387 if (!device)
2388 device = ERR_PTR(-ENOENT);
2389 blkdev_put(bdev, FMODE_READ);
2390 return device;
2391 }
2392
2393 /*
2394 * Lookup a device given by device id, or the path if the id is 0.
2395 */
2396 struct btrfs_device *btrfs_find_device_by_devspec(
2397 struct btrfs_fs_info *fs_info, u64 devid,
2398 const char *device_path)
2399 {
2400 struct btrfs_device *device;
2401
2402 if (devid) {
2403 device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2404 NULL, true);
2405 if (!device)
2406 return ERR_PTR(-ENOENT);
2407 return device;
2408 }
2409
2410 if (!device_path || !device_path[0])
2411 return ERR_PTR(-EINVAL);
2412
2413 if (strcmp(device_path, "missing") == 0) {
2414 /* Find first missing device */
2415 list_for_each_entry(device, &fs_info->fs_devices->devices,
2416 dev_list) {
2417 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2418 &device->dev_state) && !device->bdev)
2419 return device;
2420 }
2421 return ERR_PTR(-ENOENT);
2422 }
2423
2424 return btrfs_find_device_by_path(fs_info, device_path);
2425 }
2426
2427 /*
2428 * Does all the dirty work required for changing the filesystem's UUID.
2429 */
2430 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2431 {
2432 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2433 struct btrfs_fs_devices *old_devices;
2434 struct btrfs_fs_devices *seed_devices;
2435 struct btrfs_super_block *disk_super = fs_info->super_copy;
2436 struct btrfs_device *device;
2437 u64 super_flags;
2438
2439 lockdep_assert_held(&uuid_mutex);
2440 if (!fs_devices->seeding)
2441 return -EINVAL;
2442
2443 /*
2444 * Private copy of the seed devices, anchored at
2445 * fs_info->fs_devices->seed_list
2446 */
2447 seed_devices = alloc_fs_devices(NULL, NULL);
2448 if (IS_ERR(seed_devices))
2449 return PTR_ERR(seed_devices);
2450
2451 /*
2452 * It's necessary to retain a copy of the original seed fs_devices in
2453 * fs_uuids so that filesystems which have been seeded can successfully
2454 * reference the seed device from open_seed_devices. This also supports
2455 * multiple seed filesystems.
2456 */
2457 old_devices = clone_fs_devices(fs_devices);
2458 if (IS_ERR(old_devices)) {
2459 kfree(seed_devices);
2460 return PTR_ERR(old_devices);
2461 }
2462
2463 list_add(&old_devices->fs_list, &fs_uuids);
2464
2465 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2466 seed_devices->opened = 1;
2467 INIT_LIST_HEAD(&seed_devices->devices);
2468 INIT_LIST_HEAD(&seed_devices->alloc_list);
2469 mutex_init(&seed_devices->device_list_mutex);
2470
2471 mutex_lock(&fs_devices->device_list_mutex);
2472 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2473 synchronize_rcu);
2474 list_for_each_entry(device, &seed_devices->devices, dev_list)
2475 device->fs_devices = seed_devices;
2476
2477 fs_devices->seeding = false;
2478 fs_devices->num_devices = 0;
2479 fs_devices->open_devices = 0;
2480 fs_devices->missing_devices = 0;
2481 fs_devices->rotating = false;
2482 list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2483
2484 generate_random_uuid(fs_devices->fsid);
2485 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2486 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2487 mutex_unlock(&fs_devices->device_list_mutex);
2488
2489 super_flags = btrfs_super_flags(disk_super) &
2490 ~BTRFS_SUPER_FLAG_SEEDING;
2491 btrfs_set_super_flags(disk_super, super_flags);
2492
2493 return 0;
2494 }
2495
2496 /*
2497 * Store the expected generation for seed devices in device items.
2498 */
2499 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2500 {
2501 struct btrfs_fs_info *fs_info = trans->fs_info;
2502 struct btrfs_root *root = fs_info->chunk_root;
2503 struct btrfs_path *path;
2504 struct extent_buffer *leaf;
2505 struct btrfs_dev_item *dev_item;
2506 struct btrfs_device *device;
2507 struct btrfs_key key;
2508 u8 fs_uuid[BTRFS_FSID_SIZE];
2509 u8 dev_uuid[BTRFS_UUID_SIZE];
2510 u64 devid;
2511 int ret;
2512
2513 path = btrfs_alloc_path();
2514 if (!path)
2515 return -ENOMEM;
2516
2517 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2518 key.offset = 0;
2519 key.type = BTRFS_DEV_ITEM_KEY;
2520
2521 while (1) {
2522 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2523 if (ret < 0)
2524 goto error;
2525
2526 leaf = path->nodes[0];
2527 next_slot:
2528 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2529 ret = btrfs_next_leaf(root, path);
2530 if (ret > 0)
2531 break;
2532 if (ret < 0)
2533 goto error;
2534 leaf = path->nodes[0];
2535 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2536 btrfs_release_path(path);
2537 continue;
2538 }
2539
2540 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2541 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2542 key.type != BTRFS_DEV_ITEM_KEY)
2543 break;
2544
2545 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2546 struct btrfs_dev_item);
2547 devid = btrfs_device_id(leaf, dev_item);
2548 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2549 BTRFS_UUID_SIZE);
2550 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2551 BTRFS_FSID_SIZE);
2552 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2553 fs_uuid, true);
2554 BUG_ON(!device); /* Logic error */
2555
2556 if (device->fs_devices->seeding) {
2557 btrfs_set_device_generation(leaf, dev_item,
2558 device->generation);
2559 btrfs_mark_buffer_dirty(leaf);
2560 }
2561
2562 path->slots[0]++;
2563 goto next_slot;
2564 }
2565 ret = 0;
2566 error:
2567 btrfs_free_path(path);
2568 return ret;
2569 }
2570
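/*
 * Add the block device at @device_path as a new member of the mounted
 * filesystem. For a seeding filesystem this also creates the sprout, i.e. a
 * new writable filesystem on top of the read-only seed.
 */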
2571 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2572 {
2573 struct btrfs_root *root = fs_info->dev_root;
2574 struct request_queue *q;
2575 struct btrfs_trans_handle *trans;
2576 struct btrfs_device *device;
2577 struct block_device *bdev;
2578 struct super_block *sb = fs_info->sb;
2579 struct rcu_string *name;
2580 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2581 u64 orig_super_total_bytes;
2582 u64 orig_super_num_devices;
2583 int seeding_dev = 0;
2584 int ret = 0;
2585 bool locked = false;
2586
2587 if (sb_rdonly(sb) && !fs_devices->seeding)
2588 return -EROFS;
2589
2590 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2591 fs_info->bdev_holder);
2592 if (IS_ERR(bdev))
2593 return PTR_ERR(bdev);
2594
2595 if (fs_devices->seeding) {
2596 seeding_dev = 1;
2597 down_write(&sb->s_umount);
2598 mutex_lock(&uuid_mutex);
2599 locked = true;
2600 }
2601
2602 sync_blockdev(bdev);
2603
2604 rcu_read_lock();
2605 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2606 if (device->bdev == bdev) {
2607 ret = -EEXIST;
2608 rcu_read_unlock();
2609 goto error;
2610 }
2611 }
2612 rcu_read_unlock();
2613
2614 device = btrfs_alloc_device(fs_info, NULL, NULL);
2615 if (IS_ERR(device)) {
2616 /* we can safely leave the fs_devices entry around */
2617 ret = PTR_ERR(device);
2618 goto error;
2619 }
2620
2621 name = rcu_string_strdup(device_path, GFP_KERNEL);
2622 if (!name) {
2623 ret = -ENOMEM;
2624 goto error_free_device;
2625 }
2626 rcu_assign_pointer(device->name, name);
2627
2628 trans = btrfs_start_transaction(root, 0);
2629 if (IS_ERR(trans)) {
2630 ret = PTR_ERR(trans);
2631 goto error_free_device;
2632 }
2633
2634 q = bdev_get_queue(bdev);
2635 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2636 device->generation = trans->transid;
2637 device->io_width = fs_info->sectorsize;
2638 device->io_align = fs_info->sectorsize;
2639 device->sector_size = fs_info->sectorsize;
2640 device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2641 fs_info->sectorsize);
2642 device->disk_total_bytes = device->total_bytes;
2643 device->commit_total_bytes = device->total_bytes;
2644 device->fs_info = fs_info;
2645 device->bdev = bdev;
2646 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2647 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2648 device->mode = FMODE_EXCL;
2649 device->dev_stats_valid = 1;
2650 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2651
2652 if (seeding_dev) {
2653 sb->s_flags &= ~SB_RDONLY;
2654 ret = btrfs_prepare_sprout(fs_info);
2655 if (ret) {
2656 btrfs_abort_transaction(trans, ret);
2657 goto error_trans;
2658 }
2659 }
2660
2661 device->fs_devices = fs_devices;
2662
2663 mutex_lock(&fs_devices->device_list_mutex);
2664 mutex_lock(&fs_info->chunk_mutex);
2665 list_add_rcu(&device->dev_list, &fs_devices->devices);
2666 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2667 fs_devices->num_devices++;
2668 fs_devices->open_devices++;
2669 fs_devices->rw_devices++;
2670 fs_devices->total_devices++;
2671 fs_devices->total_rw_bytes += device->total_bytes;
2672
2673 atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2674
2675 if (!blk_queue_nonrot(q))
2676 fs_devices->rotating = true;
2677
2678 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2679 btrfs_set_super_total_bytes(fs_info->super_copy,
2680 round_down(orig_super_total_bytes + device->total_bytes,
2681 fs_info->sectorsize));
2682
2683 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2684 btrfs_set_super_num_devices(fs_info->super_copy,
2685 orig_super_num_devices + 1);
2686
2687 /*
2688 * we've got more storage, clear any full flags on the space
2689 * infos
2690 */
2691 btrfs_clear_space_info_full(fs_info);
2692
2693 mutex_unlock(&fs_info->chunk_mutex);
2694
2695 /* Add sysfs device entry */
2696 btrfs_sysfs_add_device(device);
2697
2698 mutex_unlock(&fs_devices->device_list_mutex);
2699
2700 if (seeding_dev) {
2701 mutex_lock(&fs_info->chunk_mutex);
2702 ret = init_first_rw_device(trans);
2703 mutex_unlock(&fs_info->chunk_mutex);
2704 if (ret) {
2705 btrfs_abort_transaction(trans, ret);
2706 goto error_sysfs;
2707 }
2708 }
2709
2710 ret = btrfs_add_dev_item(trans, device);
2711 if (ret) {
2712 btrfs_abort_transaction(trans, ret);
2713 goto error_sysfs;
2714 }
2715
2716 if (seeding_dev) {
2717 ret = btrfs_finish_sprout(trans);
2718 if (ret) {
2719 btrfs_abort_transaction(trans, ret);
2720 goto error_sysfs;
2721 }
2722
2723 /*
2724 * fs_devices now represents the newly sprouted filesystem and
2725 * its fsid has been changed by btrfs_prepare_sprout
2726 */
2727 btrfs_sysfs_update_sprout_fsid(fs_devices);
2728 }
2729
2730 ret = btrfs_commit_transaction(trans);
2731
2732 if (seeding_dev) {
2733 mutex_unlock(&uuid_mutex);
2734 up_write(&sb->s_umount);
2735 locked = false;
2736
2737 if (ret) /* transaction commit */
2738 return ret;
2739
2740 ret = btrfs_relocate_sys_chunks(fs_info);
2741 if (ret < 0)
2742 btrfs_handle_fs_error(fs_info, ret,
2743 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2744 trans = btrfs_attach_transaction(root);
2745 if (IS_ERR(trans)) {
2746 if (PTR_ERR(trans) == -ENOENT)
2747 return 0;
2748 ret = PTR_ERR(trans);
2749 trans = NULL;
2750 goto error_sysfs;
2751 }
2752 ret = btrfs_commit_transaction(trans);
2753 }
2754
2755 /*
2756 * Now that we have written a new super block to this device, check all
2757 * other fs_devices lists to see if device_path alienates any other
2758 * scanned device.
2759 * We can ignore the return value as it typically returns -EINVAL and
2760 * only succeeds if the device was an alien.
2761 */
2762 btrfs_forget_devices(device_path);
2763
2764 /* Update ctime/mtime for blkid or udev */
2765 update_dev_time(device_path);
2766
2767 return ret;
2768
2769 error_sysfs:
2770 btrfs_sysfs_remove_device(device);
2771 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2772 mutex_lock(&fs_info->chunk_mutex);
2773 list_del_rcu(&device->dev_list);
2774 list_del(&device->dev_alloc_list);
2775 fs_info->fs_devices->num_devices--;
2776 fs_info->fs_devices->open_devices--;
2777 fs_info->fs_devices->rw_devices--;
2778 fs_info->fs_devices->total_devices--;
2779 fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2780 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2781 btrfs_set_super_total_bytes(fs_info->super_copy,
2782 orig_super_total_bytes);
2783 btrfs_set_super_num_devices(fs_info->super_copy,
2784 orig_super_num_devices);
2785 mutex_unlock(&fs_info->chunk_mutex);
2786 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2787 error_trans:
2788 if (seeding_dev)
2789 sb->s_flags |= SB_RDONLY;
2790 if (trans)
2791 btrfs_end_transaction(trans);
2792 error_free_device:
2793 btrfs_free_device(device);
2794 error:
2795 blkdev_put(bdev, FMODE_EXCL);
2796 if (locked) {
2797 mutex_unlock(&uuid_mutex);
2798 up_write(&sb->s_umount);
2799 }
2800 return ret;
2801 }
2802
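/*
 * Write the current in-memory state of @device back into its dev item in the
 * chunk tree.
 */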
2803 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2804 struct btrfs_device *device)
2805 {
2806 int ret;
2807 struct btrfs_path *path;
2808 struct btrfs_root *root = device->fs_info->chunk_root;
2809 struct btrfs_dev_item *dev_item;
2810 struct extent_buffer *leaf;
2811 struct btrfs_key key;
2812
2813 path = btrfs_alloc_path();
2814 if (!path)
2815 return -ENOMEM;
2816
2817 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2818 key.type = BTRFS_DEV_ITEM_KEY;
2819 key.offset = device->devid;
2820
2821 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2822 if (ret < 0)
2823 goto out;
2824
2825 if (ret > 0) {
2826 ret = -ENOENT;
2827 goto out;
2828 }
2829
2830 leaf = path->nodes[0];
2831 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2832
2833 btrfs_set_device_id(leaf, dev_item, device->devid);
2834 btrfs_set_device_type(leaf, dev_item, device->type);
2835 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2836 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2837 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2838 btrfs_set_device_total_bytes(leaf, dev_item,
2839 btrfs_device_get_disk_total_bytes(device));
2840 btrfs_set_device_bytes_used(leaf, dev_item,
2841 btrfs_device_get_bytes_used(device));
2842 btrfs_mark_buffer_dirty(leaf);
2843
2844 out:
2845 btrfs_free_path(path);
2846 return ret;
2847 }
2848
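/*
 * Grow @device to @new_size (rounded down to the sector size), updating the
 * superblock's total bytes and the device's dev item.
 */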
2849 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2850 struct btrfs_device *device, u64 new_size)
2851 {
2852 struct btrfs_fs_info *fs_info = device->fs_info;
2853 struct btrfs_super_block *super_copy = fs_info->super_copy;
2854 u64 old_total;
2855 u64 diff;
2856
2857 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2858 return -EACCES;
2859
2860 new_size = round_down(new_size, fs_info->sectorsize);
2861
2862 mutex_lock(&fs_info->chunk_mutex);
2863 old_total = btrfs_super_total_bytes(super_copy);
2864 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2865
2866 if (new_size <= device->total_bytes ||
2867 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2868 mutex_unlock(&fs_info->chunk_mutex);
2869 return -EINVAL;
2870 }
2871
2872 btrfs_set_super_total_bytes(super_copy,
2873 round_down(old_total + diff, fs_info->sectorsize));
2874 device->fs_devices->total_rw_bytes += diff;
2875
2876 btrfs_device_set_total_bytes(device, new_size);
2877 btrfs_device_set_disk_total_bytes(device, new_size);
2878 btrfs_clear_space_info_full(device->fs_info);
2879 if (list_empty(&device->post_commit_list))
2880 list_add_tail(&device->post_commit_list,
2881 &trans->transaction->dev_update_list);
2882 mutex_unlock(&fs_info->chunk_mutex);
2883
2884 return btrfs_update_device(trans, device);
2885 }
2886
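/*
 * Delete the chunk item for @chunk_offset from the chunk tree.
 */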
2887 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2888 {
2889 struct btrfs_fs_info *fs_info = trans->fs_info;
2890 struct btrfs_root *root = fs_info->chunk_root;
2891 int ret;
2892 struct btrfs_path *path;
2893 struct btrfs_key key;
2894
2895 path = btrfs_alloc_path();
2896 if (!path)
2897 return -ENOMEM;
2898
2899 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2900 key.offset = chunk_offset;
2901 key.type = BTRFS_CHUNK_ITEM_KEY;
2902
2903 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2904 if (ret < 0)
2905 goto out;
2906 else if (ret > 0) { /* Logic error or corruption */
2907 btrfs_handle_fs_error(fs_info, -ENOENT,
2908 "Failed lookup while freeing chunk.");
2909 ret = -ENOENT;
2910 goto out;
2911 }
2912
2913 ret = btrfs_del_item(trans, root, path);
2914 if (ret < 0)
2915 btrfs_handle_fs_error(fs_info, ret,
2916 "Failed to delete chunk item.");
2917 out:
2918 btrfs_free_path(path);
2919 return ret;
2920 }
2921
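/*
 * Remove the chunk at @chunk_offset from the sys_chunk_array embedded in the
 * superblock, compacting the array in place.
 */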
2922 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2923 {
2924 struct btrfs_super_block *super_copy = fs_info->super_copy;
2925 struct btrfs_disk_key *disk_key;
2926 struct btrfs_chunk *chunk;
2927 u8 *ptr;
2928 int ret = 0;
2929 u32 num_stripes;
2930 u32 array_size;
2931 u32 len = 0;
2932 u32 cur;
2933 struct btrfs_key key;
2934
2935 mutex_lock(&fs_info->chunk_mutex);
2936 array_size = btrfs_super_sys_array_size(super_copy);
2937
2938 ptr = super_copy->sys_chunk_array;
2939 cur = 0;
2940
2941 while (cur < array_size) {
2942 disk_key = (struct btrfs_disk_key *)ptr;
2943 btrfs_disk_key_to_cpu(&key, disk_key);
2944
2945 len = sizeof(*disk_key);
2946
2947 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2948 chunk = (struct btrfs_chunk *)(ptr + len);
2949 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2950 len += btrfs_chunk_item_size(num_stripes);
2951 } else {
2952 ret = -EIO;
2953 break;
2954 }
2955 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2956 key.offset == chunk_offset) {
2957 memmove(ptr, ptr + len, array_size - (cur + len));
2958 array_size -= len;
2959 btrfs_set_super_sys_array_size(super_copy, array_size);
2960 } else {
2961 ptr += len;
2962 cur += len;
2963 }
2964 }
2965 mutex_unlock(&fs_info->chunk_mutex);
2966 return ret;
2967 }
2968
2969 /*
2970 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
* @fs_info: The filesystem.
2971 * @logical: Logical block offset in bytes.
2972 * @length: Length of extent in bytes.
2973 *
2974 * Return: Chunk mapping or ERR_PTR.
2975 */
2976 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2977 u64 logical, u64 length)
2978 {
2979 struct extent_map_tree *em_tree;
2980 struct extent_map *em;
2981
2982 em_tree = &fs_info->mapping_tree;
2983 read_lock(&em_tree->lock);
2984 em = lookup_extent_mapping(em_tree, logical, length);
2985 read_unlock(&em_tree->lock);
2986
2987 if (!em) {
2988 btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2989 logical, length);
2990 return ERR_PTR(-EINVAL);
2991 }
2992
2993 if (em->start > logical || em->start + em->len < logical) {
2994 btrfs_crit(fs_info,
2995 "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2996 logical, length, em->start, em->start + em->len);
2997 free_extent_map(em);
2998 return ERR_PTR(-EINVAL);
2999 }
3000
3001 /* callers are responsible for dropping em's ref. */
3002 return em;
3003 }
3004
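/*
 * Remove the chunk at @chunk_offset after its block group has been
 * relocated: free the dev extents of every stripe, delete the chunk item
 * (and its sys_chunk_array copy for SYSTEM chunks) and remove the block
 * group.
 */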
3005 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3006 {
3007 struct btrfs_fs_info *fs_info = trans->fs_info;
3008 struct extent_map *em;
3009 struct map_lookup *map;
3010 u64 dev_extent_len = 0;
3011 int i, ret = 0;
3012 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3013
3014 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3015 if (IS_ERR(em)) {
3016 /*
3017 * This is a logic error, but we don't want to just rely on the
3018 * user having built with ASSERT enabled, so if ASSERT doesn't
3019 * do anything we still error out.
3020 */
3021 ASSERT(0);
3022 return PTR_ERR(em);
3023 }
3024 map = em->map_lookup;
3025 mutex_lock(&fs_info->chunk_mutex);
3026 check_system_chunk(trans, map->type);
3027 mutex_unlock(&fs_info->chunk_mutex);
3028
3029 /*
3030 * Take the device list mutex to prevent races with the final phase of
3031 * a device replace operation that replaces the device object associated
3032 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
3033 */
3034 mutex_lock(&fs_devices->device_list_mutex);
3035 for (i = 0; i < map->num_stripes; i++) {
3036 struct btrfs_device *device = map->stripes[i].dev;
3037 ret = btrfs_free_dev_extent(trans, device,
3038 map->stripes[i].physical,
3039 &dev_extent_len);
3040 if (ret) {
3041 mutex_unlock(&fs_devices->device_list_mutex);
3042 btrfs_abort_transaction(trans, ret);
3043 goto out;
3044 }
3045
3046 if (device->bytes_used > 0) {
3047 mutex_lock(&fs_info->chunk_mutex);
3048 btrfs_device_set_bytes_used(device,
3049 device->bytes_used - dev_extent_len);
3050 atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3051 btrfs_clear_space_info_full(fs_info);
3052 mutex_unlock(&fs_info->chunk_mutex);
3053 }
3054
3055 ret = btrfs_update_device(trans, device);
3056 if (ret) {
3057 mutex_unlock(&fs_devices->device_list_mutex);
3058 btrfs_abort_transaction(trans, ret);
3059 goto out;
3060 }
3061 }
3062 mutex_unlock(&fs_devices->device_list_mutex);
3063
3064 ret = btrfs_free_chunk(trans, chunk_offset);
3065 if (ret) {
3066 btrfs_abort_transaction(trans, ret);
3067 goto out;
3068 }
3069
3070 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3071
3072 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3073 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3074 if (ret) {
3075 btrfs_abort_transaction(trans, ret);
3076 goto out;
3077 }
3078 }
3079
3080 ret = btrfs_remove_block_group(trans, chunk_offset, em);
3081 if (ret) {
3082 btrfs_abort_transaction(trans, ret);
3083 goto out;
3084 }
3085
3086 out:
3087 /* once for us */
3088 free_extent_map(em);
3089 return ret;
3090 }
3091
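/*
 * Relocate the block group at @chunk_offset and then remove the now empty
 * chunk.
 */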
3092 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3093 {
3094 struct btrfs_root *root = fs_info->chunk_root;
3095 struct btrfs_trans_handle *trans;
3096 struct btrfs_block_group *block_group;
3097 int ret;
3098
3099 /*
3100 * Prevent races with automatic removal of unused block groups.
3101 * After we relocate and before we remove the chunk with offset
3102 * chunk_offset, automatic removal of the block group can kick in,
3103 * resulting in a failure when calling btrfs_remove_chunk() below.
3104 *
3105 * Make sure to acquire this mutex before doing a tree search (dev
3106 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3107 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3108 * we release the path used to search the chunk/dev tree and before
3109 * the current task acquires this mutex and calls us.
3110 */
3111 lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3112
3113 /* step one, relocate all the extents inside this chunk */
3114 btrfs_scrub_pause(fs_info);
3115 ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3116 btrfs_scrub_continue(fs_info);
3117 if (ret)
3118 return ret;
3119
3120 block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3121 if (!block_group)
3122 return -ENOENT;
3123 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3124 btrfs_put_block_group(block_group);
3125
3126 trans = btrfs_start_trans_remove_block_group(root->fs_info,
3127 chunk_offset);
3128 if (IS_ERR(trans)) {
3129 ret = PTR_ERR(trans);
3130 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3131 return ret;
3132 }
3133
3134 /*
3135 * step two, delete the device extents and the
3136 * chunk tree entries
3137 */
3138 ret = btrfs_remove_chunk(trans, chunk_offset);
3139 btrfs_end_transaction(trans);
3140 return ret;
3141 }
3142
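/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk. Chunks that
 * fail with -ENOSPC are retried once after the first pass has freed space.
 */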
3143 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3144 {
3145 struct btrfs_root *chunk_root = fs_info->chunk_root;
3146 struct btrfs_path *path;
3147 struct extent_buffer *leaf;
3148 struct btrfs_chunk *chunk;
3149 struct btrfs_key key;
3150 struct btrfs_key found_key;
3151 u64 chunk_type;
3152 bool retried = false;
3153 int failed = 0;
3154 int ret;
3155
3156 path = btrfs_alloc_path();
3157 if (!path)
3158 return -ENOMEM;
3159
3160 again:
3161 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3162 key.offset = (u64)-1;
3163 key.type = BTRFS_CHUNK_ITEM_KEY;
3164
3165 while (1) {
3166 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3167 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3168 if (ret < 0) {
3169 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3170 goto error;
3171 }
3172 BUG_ON(ret == 0); /* Corruption */
3173
3174 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3175 key.type);
3176 if (ret)
3177 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3178 if (ret < 0)
3179 goto error;
3180 if (ret > 0)
3181 break;
3182
3183 leaf = path->nodes[0];
3184 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3185
3186 chunk = btrfs_item_ptr(leaf, path->slots[0],
3187 struct btrfs_chunk);
3188 chunk_type = btrfs_chunk_type(leaf, chunk);
3189 btrfs_release_path(path);
3190
3191 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3192 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3193 if (ret == -ENOSPC)
3194 failed++;
3195 else
3196 BUG_ON(ret);
3197 }
3198 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3199
3200 if (found_key.offset == 0)
3201 break;
3202 key.offset = found_key.offset - 1;
3203 }
3204 ret = 0;
3205 if (failed && !retried) {
3206 failed = 0;
3207 retried = true;
3208 goto again;
3209 } else if (WARN_ON(failed && retried)) {
3210 ret = -ENOSPC;
3211 }
3212 error:
3213 btrfs_free_path(path);
3214 return ret;
3215 }
3216
3217 /*
3218 * return 1 : allocated a data chunk successfully,
3219 * return <0: error while allocating a data chunk,
3220 * return 0 : no need to allocate a data chunk.
3221 */
3222 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3223 u64 chunk_offset)
3224 {
3225 struct btrfs_block_group *cache;
3226 u64 bytes_used;
3227 u64 chunk_type;
3228
3229 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3230 ASSERT(cache);
3231 chunk_type = cache->flags;
3232 btrfs_put_block_group(cache);
3233
3234 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3235 return 0;
3236
3237 spin_lock(&fs_info->data_sinfo->lock);
3238 bytes_used = fs_info->data_sinfo->bytes_used;
3239 spin_unlock(&fs_info->data_sinfo->lock);
3240
3241 if (!bytes_used) {
3242 struct btrfs_trans_handle *trans;
3243 int ret;
3244
3245 trans = btrfs_join_transaction(fs_info->tree_root);
3246 if (IS_ERR(trans))
3247 return PTR_ERR(trans);
3248
3249 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3250 btrfs_end_transaction(trans);
3251 if (ret < 0)
3252 return ret;
3253 return 1;
3254 }
3255
3256 return 0;
3257 }
3258
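/*
 * Persist the balance state: store the balance args in a balance item in the
 * tree root so an interrupted balance can be resumed.
 */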
3259 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3260 struct btrfs_balance_control *bctl)
3261 {
3262 struct btrfs_root *root = fs_info->tree_root;
3263 struct btrfs_trans_handle *trans;
3264 struct btrfs_balance_item *item;
3265 struct btrfs_disk_balance_args disk_bargs;
3266 struct btrfs_path *path;
3267 struct extent_buffer *leaf;
3268 struct btrfs_key key;
3269 int ret, err;
3270
3271 path = btrfs_alloc_path();
3272 if (!path)
3273 return -ENOMEM;
3274
3275 trans = btrfs_start_transaction(root, 0);
3276 if (IS_ERR(trans)) {
3277 btrfs_free_path(path);
3278 return PTR_ERR(trans);
3279 }
3280
3281 key.objectid = BTRFS_BALANCE_OBJECTID;
3282 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3283 key.offset = 0;
3284
3285 ret = btrfs_insert_empty_item(trans, root, path, &key,
3286 sizeof(*item));
3287 if (ret)
3288 goto out;
3289
3290 leaf = path->nodes[0];
3291 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3292
3293 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3294
3295 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3296 btrfs_set_balance_data(leaf, item, &disk_bargs);
3297 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3298 btrfs_set_balance_meta(leaf, item, &disk_bargs);
3299 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3300 btrfs_set_balance_sys(leaf, item, &disk_bargs);
3301
3302 btrfs_set_balance_flags(leaf, item, bctl->flags);
3303
3304 btrfs_mark_buffer_dirty(leaf);
3305 out:
3306 btrfs_free_path(path);
3307 err = btrfs_commit_transaction(trans);
3308 if (err && !ret)
3309 ret = err;
3310 return ret;
3311 }
3312
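/*
 * Delete the balance item from the tree root once balance completes or is
 * canceled.
 */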
3313 static int del_balance_item(struct btrfs_fs_info *fs_info)
3314 {
3315 struct btrfs_root *root = fs_info->tree_root;
3316 struct btrfs_trans_handle *trans;
3317 struct btrfs_path *path;
3318 struct btrfs_key key;
3319 int ret, err;
3320
3321 path = btrfs_alloc_path();
3322 if (!path)
3323 return -ENOMEM;
3324
3325 trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3326 if (IS_ERR(trans)) {
3327 btrfs_free_path(path);
3328 return PTR_ERR(trans);
3329 }
3330
3331 key.objectid = BTRFS_BALANCE_OBJECTID;
3332 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3333 key.offset = 0;
3334
3335 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3336 if (ret < 0)
3337 goto out;
3338 if (ret > 0) {
3339 ret = -ENOENT;
3340 goto out;
3341 }
3342
3343 ret = btrfs_del_item(trans, root, path);
3344 out:
3345 btrfs_free_path(path);
3346 err = btrfs_commit_transaction(trans);
3347 if (err && !ret)
3348 ret = err;
3349 return ret;
3350 }
3351
3352 /*
3353 * This is a heuristic used to reduce the number of chunks balanced on
3354 * resume after balance was interrupted.
3355 */
3356 static void update_balance_args(struct btrfs_balance_control *bctl)
3357 {
3358 /*
3359 * Turn on soft mode for chunk types that were being converted.
3360 */
3361 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3362 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3363 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3364 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3365 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3366 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3367
3368 /*
3369 * Turn on usage filter if it is not already used. The idea is
3370 * that chunks that we have already balanced should be
3371 * reasonably full. Don't do it for chunks that are being
3372 * converted - that will keep us from relocating unconverted
3373 * (albeit full) chunks.
3374 */
3375 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3376 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3377 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3378 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3379 bctl->data.usage = 90;
3380 }
3381 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3382 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3383 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3384 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3385 bctl->sys.usage = 90;
3386 }
3387 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3388 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3389 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3390 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3391 bctl->meta.usage = 90;
3392 }
3393 }
3394
3395 /*
3396 * Clear the balance status in fs_info and delete the balance item from disk.
3397 */
3398 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3399 {
3400 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3401 int ret;
3402
3403 BUG_ON(!fs_info->balance_ctl);
3404
3405 spin_lock(&fs_info->balance_lock);
3406 fs_info->balance_ctl = NULL;
3407 spin_unlock(&fs_info->balance_lock);
3408
3409 kfree(bctl);
3410 ret = del_balance_item(fs_info);
3411 if (ret)
3412 btrfs_handle_fs_error(fs_info, ret, NULL);
3413 }
3414
3415 /*
3416 * Balance filters. Return 1 if chunk should be filtered out
3417 * (should not be balanced).
3418 */
3419 static int chunk_profiles_filter(u64 chunk_type,
3420 struct btrfs_balance_args *bargs)
3421 {
3422 chunk_type = chunk_to_extended(chunk_type) &
3423 BTRFS_EXTENDED_PROFILE_MASK;
3424
3425 if (bargs->profiles & chunk_type)
3426 return 0;
3427
3428 return 1;
3429 }
3430
3431 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3432 struct btrfs_balance_args *bargs)
3433 {
3434 struct btrfs_block_group *cache;
3435 u64 chunk_used;
3436 u64 user_thresh_min;
3437 u64 user_thresh_max;
3438 int ret = 1;
3439
3440 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3441 chunk_used = cache->used;
3442
3443 if (bargs->usage_min == 0)
3444 user_thresh_min = 0;
3445 else
3446 user_thresh_min = div_factor_fine(cache->length,
3447 bargs->usage_min);
3448
3449 if (bargs->usage_max == 0)
3450 user_thresh_max = 1;
3451 else if (bargs->usage_max > 100)
3452 user_thresh_max = cache->length;
3453 else
3454 user_thresh_max = div_factor_fine(cache->length,
3455 bargs->usage_max);
3456
3457 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3458 ret = 0;
3459
3460 btrfs_put_block_group(cache);
3461 return ret;
3462 }
3463
3464 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3465 u64 chunk_offset, struct btrfs_balance_args *bargs)
3466 {
3467 struct btrfs_block_group *cache;
3468 u64 chunk_used, user_thresh;
3469 int ret = 1;
3470
3471 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3472 chunk_used = cache->used;
3473
3474 if (bargs->usage_min == 0)
3475 user_thresh = 1;
3476 else if (bargs->usage > 100)
3477 user_thresh = cache->length;
3478 else
3479 user_thresh = div_factor_fine(cache->length, bargs->usage);
3480
3481 if (chunk_used < user_thresh)
3482 ret = 0;
3483
3484 btrfs_put_block_group(cache);
3485 return ret;
3486 }
3487
3488 static int chunk_devid_filter(struct extent_buffer *leaf,
3489 struct btrfs_chunk *chunk,
3490 struct btrfs_balance_args *bargs)
3491 {
3492 struct btrfs_stripe *stripe;
3493 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3494 int i;
3495
3496 for (i = 0; i < num_stripes; i++) {
3497 stripe = btrfs_stripe_nr(chunk, i);
3498 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3499 return 0;
3500 }
3501
3502 return 1;
3503 }
3504
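/*
 * Return the number of stripes that carry data for the given profile: parity
 * stripes are subtracted for RAID5/6, otherwise the stripe count is divided
 * by the number of copies.
 */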
3505 static u64 calc_data_stripes(u64 type, int num_stripes)
3506 {
3507 const int index = btrfs_bg_flags_to_raid_index(type);
3508 const int ncopies = btrfs_raid_array[index].ncopies;
3509 const int nparity = btrfs_raid_array[index].nparity;
3510
3511 if (nparity)
3512 return num_stripes - nparity;
3513 else
3514 return num_stripes / ncopies;
3515 }
3516
3517 /* [pstart, pend) */
3518 static int chunk_drange_filter(struct extent_buffer *leaf,
3519 struct btrfs_chunk *chunk,
3520 struct btrfs_balance_args *bargs)
3521 {
3522 struct btrfs_stripe *stripe;
3523 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3524 u64 stripe_offset;
3525 u64 stripe_length;
3526 u64 type;
3527 int factor;
3528 int i;
3529
3530 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3531 return 0;
3532
3533 type = btrfs_chunk_type(leaf, chunk);
3534 factor = calc_data_stripes(type, num_stripes);
3535
3536 for (i = 0; i < num_stripes; i++) {
3537 stripe = btrfs_stripe_nr(chunk, i);
3538 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3539 continue;
3540
3541 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3542 stripe_length = btrfs_chunk_length(leaf, chunk);
3543 stripe_length = div_u64(stripe_length, factor);
3544
3545 if (stripe_offset < bargs->pend &&
3546 stripe_offset + stripe_length > bargs->pstart)
3547 return 0;
3548 }
3549
3550 return 1;
3551 }
3552
3553 /* [vstart, vend) */
3554 static int chunk_vrange_filter(struct extent_buffer *leaf,
3555 struct btrfs_chunk *chunk,
3556 u64 chunk_offset,
3557 struct btrfs_balance_args *bargs)
3558 {
3559 if (chunk_offset < bargs->vend &&
3560 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3561 /* at least part of the chunk is inside this vrange */
3562 return 0;
3563
3564 return 1;
3565 }
3566
3567 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3568 struct btrfs_chunk *chunk,
3569 struct btrfs_balance_args *bargs)
3570 {
3571 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3572
3573 if (bargs->stripes_min <= num_stripes
3574 && num_stripes <= bargs->stripes_max)
3575 return 0;
3576
3577 return 1;
3578 }
3579
3580 static int chunk_soft_convert_filter(u64 chunk_type,
3581 struct btrfs_balance_args *bargs)
3582 {
3583 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3584 return 0;
3585
3586 chunk_type = chunk_to_extended(chunk_type) &
3587 BTRFS_EXTENDED_PROFILE_MASK;
3588
3589 if (bargs->target == chunk_type)
3590 return 1;
3591
3592 return 0;
3593 }
3594
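/*
 * Apply all enabled balance filters to one chunk and return 1 if it
 * should be relocated, 0 if some filter excludes it. The limit filters
 * decrement their counters here, so they must stay last.
 */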
3595 static int should_balance_chunk(struct extent_buffer *leaf,
3596 struct btrfs_chunk *chunk, u64 chunk_offset)
3597 {
3598 struct btrfs_fs_info *fs_info = leaf->fs_info;
3599 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3600 struct btrfs_balance_args *bargs = NULL;
3601 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3602
3603 /* type filter */
3604 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3605 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3606 return 0;
3607 }
3608
3609 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3610 bargs = &bctl->data;
3611 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3612 bargs = &bctl->sys;
3613 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3614 bargs = &bctl->meta;
3615
3616 /* profiles filter */
3617 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3618 chunk_profiles_filter(chunk_type, bargs)) {
3619 return 0;
3620 }
3621
3622 /* usage filter */
3623 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3624 chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3625 return 0;
3626 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3627 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3628 return 0;
3629 }
3630
3631 /* devid filter */
3632 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3633 chunk_devid_filter(leaf, chunk, bargs)) {
3634 return 0;
3635 }
3636
3637 /* drange filter, makes sense only with devid filter */
3638 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3639 chunk_drange_filter(leaf, chunk, bargs)) {
3640 return 0;
3641 }
3642
3643 /* vrange filter */
3644 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3645 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3646 return 0;
3647 }
3648
3649 /* stripes filter */
3650 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3651 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3652 return 0;
3653 }
3654
3655 /* soft profile changing mode */
3656 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3657 chunk_soft_convert_filter(chunk_type, bargs)) {
3658 return 0;
3659 }
3660
3661 /*
3662 * limited by count, must be the last filter
3663 */
3664 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3665 if (bargs->limit == 0)
3666 return 0;
3667 else
3668 bargs->limit--;
3669 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3670 /*
3671 * Same logic as the 'limit' filter; the minimum cannot be
3672 * determined here because we do not have the global information
3673 * about the count of all chunks that satisfy the filters.
3674 */
3675 if (bargs->limit_max == 0)
3676 return 0;
3677 else
3678 bargs->limit_max--;
3679 }
3680
3681 return 1;
3682 }
3683
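/*
 * Main balance loop: walks the chunk tree twice, first a counting pass
 * that only fills the "expected" stat counters, then a second pass that
 * relocates every chunk accepted by should_balance_chunk().
 */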
3684 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3685 {
3686 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3687 struct btrfs_root *chunk_root = fs_info->chunk_root;
3688 u64 chunk_type;
3689 struct btrfs_chunk *chunk;
3690 struct btrfs_path *path = NULL;
3691 struct btrfs_key key;
3692 struct btrfs_key found_key;
3693 struct extent_buffer *leaf;
3694 int slot;
3695 int ret;
3696 int enospc_errors = 0;
3697 bool counting = true;
3698 /* The single value limit and min/max limits use the same bytes in the balance args (a union), save them here to restore for the relocation pass */
3699 u64 limit_data = bctl->data.limit;
3700 u64 limit_meta = bctl->meta.limit;
3701 u64 limit_sys = bctl->sys.limit;
3702 u32 count_data = 0;
3703 u32 count_meta = 0;
3704 u32 count_sys = 0;
3705 int chunk_reserved = 0;
3706
3707 path = btrfs_alloc_path();
3708 if (!path) {
3709 ret = -ENOMEM;
3710 goto error;
3711 }
3712
3713 /* zero out stat counters */
3714 spin_lock(&fs_info->balance_lock);
3715 memset(&bctl->stat, 0, sizeof(bctl->stat));
3716 spin_unlock(&fs_info->balance_lock);
3717 again:
3718 if (!counting) {
3719 /*
3720 * The single value limit and min/max limits use the same bytes
3721 * in the balance args (a union), restore the values saved above
3722 */
3723 bctl->data.limit = limit_data;
3724 bctl->meta.limit = limit_meta;
3725 bctl->sys.limit = limit_sys;
3726 }
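/*
 * Walk the chunk tree backwards, from the highest chunk offset towards
 * zero, so that chunks created by the relocation itself are not visited
 * again.
 */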
3727 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3728 key.offset = (u64)-1;
3729 key.type = BTRFS_CHUNK_ITEM_KEY;
3730
3731 while (1) {
3732 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3733 atomic_read(&fs_info->balance_cancel_req)) {
3734 ret = -ECANCELED;
3735 goto error;
3736 }
3737
3738 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3739 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3740 if (ret < 0) {
3741 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3742 goto error;
3743 }
3744
3745 /*
3746 * This shouldn't happen, it means the last relocation
3747 * failed
3748 */
3749 if (ret == 0)
3750 BUG(); /* FIXME break ? */
3751
3752 ret = btrfs_previous_item(chunk_root, path, 0,
3753 BTRFS_CHUNK_ITEM_KEY);
3754 if (ret) {
3755 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3756 ret = 0;
3757 break;
3758 }
3759
3760 leaf = path->nodes[0];
3761 slot = path->slots[0];
3762 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3763
3764 if (found_key.objectid != key.objectid) {
3765 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3766 break;
3767 }
3768
3769 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3770 chunk_type = btrfs_chunk_type(leaf, chunk);
3771
3772 if (!counting) {
3773 spin_lock(&fs_info->balance_lock);
3774 bctl->stat.considered++;
3775 spin_unlock(&fs_info->balance_lock);
3776 }
3777
3778 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3779
3780 btrfs_release_path(path);
3781 if (!ret) {
3782 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3783 goto loop;
3784 }
3785
3786 if (counting) {
3787 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3788 spin_lock(&fs_info->balance_lock);
3789 bctl->stat.expected++;
3790 spin_unlock(&fs_info->balance_lock);
3791
3792 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3793 count_data++;
3794 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3795 count_sys++;
3796 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3797 count_meta++;
3798
3799 goto loop;
3800 }
3801
3802 /*
3803 * Apply limit_min filter, no need to check if the LIMITS
3804 * filter is used, limit_min is 0 by default
3805 */
3806 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3807 count_data < bctl->data.limit_min)
3808 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3809 count_meta < bctl->meta.limit_min)
3810 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3811 count_sys < bctl->sys.limit_min)) {
3812 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3813 goto loop;
3814 }
3815
3816 if (!chunk_reserved) {
3817 /*
3818 * We may be relocating the only data chunk we have,
3819 * which could potentially end up losing the data
3820 * raid profile, so let's allocate an empty one in
3821 * advance.
3822 */
3823 ret = btrfs_may_alloc_data_chunk(fs_info,
3824 found_key.offset);
3825 if (ret < 0) {
3826 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3827 goto error;
3828 } else if (ret == 1) {
3829 chunk_reserved = 1;
3830 }
3831 }
3832
3833 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3834 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3835 if (ret == -ENOSPC) {
3836 enospc_errors++;
3837 } else if (ret == -ETXTBSY) {
3838 btrfs_info(fs_info,
3839 "skipping relocation of block group %llu due to active swapfile",
3840 found_key.offset);
3841 ret = 0;
3842 } else if (ret) {
3843 goto error;
3844 } else {
3845 spin_lock(&fs_info->balance_lock);
3846 bctl->stat.completed++;
3847 spin_unlock(&fs_info->balance_lock);
3848 }
3849 loop:
3850 if (found_key.offset == 0)
3851 break;
3852 key.offset = found_key.offset - 1;
3853 }
3854
3855 if (counting) {
3856 btrfs_release_path(path);
3857 counting = false;
3858 goto again;
3859 }
3860 error:
3861 btrfs_free_path(path);
3862 if (enospc_errors) {
3863 btrfs_info(fs_info, "%d enospc errors during balance",
3864 enospc_errors);
3865 if (!ret)
3866 ret = -ENOSPC;
3867 }
3868
3869 return ret;
3870 }
3871
3872 /**
3873 * alloc_profile_is_valid - see if a given profile is valid and reduced
3874 * @flags: profile to validate
3875 * @extended: if true @flags is treated as an extended profile
3876 */
3877 static int alloc_profile_is_valid(u64 flags, int extended)
3878 {
3879 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3880 BTRFS_BLOCK_GROUP_PROFILE_MASK);
3881
3882 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3883
3884 /* 1) check that all other bits are zeroed */
3885 if (flags & ~mask)
3886 return 0;
3887
3888 /* 2) see if profile is reduced */
3889 if (flags == 0)
3890 return !extended; /* "0" is valid for usual profiles */
3891
3892 return has_single_bit_set(flags);
3893 }
3894
3895 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3896 {
3897 /* cancel requested || normal exit path */
3898 return atomic_read(&fs_info->balance_cancel_req) ||
3899 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3900 atomic_read(&fs_info->balance_cancel_req) == 0);
3901 }
3902
3903 /*
3904 * Validate target profile against allowed profiles and return true if it's OK.
3905 * Otherwise print the error message and return false.
3906 */
3907 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
3908 const struct btrfs_balance_args *bargs,
3909 u64 allowed, const char *type)
3910 {
3911 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3912 return true;
3913
3914 /* Profile is valid and does not have bits outside of the allowed set */
3915 if (alloc_profile_is_valid(bargs->target, 1) &&
3916 (bargs->target & ~allowed) == 0)
3917 return true;
3918
3919 btrfs_err(fs_info, "balance: invalid convert %s profile %s",
3920 type, btrfs_bg_type_to_raid_name(bargs->target));
3921 return false;
3922 }
3923
3924 /*
3925 * Fill @buf with textual description of balance filter flags @bargs, up to
3926 * @size_buf including the terminating null. The output may be trimmed if it
3927 * does not fit into the provided buffer.
3928 */
3929 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3930 u32 size_buf)
3931 {
3932 int ret;
3933 u32 size_bp = size_buf;
3934 char *bp = buf;
3935 u64 flags = bargs->flags;
3936 char tmp_buf[128] = {'\0'};
3937
3938 if (!flags)
3939 return;
3940
3941 #define CHECK_APPEND_NOARG(a) \
3942 do { \
3943 ret = snprintf(bp, size_bp, (a)); \
3944 if (ret < 0 || ret >= size_bp) \
3945 goto out_overflow; \
3946 size_bp -= ret; \
3947 bp += ret; \
3948 } while (0)
3949
3950 #define CHECK_APPEND_1ARG(a, v1) \
3951 do { \
3952 ret = snprintf(bp, size_bp, (a), (v1)); \
3953 if (ret < 0 || ret >= size_bp) \
3954 goto out_overflow; \
3955 size_bp -= ret; \
3956 bp += ret; \
3957 } while (0)
3958
3959 #define CHECK_APPEND_2ARG(a, v1, v2) \
3960 do { \
3961 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
3962 if (ret < 0 || ret >= size_bp) \
3963 goto out_overflow; \
3964 size_bp -= ret; \
3965 bp += ret; \
3966 } while (0)
3967
3968 if (flags & BTRFS_BALANCE_ARGS_CONVERT)
3969 CHECK_APPEND_1ARG("convert=%s,",
3970 btrfs_bg_type_to_raid_name(bargs->target));
3971
3972 if (flags & BTRFS_BALANCE_ARGS_SOFT)
3973 CHECK_APPEND_NOARG("soft,");
3974
3975 if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
3976 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
3977 sizeof(tmp_buf));
3978 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
3979 }
3980
3981 if (flags & BTRFS_BALANCE_ARGS_USAGE)
3982 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
3983
3984 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
3985 CHECK_APPEND_2ARG("usage=%u..%u,",
3986 bargs->usage_min, bargs->usage_max);
3987
3988 if (flags & BTRFS_BALANCE_ARGS_DEVID)
3989 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
3990
3991 if (flags & BTRFS_BALANCE_ARGS_DRANGE)
3992 CHECK_APPEND_2ARG("drange=%llu..%llu,",
3993 bargs->pstart, bargs->pend);
3994
3995 if (flags & BTRFS_BALANCE_ARGS_VRANGE)
3996 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
3997 bargs->vstart, bargs->vend);
3998
3999 if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4000 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4001
4002 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4003 CHECK_APPEND_2ARG("limit=%u..%u,",
4004 bargs->limit_min, bargs->limit_max);
4005
4006 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4007 CHECK_APPEND_2ARG("stripes=%u..%u,",
4008 bargs->stripes_min, bargs->stripes_max);
4009
4010 #undef CHECK_APPEND_2ARG
4011 #undef CHECK_APPEND_1ARG
4012 #undef CHECK_APPEND_NOARG
4013
4014 out_overflow:
4015
4016 if (size_bp < size_buf)
4017 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4018 else
4019 buf[0] = '\0';
4020 }
4021
4022 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4023 {
4024 u32 size_buf = 1024;
4025 char tmp_buf[192] = {'\0'};
4026 char *buf;
4027 char *bp;
4028 u32 size_bp = size_buf;
4029 int ret;
4030 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4031
4032 buf = kzalloc(size_buf, GFP_KERNEL);
4033 if (!buf)
4034 return;
4035
4036 bp = buf;
4037
4038 #define CHECK_APPEND_1ARG(a, v1) \
4039 do { \
4040 ret = snprintf(bp, size_bp, (a), (v1)); \
4041 if (ret < 0 || ret >= size_bp) \
4042 goto out_overflow; \
4043 size_bp -= ret; \
4044 bp += ret; \
4045 } while (0)
4046
4047 if (bctl->flags & BTRFS_BALANCE_FORCE)
4048 CHECK_APPEND_1ARG("%s", "-f ");
4049
4050 if (bctl->flags & BTRFS_BALANCE_DATA) {
4051 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4052 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4053 }
4054
4055 if (bctl->flags & BTRFS_BALANCE_METADATA) {
4056 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4057 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4058 }
4059
4060 if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4061 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4062 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4063 }
4064
4065 #undef CHECK_APPEND_1ARG
4066
4067 out_overflow:
4068
4069 if (size_bp < size_buf)
4070 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4071 btrfs_info(fs_info, "balance: %s %s",
4072 (bctl->flags & BTRFS_BALANCE_RESUME) ?
4073 "resume" : "start", buf);
4074
4075 kfree(buf);
4076 }
4077
4078 /*
4079 * Should be called with balance mutex held
4080 */
4081 int btrfs_balance(struct btrfs_fs_info *fs_info,
4082 struct btrfs_balance_control *bctl,
4083 struct btrfs_ioctl_balance_args *bargs)
4084 {
4085 u64 meta_target, data_target;
4086 u64 allowed;
4087 int mixed = 0;
4088 int ret;
4089 u64 num_devices;
4090 unsigned seq;
4091 bool reducing_redundancy;
4092 int i;
4093
4094 if (btrfs_fs_closing(fs_info) ||
4095 atomic_read(&fs_info->balance_pause_req) ||
4096 btrfs_should_cancel_balance(fs_info)) {
4097 ret = -EINVAL;
4098 goto out;
4099 }
4100
4101 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4102 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4103 mixed = 1;
4104
4105 /*
4106 * In case of mixed groups both data and meta should be picked,
4107 * and identical options should be given for both of them.
4108 */
4109 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4110 if (mixed && (bctl->flags & allowed)) {
4111 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4112 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4113 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4114 btrfs_err(fs_info,
4115 "balance: mixed groups data and metadata options must be the same");
4116 ret = -EINVAL;
4117 goto out;
4118 }
4119 }
4120
4121 /*
4122 * rw_devices will not change at the moment, device add/delete/replace
4123 * are exclusive
4124 */
4125 num_devices = fs_info->fs_devices->rw_devices;
4126
4127 /*
4128 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4129 * special bit for it, to make it easier to distinguish. Thus we need
4130 * to set it manually, or balance would refuse the profile.
4131 */
4132 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4133 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4134 if (num_devices >= btrfs_raid_array[i].devs_min)
4135 allowed |= btrfs_raid_array[i].bg_flag;
4136
4137 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4138 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4139 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
4140 ret = -EINVAL;
4141 goto out;
4142 }
4143
4144 /*
4145 * Allow reducing metadata or system integrity only if force is set for
4146 * profiles with redundancy (copies, parity)
4147 */
4148 allowed = 0;
4149 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4150 if (btrfs_raid_array[i].ncopies >= 2 ||
4151 btrfs_raid_array[i].tolerated_failures >= 1)
4152 allowed |= btrfs_raid_array[i].bg_flag;
4153 }
4154 do {
4155 seq = read_seqbegin(&fs_info->profiles_lock);
4156
4157 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4158 (fs_info->avail_system_alloc_bits & allowed) &&
4159 !(bctl->sys.target & allowed)) ||
4160 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4161 (fs_info->avail_metadata_alloc_bits & allowed) &&
4162 !(bctl->meta.target & allowed)))
4163 reducing_redundancy = true;
4164 else
4165 reducing_redundancy = false;
4166
4167 /* if we're not converting, the target field is uninitialized */
4168 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4169 bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4170 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4171 bctl->data.target : fs_info->avail_data_alloc_bits;
4172 } while (read_seqretry(&fs_info->profiles_lock, seq));
4173
4174 if (reducing_redundancy) {
4175 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4176 btrfs_info(fs_info,
4177 "balance: force reducing metadata redundancy");
4178 } else {
4179 btrfs_err(fs_info,
4180 "balance: reduces metadata redundancy, use --force if you want this");
4181 ret = -EINVAL;
4182 goto out;
4183 }
4184 }
4185
4186 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4187 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4188 btrfs_warn(fs_info,
4189 "balance: metadata profile %s has lower redundancy than data profile %s",
4190 btrfs_bg_type_to_raid_name(meta_target),
4191 btrfs_bg_type_to_raid_name(data_target));
4192 }
4193
4194 if (fs_info->send_in_progress) {
4195 btrfs_warn_rl(fs_info,
4196 "cannot run balance while send operations are in progress (%d in progress)",
4197 fs_info->send_in_progress);
4198 ret = -EAGAIN;
4199 goto out;
4200 }
4201
4202 ret = insert_balance_item(fs_info, bctl);
4203 if (ret && ret != -EEXIST)
4204 goto out;
4205
4206 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4207 BUG_ON(ret == -EEXIST);
4208 BUG_ON(fs_info->balance_ctl);
4209 spin_lock(&fs_info->balance_lock);
4210 fs_info->balance_ctl = bctl;
4211 spin_unlock(&fs_info->balance_lock);
4212 } else {
4213 BUG_ON(ret != -EEXIST);
4214 spin_lock(&fs_info->balance_lock);
4215 update_balance_args(bctl);
4216 spin_unlock(&fs_info->balance_lock);
4217 }
4218
4219 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4220 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4221 describe_balance_start_or_resume(fs_info);
4222 mutex_unlock(&fs_info->balance_mutex);
4223
4224 ret = __btrfs_balance(fs_info);
4225
4226 mutex_lock(&fs_info->balance_mutex);
4227 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4228 btrfs_info(fs_info, "balance: paused");
4229 /*
4230 * Balance can be canceled by:
4231 *
4232 * - Regular cancel request
4233 * Then ret == -ECANCELED and balance_cancel_req > 0
4234 *
4235 * - Fatal signal to "btrfs" process
4236 * Either the signal caught by wait_reserve_ticket() and callers
4237 * got -EINTR, or caught by btrfs_should_cancel_balance() and
4238 * got -ECANCELED.
4239 * Either way, in this case balance_cancel_req = 0, and
4240 * ret == -EINTR or ret == -ECANCELED.
4241 *
4242 * So here we only check the return value to catch canceled balance.
4243 */
4244 else if (ret == -ECANCELED || ret == -EINTR)
4245 btrfs_info(fs_info, "balance: canceled");
4246 else
4247 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4248
4249 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4250
4251 if (bargs) {
4252 memset(bargs, 0, sizeof(*bargs));
4253 btrfs_update_ioctl_balance_args(fs_info, bargs);
4254 }
4255
4256 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4257 balance_need_close(fs_info)) {
4258 reset_balance_state(fs_info);
4259 btrfs_exclop_finish(fs_info);
4260 }
4261
4262 wake_up(&fs_info->balance_wait_q);
4263
4264 return ret;
4265 out:
4266 if (bctl->flags & BTRFS_BALANCE_RESUME)
4267 reset_balance_state(fs_info);
4268 else
4269 kfree(bctl);
4270 btrfs_exclop_finish(fs_info);
4271
4272 return ret;
4273 }
4274
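/* Kthread entry point, used to resume a paused balance after mount */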
4275 static int balance_kthread(void *data)
4276 {
4277 struct btrfs_fs_info *fs_info = data;
4278 int ret = 0;
4279
4280 sb_start_write(fs_info->sb);
4281 mutex_lock(&fs_info->balance_mutex);
4282 if (fs_info->balance_ctl)
4283 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4284 mutex_unlock(&fs_info->balance_mutex);
4285 sb_end_write(fs_info->sb);
4286
4287 return ret;
4288 }
4289
4290 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4291 {
4292 struct task_struct *tsk;
4293
4294 mutex_lock(&fs_info->balance_mutex);
4295 if (!fs_info->balance_ctl) {
4296 mutex_unlock(&fs_info->balance_mutex);
4297 return 0;
4298 }
4299 mutex_unlock(&fs_info->balance_mutex);
4300
4301 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4302 btrfs_info(fs_info, "balance: resume skipped");
4303 return 0;
4304 }
4305
4306 /*
4307 * A ro->rw remount sequence should continue with the paused balance
4308 * regardless of who paused it, the system or the user, so set
4309 * the resume flag.
4310 */
4311 spin_lock(&fs_info->balance_lock);
4312 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4313 spin_unlock(&fs_info->balance_lock);
4314
4315 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4316 return PTR_ERR_OR_ZERO(tsk);
4317 }
4318
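/*
 * Read the balance item stored in the tree root and recreate
 * fs_info->balance_ctl with the resume flag set, so a balance that was
 * paused or interrupted survives a remount.
 */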
4319 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4320 {
4321 struct btrfs_balance_control *bctl;
4322 struct btrfs_balance_item *item;
4323 struct btrfs_disk_balance_args disk_bargs;
4324 struct btrfs_path *path;
4325 struct extent_buffer *leaf;
4326 struct btrfs_key key;
4327 int ret;
4328
4329 path = btrfs_alloc_path();
4330 if (!path)
4331 return -ENOMEM;
4332
4333 key.objectid = BTRFS_BALANCE_OBJECTID;
4334 key.type = BTRFS_TEMPORARY_ITEM_KEY;
4335 key.offset = 0;
4336
4337 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4338 if (ret < 0)
4339 goto out;
4340 if (ret > 0) { /* ret = -ENOENT; */
4341 ret = 0;
4342 goto out;
4343 }
4344
4345 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4346 if (!bctl) {
4347 ret = -ENOMEM;
4348 goto out;
4349 }
4350
4351 leaf = path->nodes[0];
4352 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4353
4354 bctl->flags = btrfs_balance_flags(leaf, item);
4355 bctl->flags |= BTRFS_BALANCE_RESUME;
4356
4357 btrfs_balance_data(leaf, item, &disk_bargs);
4358 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4359 btrfs_balance_meta(leaf, item, &disk_bargs);
4360 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4361 btrfs_balance_sys(leaf, item, &disk_bargs);
4362 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4363
4364 /*
4365 * This should never happen, as the paused balance state is recovered
4366 * during mount without any chance of other exclusive ops colliding.
4367 *
4368 * This gives the exclusive op status to balance and keeps in paused
4369 * state until user intervention (cancel or umount). If the ownership
4370 * cannot be assigned, show a message but do not fail. The balance
4371 * is in a paused state and must have fs_info::balance_ctl properly
4372 * set up.
4373 */
4374 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4375 btrfs_warn(fs_info,
4376 "balance: cannot set exclusive op status, resume manually");
4377
4378 btrfs_release_path(path);
4379
4380 mutex_lock(&fs_info->balance_mutex);
4381 BUG_ON(fs_info->balance_ctl);
4382 spin_lock(&fs_info->balance_lock);
4383 fs_info->balance_ctl = bctl;
4384 spin_unlock(&fs_info->balance_lock);
4385 mutex_unlock(&fs_info->balance_mutex);
4386 out:
4387 btrfs_free_path(path);
4388 return ret;
4389 }
4390
4391 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4392 {
4393 int ret = 0;
4394
4395 mutex_lock(&fs_info->balance_mutex);
4396 if (!fs_info->balance_ctl) {
4397 mutex_unlock(&fs_info->balance_mutex);
4398 return -ENOTCONN;
4399 }
4400
4401 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4402 atomic_inc(&fs_info->balance_pause_req);
4403 mutex_unlock(&fs_info->balance_mutex);
4404
4405 wait_event(fs_info->balance_wait_q,
4406 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4407
4408 mutex_lock(&fs_info->balance_mutex);
4409 /* we are good with balance_ctl ripped off from under us */
4410 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4411 atomic_dec(&fs_info->balance_pause_req);
4412 } else {
4413 ret = -ENOTCONN;
4414 }
4415
4416 mutex_unlock(&fs_info->balance_mutex);
4417 return ret;
4418 }
4419
4420 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4421 {
4422 mutex_lock(&fs_info->balance_mutex);
4423 if (!fs_info->balance_ctl) {
4424 mutex_unlock(&fs_info->balance_mutex);
4425 return -ENOTCONN;
4426 }
4427
4428 /*
4429 * A paused balance with the item stored on disk can be resumed at
4430 * mount time if the mount is read-write. Otherwise it's still paused
4431 * and we must not allow cancelling as it deletes the item.
4432 */
4433 if (sb_rdonly(fs_info->sb)) {
4434 mutex_unlock(&fs_info->balance_mutex);
4435 return -EROFS;
4436 }
4437
4438 atomic_inc(&fs_info->balance_cancel_req);
4439 /*
4440 * If balance is running, just wait and return; the balance item
4441 * is deleted in btrfs_balance() in that case
4442 */
4443 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4444 mutex_unlock(&fs_info->balance_mutex);
4445 wait_event(fs_info->balance_wait_q,
4446 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4447 mutex_lock(&fs_info->balance_mutex);
4448 } else {
4449 mutex_unlock(&fs_info->balance_mutex);
4450 /*
4451 * Lock released to allow other waiters to continue, we'll
4452 * reexamine the status.
4453 */
4454 mutex_lock(&fs_info->balance_mutex);
4455
4456 if (fs_info->balance_ctl) {
4457 reset_balance_state(fs_info);
4458 btrfs_exclop_finish(fs_info);
4459 btrfs_info(fs_info, "balance: canceled");
4460 }
4461 }
4462
4463 BUG_ON(fs_info->balance_ctl ||
4464 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4465 atomic_dec(&fs_info->balance_cancel_req);
4466 mutex_unlock(&fs_info->balance_mutex);
4467 return 0;
4468 }
4469
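/*
 * Scan all root items and add any missing uuid/received_uuid entries to
 * the uuid tree, used when the uuid tree is first created or needs to
 * be rebuilt.
 */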
4470 int btrfs_uuid_scan_kthread(void *data)
4471 {
4472 struct btrfs_fs_info *fs_info = data;
4473 struct btrfs_root *root = fs_info->tree_root;
4474 struct btrfs_key key;
4475 struct btrfs_path *path = NULL;
4476 int ret = 0;
4477 struct extent_buffer *eb;
4478 int slot;
4479 struct btrfs_root_item root_item;
4480 u32 item_size;
4481 struct btrfs_trans_handle *trans = NULL;
4482 bool closing = false;
4483
4484 path = btrfs_alloc_path();
4485 if (!path) {
4486 ret = -ENOMEM;
4487 goto out;
4488 }
4489
4490 key.objectid = 0;
4491 key.type = BTRFS_ROOT_ITEM_KEY;
4492 key.offset = 0;
4493
4494 while (1) {
4495 if (btrfs_fs_closing(fs_info)) {
4496 closing = true;
4497 break;
4498 }
4499 ret = btrfs_search_forward(root, &key, path,
4500 BTRFS_OLDEST_GENERATION);
4501 if (ret) {
4502 if (ret > 0)
4503 ret = 0;
4504 break;
4505 }
4506
4507 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4508 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4509 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4510 key.objectid > BTRFS_LAST_FREE_OBJECTID)
4511 goto skip;
4512
4513 eb = path->nodes[0];
4514 slot = path->slots[0];
4515 item_size = btrfs_item_size_nr(eb, slot);
4516 if (item_size < sizeof(root_item))
4517 goto skip;
4518
4519 read_extent_buffer(eb, &root_item,
4520 btrfs_item_ptr_offset(eb, slot),
4521 (int)sizeof(root_item));
4522 if (btrfs_root_refs(&root_item) == 0)
4523 goto skip;
4524
4525 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4526 !btrfs_is_empty_uuid(root_item.received_uuid)) {
4527 if (trans)
4528 goto update_tree;
4529
4530 btrfs_release_path(path);
4531 /*
4532 * 1 - subvol uuid item
4533 * 1 - received_subvol uuid item
4534 */
4535 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4536 if (IS_ERR(trans)) {
4537 ret = PTR_ERR(trans);
4538 break;
4539 }
4540 continue;
4541 } else {
4542 goto skip;
4543 }
4544 update_tree:
4545 btrfs_release_path(path);
4546 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4547 ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4548 BTRFS_UUID_KEY_SUBVOL,
4549 key.objectid);
4550 if (ret < 0) {
4551 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4552 ret);
4553 break;
4554 }
4555 }
4556
4557 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4558 ret = btrfs_uuid_tree_add(trans,
4559 root_item.received_uuid,
4560 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4561 key.objectid);
4562 if (ret < 0) {
4563 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4564 ret);
4565 break;
4566 }
4567 }
4568
4569 skip:
4570 btrfs_release_path(path);
4571 if (trans) {
4572 ret = btrfs_end_transaction(trans);
4573 trans = NULL;
4574 if (ret)
4575 break;
4576 }
4577
4578 if (key.offset < (u64)-1) {
4579 key.offset++;
4580 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4581 key.offset = 0;
4582 key.type = BTRFS_ROOT_ITEM_KEY;
4583 } else if (key.objectid < (u64)-1) {
4584 key.offset = 0;
4585 key.type = BTRFS_ROOT_ITEM_KEY;
4586 key.objectid++;
4587 } else {
4588 break;
4589 }
4590 cond_resched();
4591 }
4592
4593 out:
4594 btrfs_free_path(path);
4595 if (trans && !IS_ERR(trans))
4596 btrfs_end_transaction(trans);
4597 if (ret)
4598 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4599 else if (!closing)
4600 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4601 up(&fs_info->uuid_tree_rescan_sem);
4602 return 0;
4603 }
4604
4605 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4606 {
4607 struct btrfs_trans_handle *trans;
4608 struct btrfs_root *tree_root = fs_info->tree_root;
4609 struct btrfs_root *uuid_root;
4610 struct task_struct *task;
4611 int ret;
4612
4613 /*
4614 * 1 - root node
4615 * 1 - root item
4616 */
4617 trans = btrfs_start_transaction(tree_root, 2);
4618 if (IS_ERR(trans))
4619 return PTR_ERR(trans);
4620
4621 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4622 if (IS_ERR(uuid_root)) {
4623 ret = PTR_ERR(uuid_root);
4624 btrfs_abort_transaction(trans, ret);
4625 btrfs_end_transaction(trans);
4626 return ret;
4627 }
4628
4629 fs_info->uuid_root = uuid_root;
4630
4631 ret = btrfs_commit_transaction(trans);
4632 if (ret)
4633 return ret;
4634
4635 down(&fs_info->uuid_tree_rescan_sem);
4636 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4637 if (IS_ERR(task)) {
4638 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4639 btrfs_warn(fs_info, "failed to start uuid_scan task");
4640 up(&fs_info->uuid_tree_rescan_sem);
4641 return PTR_ERR(task);
4642 }
4643
4644 return 0;
4645 }
4646
4647 /*
4648 * Shrinking a device means finding all of the device extents past
4649 * the new size, and then following the back refs to the chunks.
4650 * The chunk relocation code actually frees the device extents.
4651 */
4652 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4653 {
4654 struct btrfs_fs_info *fs_info = device->fs_info;
4655 struct btrfs_root *root = fs_info->dev_root;
4656 struct btrfs_trans_handle *trans;
4657 struct btrfs_dev_extent *dev_extent = NULL;
4658 struct btrfs_path *path;
4659 u64 length;
4660 u64 chunk_offset;
4661 int ret;
4662 int slot;
4663 int failed = 0;
4664 bool retried = false;
4665 struct extent_buffer *l;
4666 struct btrfs_key key;
4667 struct btrfs_super_block *super_copy = fs_info->super_copy;
4668 u64 old_total = btrfs_super_total_bytes(super_copy);
4669 u64 old_size = btrfs_device_get_total_bytes(device);
4670 u64 diff;
4671 u64 start;
4672
4673 new_size = round_down(new_size, fs_info->sectorsize);
4674 start = new_size;
4675 diff = round_down(old_size - new_size, fs_info->sectorsize);
4676
4677 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4678 return -EINVAL;
4679
4680 path = btrfs_alloc_path();
4681 if (!path)
4682 return -ENOMEM;
4683
4684 path->reada = READA_BACK;
4685
4686 trans = btrfs_start_transaction(root, 0);
4687 if (IS_ERR(trans)) {
4688 btrfs_free_path(path);
4689 return PTR_ERR(trans);
4690 }
4691
4692 mutex_lock(&fs_info->chunk_mutex);
4693
4694 btrfs_device_set_total_bytes(device, new_size);
4695 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4696 device->fs_devices->total_rw_bytes -= diff;
4697 atomic64_sub(diff, &fs_info->free_chunk_space);
4698 }
4699
4700 /*
4701 * Once the device's size has been set to the new size, ensure all
4702 * in-memory chunks are synced to disk so that the loop below sees them
4703 * and relocates them accordingly.
4704 */
4705 if (contains_pending_extent(device, &start, diff)) {
4706 mutex_unlock(&fs_info->chunk_mutex);
4707 ret = btrfs_commit_transaction(trans);
4708 if (ret)
4709 goto done;
4710 } else {
4711 mutex_unlock(&fs_info->chunk_mutex);
4712 btrfs_end_transaction(trans);
4713 }
4714
4715 again:
4716 key.objectid = device->devid;
4717 key.offset = (u64)-1;
4718 key.type = BTRFS_DEV_EXTENT_KEY;
4719
4720 do {
4721 mutex_lock(&fs_info->delete_unused_bgs_mutex);
4722 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4723 if (ret < 0) {
4724 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4725 goto done;
4726 }
4727
4728 ret = btrfs_previous_item(root, path, 0, key.type);
4729 if (ret)
4730 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4731 if (ret < 0)
4732 goto done;
4733 if (ret) {
4734 ret = 0;
4735 btrfs_release_path(path);
4736 break;
4737 }
4738
4739 l = path->nodes[0];
4740 slot = path->slots[0];
4741 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4742
4743 if (key.objectid != device->devid) {
4744 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4745 btrfs_release_path(path);
4746 break;
4747 }
4748
4749 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4750 length = btrfs_dev_extent_length(l, dev_extent);
4751
4752 if (key.offset + length <= new_size) {
4753 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4754 btrfs_release_path(path);
4755 break;
4756 }
4757
4758 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4759 btrfs_release_path(path);
4760
4761 /*
4762 * We may be relocating the only data chunk we have,
4763 * which could potentially end up losing the data
4764 * raid profile, so let's allocate an empty one in
4765 * advance.
4766 */
4767 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4768 if (ret < 0) {
4769 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4770 goto done;
4771 }
4772
4773 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4774 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4775 if (ret == -ENOSPC) {
4776 failed++;
4777 } else if (ret) {
4778 if (ret == -ETXTBSY) {
4779 btrfs_warn(fs_info,
4780 "could not shrink block group %llu due to active swapfile",
4781 chunk_offset);
4782 }
4783 goto done;
4784 }
4785 } while (key.offset-- > 0);
4786
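/*
 * Chunks that failed to relocate with ENOSPC are retried once, as the
 * relocations that did succeed may have freed up space; a second
 * failure makes the shrink fail with ENOSPC.
 */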
4787 if (failed && !retried) {
4788 failed = 0;
4789 retried = true;
4790 goto again;
4791 } else if (failed && retried) {
4792 ret = -ENOSPC;
4793 goto done;
4794 }
4795
4796 /* Shrinking succeeded, else we would be at "done". */
4797 trans = btrfs_start_transaction(root, 0);
4798 if (IS_ERR(trans)) {
4799 ret = PTR_ERR(trans);
4800 goto done;
4801 }
4802
4803 mutex_lock(&fs_info->chunk_mutex);
4804 /* Clear all state bits beyond the shrunk device size */
4805 clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4806 CHUNK_STATE_MASK);
4807
4808 btrfs_device_set_disk_total_bytes(device, new_size);
4809 if (list_empty(&device->post_commit_list))
4810 list_add_tail(&device->post_commit_list,
4811 &trans->transaction->dev_update_list);
4812
4813 WARN_ON(diff > old_total);
4814 btrfs_set_super_total_bytes(super_copy,
4815 round_down(old_total - diff, fs_info->sectorsize));
4816 mutex_unlock(&fs_info->chunk_mutex);
4817
4818 /* Now btrfs_update_device() will change the on-disk size. */
4819 ret = btrfs_update_device(trans, device);
4820 if (ret < 0) {
4821 btrfs_abort_transaction(trans, ret);
4822 btrfs_end_transaction(trans);
4823 } else {
4824 ret = btrfs_commit_transaction(trans);
4825 }
4826 done:
4827 btrfs_free_path(path);
4828 if (ret) {
4829 mutex_lock(&fs_info->chunk_mutex);
4830 btrfs_device_set_total_bytes(device, old_size);
4831 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4832 device->fs_devices->total_rw_bytes += diff;
4833 atomic64_add(diff, &fs_info->free_chunk_space);
4834 mutex_unlock(&fs_info->chunk_mutex);
4835 }
4836 return ret;
4837 }
4838
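/*
 * Append the key and chunk item to the sys_chunk_array in the
 * superblock, so system chunks can be located before the chunk tree
 * itself can be read.
 */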
4839 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4840 struct btrfs_key *key,
4841 struct btrfs_chunk *chunk, int item_size)
4842 {
4843 struct btrfs_super_block *super_copy = fs_info->super_copy;
4844 struct btrfs_disk_key disk_key;
4845 u32 array_size;
4846 u8 *ptr;
4847
4848 mutex_lock(&fs_info->chunk_mutex);
4849 array_size = btrfs_super_sys_array_size(super_copy);
4850 if (array_size + item_size + sizeof(disk_key)
4851 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4852 mutex_unlock(&fs_info->chunk_mutex);
4853 return -EFBIG;
4854 }
4855
4856 ptr = super_copy->sys_chunk_array + array_size;
4857 btrfs_cpu_key_to_disk(&disk_key, key);
4858 memcpy(ptr, &disk_key, sizeof(disk_key));
4859 ptr += sizeof(disk_key);
4860 memcpy(ptr, chunk, item_size);
4861 item_size += sizeof(disk_key);
4862 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4863 mutex_unlock(&fs_info->chunk_mutex);
4864
4865 return 0;
4866 }
4867
4868 /*
4869 * sort the devices in descending order by max_avail, total_avail
4870 */
4871 static int btrfs_cmp_device_info(const void *a, const void *b)
4872 {
4873 const struct btrfs_device_info *di_a = a;
4874 const struct btrfs_device_info *di_b = b;
4875
4876 if (di_a->max_avail > di_b->max_avail)
4877 return -1;
4878 if (di_a->max_avail < di_b->max_avail)
4879 return 1;
4880 if (di_a->total_avail > di_b->total_avail)
4881 return -1;
4882 if (di_a->total_avail < di_b->total_avail)
4883 return 1;
4884 return 0;
4885 }
4886
4887 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4888 {
4889 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4890 return;
4891
4892 btrfs_set_fs_incompat(info, RAID56);
4893 }
4894
4895 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
4896 {
4897 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
4898 return;
4899
4900 btrfs_set_fs_incompat(info, RAID1C34);
4901 }
4902
4903 /*
4904 * Structure used internally by btrfs_alloc_chunk().
4905 * Wraps the needed parameters.
4906 */
4907 struct alloc_chunk_ctl {
4908 u64 start;
4909 u64 type;
4910 /* Total number of stripes to allocate */
4911 int num_stripes;
4912 /* sub_stripes info for map */
4913 int sub_stripes;
4914 /* Stripes per device */
4915 int dev_stripes;
4916 /* Maximum number of devices to use */
4917 int devs_max;
4918 /* Minimum number of devices to use */
4919 int devs_min;
4920 /* ndevs has to be a multiple of this */
4921 int devs_increment;
4922 /* Number of copies */
4923 int ncopies;
4924 /* Number of stripes worth of bytes to store parity information */
4925 int nparity;
4926 u64 max_stripe_size;
4927 u64 max_chunk_size;
4928 u64 dev_extent_min;
4929 u64 stripe_size;
4930 u64 chunk_size;
4931 int ndevs;
4932 };
4933
4934 static void init_alloc_chunk_ctl_policy_regular(
4935 struct btrfs_fs_devices *fs_devices,
4936 struct alloc_chunk_ctl *ctl)
4937 {
4938 u64 type = ctl->type;
4939
4940 if (type & BTRFS_BLOCK_GROUP_DATA) {
4941 ctl->max_stripe_size = SZ_1G;
4942 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4943 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4944 /* For larger filesystems, use larger metadata chunks */
4945 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4946 ctl->max_stripe_size = SZ_1G;
4947 else
4948 ctl->max_stripe_size = SZ_256M;
4949 ctl->max_chunk_size = ctl->max_stripe_size;
4950 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4951 ctl->max_stripe_size = SZ_32M;
4952 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
4953 ctl->devs_max = min_t(int, ctl->devs_max,
4954 BTRFS_MAX_DEVS_SYS_CHUNK);
4955 } else {
4956 BUG();
4957 }
4958
4959 /* We don't want a chunk larger than 10% of writable space */
4960 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4961 ctl->max_chunk_size);
4962 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
4963 }
4964
4965 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
4966 struct alloc_chunk_ctl *ctl)
4967 {
4968 int index = btrfs_bg_flags_to_raid_index(ctl->type);
4969
4970 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
4971 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
4972 ctl->devs_max = btrfs_raid_array[index].devs_max;
4973 if (!ctl->devs_max)
4974 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
4975 ctl->devs_min = btrfs_raid_array[index].devs_min;
4976 ctl->devs_increment = btrfs_raid_array[index].devs_increment;
4977 ctl->ncopies = btrfs_raid_array[index].ncopies;
4978 ctl->nparity = btrfs_raid_array[index].nparity;
4979 ctl->ndevs = 0;
4980
4981 switch (fs_devices->chunk_alloc_policy) {
4982 case BTRFS_CHUNK_ALLOC_REGULAR:
4983 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
4984 break;
4985 default:
4986 BUG();
4987 }
4988 }
4989
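/*
 * Collect, for every writable device, the largest free extent (hole)
 * and the total unallocated space, then sort the candidates in
 * descending order so the devices with the biggest holes come first.
 */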
4990 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
4991 struct alloc_chunk_ctl *ctl,
4992 struct btrfs_device_info *devices_info)
4993 {
4994 struct btrfs_fs_info *info = fs_devices->fs_info;
4995 struct btrfs_device *device;
4996 u64 total_avail;
4997 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
4998 int ret;
4999 int ndevs = 0;
5000 u64 max_avail;
5001 u64 dev_offset;
5002
5003 /*
5004 * in the first pass through the devices list, we gather information
5005 * about the available holes on each device.
5006 */
5007 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5008 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5009 WARN(1, KERN_ERR
5010 "BTRFS: read-only device in alloc_list\n");
5011 continue;
5012 }
5013
5014 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5015 &device->dev_state) ||
5016 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5017 continue;
5018
5019 if (device->total_bytes > device->bytes_used)
5020 total_avail = device->total_bytes - device->bytes_used;
5021 else
5022 total_avail = 0;
5023
5024 /* If there is no space on this device, skip it. */
5025 if (total_avail < ctl->dev_extent_min)
5026 continue;
5027
5028 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5029 &max_avail);
5030 if (ret && ret != -ENOSPC)
5031 return ret;
5032
5033 if (ret == 0)
5034 max_avail = dev_extent_want;
5035
5036 if (max_avail < ctl->dev_extent_min) {
5037 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5038 btrfs_debug(info,
5039 "%s: devid %llu has no free space, have=%llu want=%llu",
5040 __func__, device->devid, max_avail,
5041 ctl->dev_extent_min);
5042 continue;
5043 }
5044
5045 if (ndevs == fs_devices->rw_devices) {
5046 WARN(1, "%s: found more than %llu devices\n",
5047 __func__, fs_devices->rw_devices);
5048 break;
5049 }
5050 devices_info[ndevs].dev_offset = dev_offset;
5051 devices_info[ndevs].max_avail = max_avail;
5052 devices_info[ndevs].total_avail = total_avail;
5053 devices_info[ndevs].dev = device;
5054 ++ndevs;
5055 }
5056 ctl->ndevs = ndevs;
5057
5058 /*
5059 * now sort the devices by hole size / available space
5060 */
5061 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5062 btrfs_cmp_device_info, NULL);
5063
5064 return 0;
5065 }
5066
5067 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5068 struct btrfs_device_info *devices_info)
5069 {
5070 /* Number of stripes that count for block group size */
5071 int data_stripes;
5072
5073 /*
5074 * The primary goal is to maximize the number of stripes, so use as
5075 * many devices as possible, even if the stripes are not maximum sized.
5076 *
5077 * The DUP profile stores more than one stripe per device, the
5078 * max_avail is the total size so we have to adjust.
5079 */
5080 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5081 ctl->dev_stripes);
5082 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5083
5084 /* This will have to be fixed for RAID1 and RAID10 over more drives */
5085 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
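/*
 * Example: RAID6 across 6 devices has num_stripes = 6, nparity = 2 and
 * ncopies = 1, giving 4 data stripes; RAID10 across 6 devices has
 * num_stripes = 6, nparity = 0 and ncopies = 2, giving 3.
 */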
5086
5087 /*
5088 * Use the number of data stripes to figure out how big this chunk is
5089 * really going to be in terms of logical address space, and compare
5090 * that answer with the max chunk size. If it's higher, we try to
5091 * reduce stripe_size.
5092 */
5093 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5094 /*
5095 * Reduce stripe_size, round it up to a 16MB boundary again and
5096 * then use it, unless it ends up being even bigger than the
5097 * previous value we had already.
5098 */
5099 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5100 data_stripes), SZ_16M),
5101 ctl->stripe_size);
5102 }
5103
5104 /* Align to BTRFS_STRIPE_LEN */
5105 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5106 ctl->chunk_size = ctl->stripe_size * data_stripes;
5107
5108 return 0;
5109 }
5110
5111 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5112 struct alloc_chunk_ctl *ctl,
5113 struct btrfs_device_info *devices_info)
5114 {
5115 struct btrfs_fs_info *info = fs_devices->fs_info;
5116
5117 /*
5118 * Round down to the number of usable stripes, devs_increment can be
5119 * any number so we can't use round_down(), which requires a power of
5120 * two, while rounddown() is safe for any value.
5121 */
5122 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5123
5124 if (ctl->ndevs < ctl->devs_min) {
5125 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5126 btrfs_debug(info,
5127 "%s: not enough devices with free space: have=%d minimum required=%d",
5128 __func__, ctl->ndevs, ctl->devs_min);
5129 }
5130 return -ENOSPC;
5131 }
5132
5133 ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5134
5135 switch (fs_devices->chunk_alloc_policy) {
5136 case BTRFS_CHUNK_ALLOC_REGULAR:
5137 return decide_stripe_size_regular(ctl, devices_info);
5138 default:
5139 BUG();
5140 }
5141 }
5142
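/*
 * Build the in-memory chunk mapping: fill the map_lookup with one
 * stripe per (device, dev_stripe) pair, insert it as an extent map into
 * the mapping tree and create the matching block group.
 */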
5143 static int create_chunk(struct btrfs_trans_handle *trans,
5144 struct alloc_chunk_ctl *ctl,
5145 struct btrfs_device_info *devices_info)
5146 {
5147 struct btrfs_fs_info *info = trans->fs_info;
5148 struct map_lookup *map = NULL;
5149 struct extent_map_tree *em_tree;
5150 struct extent_map *em;
5151 u64 start = ctl->start;
5152 u64 type = ctl->type;
5153 int ret;
5154 int i;
5155 int j;
5156
5157 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5158 if (!map)
5159 return -ENOMEM;
5160 map->num_stripes = ctl->num_stripes;
5161
5162 for (i = 0; i < ctl->ndevs; ++i) {
5163 for (j = 0; j < ctl->dev_stripes; ++j) {
5164 int s = i * ctl->dev_stripes + j;
5165 map->stripes[s].dev = devices_info[i].dev;
5166 map->stripes[s].physical = devices_info[i].dev_offset +
5167 j * ctl->stripe_size;
5168 }
5169 }
5170 map->stripe_len = BTRFS_STRIPE_LEN;
5171 map->io_align = BTRFS_STRIPE_LEN;
5172 map->io_width = BTRFS_STRIPE_LEN;
5173 map->type = type;
5174 map->sub_stripes = ctl->sub_stripes;
5175
5176 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5177
5178 em = alloc_extent_map();
5179 if (!em) {
5180 kfree(map);
5181 return -ENOMEM;
5182 }
5183 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5184 em->map_lookup = map;
5185 em->start = start;
5186 em->len = ctl->chunk_size;
5187 em->block_start = 0;
5188 em->block_len = em->len;
5189 em->orig_block_len = ctl->stripe_size;
5190
5191 em_tree = &info->mapping_tree;
5192 write_lock(&em_tree->lock);
5193 ret = add_extent_mapping(em_tree, em, 0);
5194 if (ret) {
5195 write_unlock(&em_tree->lock);
5196 free_extent_map(em);
5197 return ret;
5198 }
5199 write_unlock(&em_tree->lock);
5200
5201 ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5202 if (ret)
5203 goto error_del_extent;
5204
5205 for (i = 0; i < map->num_stripes; i++) {
5206 struct btrfs_device *dev = map->stripes[i].dev;
5207
5208 btrfs_device_set_bytes_used(dev,
5209 dev->bytes_used + ctl->stripe_size);
5210 if (list_empty(&dev->post_commit_list))
5211 list_add_tail(&dev->post_commit_list,
5212 &trans->transaction->dev_update_list);
5213 }
5214
5215 atomic64_sub(ctl->stripe_size * map->num_stripes,
5216 &info->free_chunk_space);
5217
5218 free_extent_map(em);
5219 check_raid56_incompat_flag(info, type);
5220 check_raid1c34_incompat_flag(info, type);
5221
5222 return 0;
5223
5224 error_del_extent:
5225 write_lock(&em_tree->lock);
5226 remove_extent_mapping(em_tree, em);
5227 write_unlock(&em_tree->lock);
5228
5229 /* One for our allocation */
5230 free_extent_map(em);
5231 /* One for the tree reference */
5232 free_extent_map(em);
5233
5234 return ret;
5235 }
5236
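/*
 * Allocate a new chunk of the given profile type: pick the devices and
 * the stripe size, then create the in-memory mapping and block group.
 * The chunk tree is updated later by btrfs_finish_chunk_alloc().
 */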
5237 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5238 {
5239 struct btrfs_fs_info *info = trans->fs_info;
5240 struct btrfs_fs_devices *fs_devices = info->fs_devices;
5241 struct btrfs_device_info *devices_info = NULL;
5242 struct alloc_chunk_ctl ctl;
5243 int ret;
5244
5245 lockdep_assert_held(&info->chunk_mutex);
5246
5247 if (!alloc_profile_is_valid(type, 0)) {
5248 ASSERT(0);
5249 return -EINVAL;
5250 }
5251
5252 if (list_empty(&fs_devices->alloc_list)) {
5253 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5254 btrfs_debug(info, "%s: no writable device", __func__);
5255 return -ENOSPC;
5256 }
5257
5258 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5259 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5260 ASSERT(0);
5261 return -EINVAL;
5262 }
5263
5264 ctl.start = find_next_chunk(info);
5265 ctl.type = type;
5266 init_alloc_chunk_ctl(fs_devices, &ctl);
5267
5268 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5269 GFP_NOFS);
5270 if (!devices_info)
5271 return -ENOMEM;
5272
5273 ret = gather_device_info(fs_devices, &ctl, devices_info);
5274 if (ret < 0)
5275 goto out;
5276
5277 ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5278 if (ret < 0)
5279 goto out;
5280
5281 ret = create_chunk(trans, &ctl, devices_info);
5282
5283 out:
5284 kfree(devices_info);
5285 return ret;
5286 }
5287
5288 /*
5289 * Chunk allocation falls into two parts. The first part does work
5290 * that makes the new allocated chunk usable, but does not do any operation
5291 * that modifies the chunk tree. The second part does the work that
5292 * requires modifying the chunk tree. This division is important for the
5293 * bootstrap process of adding storage to a seed btrfs.
5294 */
5295 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5296 u64 chunk_offset, u64 chunk_size)
5297 {
5298 struct btrfs_fs_info *fs_info = trans->fs_info;
5299 struct btrfs_root *extent_root = fs_info->extent_root;
5300 struct btrfs_root *chunk_root = fs_info->chunk_root;
5301 struct btrfs_key key;
5302 struct btrfs_device *device;
5303 struct btrfs_chunk *chunk;
5304 struct btrfs_stripe *stripe;
5305 struct extent_map *em;
5306 struct map_lookup *map;
5307 size_t item_size;
5308 u64 dev_offset;
5309 u64 stripe_size;
5310 int i = 0;
5311 int ret = 0;
5312
5313 em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
5314 if (IS_ERR(em))
5315 return PTR_ERR(em);
5316
5317 map = em->map_lookup;
5318 item_size = btrfs_chunk_item_size(map->num_stripes);
5319 stripe_size = em->orig_block_len;
5320
5321 chunk = kzalloc(item_size, GFP_NOFS);
5322 if (!chunk) {
5323 ret = -ENOMEM;
5324 goto out;
5325 }
5326
5327 /*
5328 * Take the device list mutex to prevent races with the final phase of
5329 * a device replace operation that replaces the device object associated
5330 * with the map's stripes, because the device object's id can change
5331 * at any time during that final phase of the device replace operation
5332 * (dev-replace.c:btrfs_dev_replace_finishing()).
5333 */
5334 mutex_lock(&fs_info->fs_devices->device_list_mutex);
5335 for (i = 0; i < map->num_stripes; i++) {
5336 device = map->stripes[i].dev;
5337 dev_offset = map->stripes[i].physical;
5338
5339 ret = btrfs_update_device(trans, device);
5340 if (ret)
5341 break;
5342 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5343 dev_offset, stripe_size);
5344 if (ret)
5345 break;
5346 }
5347 if (ret) {
5348 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5349 goto out;
5350 }
5351
5352 stripe = &chunk->stripe;
5353 for (i = 0; i < map->num_stripes; i++) {
5354 device = map->stripes[i].dev;
5355 dev_offset = map->stripes[i].physical;
5356
5357 btrfs_set_stack_stripe_devid(stripe, device->devid);
5358 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5359 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5360 stripe++;
5361 }
5362 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5363
5364 btrfs_set_stack_chunk_length(chunk, chunk_size);
5365 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5366 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5367 btrfs_set_stack_chunk_type(chunk, map->type);
5368 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5369 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5370 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5371 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5372 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5373
5374 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5375 key.type = BTRFS_CHUNK_ITEM_KEY;
5376 key.offset = chunk_offset;
5377
5378 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5379 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5380 /*
5381 * TODO: Cleanup of inserted chunk root in case of
5382 * failure.
5383 */
5384 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5385 }
5386
5387 out:
5388 kfree(chunk);
5389 free_extent_map(em);
5390 return ret;
5391 }
5392
5393 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5394 {
5395 struct btrfs_fs_info *fs_info = trans->fs_info;
5396 u64 alloc_profile;
5397 int ret;
5398
5399 alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5400 ret = btrfs_alloc_chunk(trans, alloc_profile);
5401 if (ret)
5402 return ret;
5403
5404 alloc_profile = btrfs_system_alloc_profile(fs_info);
5405 ret = btrfs_alloc_chunk(trans, alloc_profile);
5406 return ret;
5407 }
5408
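/* Number of device failures tolerated by the chunk's raid profile */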
5409 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5410 {
5411 const int index = btrfs_bg_flags_to_raid_index(map->type);
5412
5413 return btrfs_raid_array[index].tolerated_failures;
5414 }
5415
5416 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5417 {
5418 struct extent_map *em;
5419 struct map_lookup *map;
5420 int readonly = 0;
5421 int miss_ndevs = 0;
5422 int i;
5423
5424 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5425 if (IS_ERR(em))
5426 return 1;
5427
5428 map = em->map_lookup;
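	/*
	 * The chunk must be marked read-only if any present stripe device
	 * is not writeable; missing devices are counted and checked below.
	 */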
5429 for (i = 0; i < map->num_stripes; i++) {
5430 if (test_bit(BTRFS_DEV_STATE_MISSING,
5431 &map->stripes[i].dev->dev_state)) {
5432 miss_ndevs++;
5433 continue;
5434 }
5435 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5436 &map->stripes[i].dev->dev_state)) {
5437 readonly = 1;
5438 goto end;
5439 }
5440 }
5441
5442 /*
5443 * If the number of missing devices is larger than max errors,
5444 * we cannot write the data into that chunk successfully, so
5445 * set it readonly.
5446 */
5447 if (miss_ndevs > btrfs_chunk_max_errors(map))
5448 readonly = 1;
5449 end:
5450 free_extent_map(em);
5451 return readonly;
5452 }
5453
5454 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5455 {
5456 struct extent_map *em;
5457
5458 while (1) {
5459 write_lock(&tree->lock);
5460 em = lookup_extent_mapping(tree, 0, (u64)-1);
5461 if (em)
5462 remove_extent_mapping(tree, em);
5463 write_unlock(&tree->lock);
5464 if (!em)
5465 break;
5466 /* once for us */
5467 free_extent_map(em);
5468 /* once for the tree */
5469 free_extent_map(em);
5470 }
5471 }
5472
5473 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5474 {
5475 struct extent_map *em;
5476 struct map_lookup *map;
5477 int ret;
5478
5479 em = btrfs_get_chunk_map(fs_info, logical, len);
5480 if (IS_ERR(em))
5481 /*
5482 * We could return errors for these cases, but that would get
5483 * ugly and callers would most likely react the same way we do
5484 * here: give up and exit. Return 1 so the callers don't try
5485 * to use other copies.
5486 */
5487 return 1;
5488
5489 map = em->map_lookup;
5490 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5491 ret = map->num_stripes;
5492 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5493 ret = map->sub_stripes;
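	/*
	 * RAID5 data can be read directly or rebuilt from parity and
	 * the remaining stripes, which effectively gives two copies.
	 */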
5494 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5495 ret = 2;
5496 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5497 /*
5498 * There could be two corrupted data stripes, we need
5499 * to loop retry in order to rebuild the correct data.
5500 *
5501 * Fail a stripe at a time on every retry except the
5502 * stripe under reconstruction.
5503 */
5504 ret = map->num_stripes;
5505 else
5506 ret = 1;
5507 free_extent_map(em);
5508
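	/*
	 * While a device replace is running, the target device acts as
	 * an additional mirror, so report one more copy.
	 */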
5509 down_read(&fs_info->dev_replace.rwsem);
5510 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5511 fs_info->dev_replace.tgtdev)
5512 ret++;
5513 up_read(&fs_info->dev_replace.rwsem);
5514
5515 return ret;
5516 }
5517
5518 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5519 u64 logical)
5520 {
5521 struct extent_map *em;
5522 struct map_lookup *map;
5523 unsigned long len = fs_info->sectorsize;
5524
5525 em = btrfs_get_chunk_map(fs_info, logical, len);
5526
5527 if (!WARN_ON(IS_ERR(em))) {
5528 map = em->map_lookup;
5529 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5530 len = map->stripe_len * nr_data_stripes(map);
5531 free_extent_map(em);
5532 }
5533 return len;
5534 }
5535
5536 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5537 {
5538 struct extent_map *em;
5539 struct map_lookup *map;
5540 int ret = 0;
5541
5542 em = btrfs_get_chunk_map(fs_info, logical, len);
5543
5544 if (!WARN_ON(IS_ERR(em))) {
5545 map = em->map_lookup;
5546 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5547 ret = 1;
5548 free_extent_map(em);
5549 }
5550 return ret;
5551 }
5552
5553 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5554 struct map_lookup *map, int first,
5555 int dev_replace_is_ongoing)
5556 {
5557 int i;
5558 int num_stripes;
5559 int preferred_mirror;
5560 int tolerance;
5561 struct btrfs_device *srcdev;
5562
5563 ASSERT((map->type &
5564 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5565
5566 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5567 num_stripes = map->sub_stripes;
5568 else
5569 num_stripes = map->num_stripes;
5570
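	/*
	 * Pick the preferred mirror pseudo-randomly from the current
	 * task's pid so that concurrent readers spread across mirrors.
	 */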
5571 preferred_mirror = first + current->pid % num_stripes;
5572
5573 if (dev_replace_is_ongoing &&
5574 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5575 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5576 srcdev = fs_info->dev_replace.srcdev;
5577 else
5578 srcdev = NULL;
5579
5580 /*
5581 * try to avoid the drive that is the source drive for a
5582 * dev-replace procedure, only choose it if no other non-missing
5583 * mirror is available
5584 */
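	/*
	 * Two passes: with tolerance 0 the replace source device is
	 * skipped, with tolerance 1 any stripe with a present bdev wins.
	 */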
5585 for (tolerance = 0; tolerance < 2; tolerance++) {
5586 if (map->stripes[preferred_mirror].dev->bdev &&
5587 (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5588 return preferred_mirror;
5589 for (i = first; i < first + num_stripes; i++) {
5590 if (map->stripes[i].dev->bdev &&
5591 (tolerance || map->stripes[i].dev != srcdev))
5592 return i;
5593 }
5594 }
5595
5596 /* We couldn't find one that doesn't fail. Just return something
5597 * and the IO error handling code will clean up eventually.
5598 */
5599 return preferred_mirror;
5600 }
5601
5602 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5603 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5604 {
5605 int i;
5606 int again = 1;
5607
5608 while (again) {
5609 again = 0;
5610 for (i = 0; i < num_stripes - 1; i++) {
5611 /* Swap if parity is on a smaller index */
5612 if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
5613 swap(bbio->stripes[i], bbio->stripes[i + 1]);
5614 swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
5615 again = 1;
5616 }
5617 }
5618 }
5619 }
5620
5621 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5622 {
5623 struct btrfs_bio *bbio = kzalloc(
5624 /* the size of the btrfs_bio */
5625 sizeof(struct btrfs_bio) +
5626 /* plus the variable array for the stripes */
5627 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5628 /* plus the variable array for the tgt dev */
5629 sizeof(int) * (real_stripes) +
5630 /*
5631 * plus the raid_map, which includes both the tgt dev
5632 * and the stripes
5633 */
5634 sizeof(u64) * (total_stripes),
5635 GFP_NOFS|__GFP_NOFAIL);
5636
5637 atomic_set(&bbio->error, 0);
5638 refcount_set(&bbio->refs, 1);
5639
5640 bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
5641 bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
5642
5643 return bbio;
5644 }
5645
5646 void btrfs_get_bbio(struct btrfs_bio *bbio)
5647 {
5648 WARN_ON(!refcount_read(&bbio->refs));
5649 refcount_inc(&bbio->refs);
5650 }
5651
5652 void btrfs_put_bbio(struct btrfs_bio *bbio)
5653 {
5654 if (!bbio)
5655 return;
5656 if (refcount_dec_and_test(&bbio->refs))
5657 kfree(bbio);
5658 }
5659
5660 /*
5661 * Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE?
5662 * Note that discard will not be sent to the target device of a
5663 * device replace operation.
5664 */
5665 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5666 u64 logical, u64 *length_ret,
5667 struct btrfs_bio **bbio_ret)
5668 {
5669 struct extent_map *em;
5670 struct map_lookup *map;
5671 struct btrfs_bio *bbio;
5672 u64 length = *length_ret;
5673 u64 offset;
5674 u64 stripe_nr;
5675 u64 stripe_nr_end;
5676 u64 stripe_end_offset;
5677 u64 stripe_cnt;
5678 u64 stripe_len;
5679 u64 stripe_offset;
5680 u64 num_stripes;
5681 u32 stripe_index;
5682 u32 factor = 0;
5683 u32 sub_stripes = 0;
5684 u64 stripes_per_dev = 0;
5685 u32 remaining_stripes = 0;
5686 u32 last_stripe = 0;
5687 int ret = 0;
5688 int i;
5689
5690 /* Discard always returns a bbio */
5691 ASSERT(bbio_ret);
5692
5693 em = btrfs_get_chunk_map(fs_info, logical, length);
5694 if (IS_ERR(em))
5695 return PTR_ERR(em);
5696
5697 map = em->map_lookup;
5698 /* we don't discard raid56 yet */
5699 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5700 ret = -EOPNOTSUPP;
5701 goto out;
5702 }
5703
5704 offset = logical - em->start;
5705 length = min_t(u64, em->start + em->len - logical, length);
5706 *length_ret = length;
5707
5708 stripe_len = map->stripe_len;
5709 /*
5710 * stripe_nr counts the total number of stripes we have to stride
5711 * to get to this block
5712 */
5713 stripe_nr = div64_u64(offset, stripe_len);
5714
5715 /* stripe_offset is the offset of this block in its stripe */
5716 stripe_offset = offset - stripe_nr * stripe_len;
5717
5718 stripe_nr_end = round_up(offset + length, map->stripe_len);
5719 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5720 stripe_cnt = stripe_nr_end - stripe_nr;
5721 stripe_end_offset = stripe_nr_end * map->stripe_len -
5722 (offset + length);
5723 /*
5724 * after this, stripe_nr is the number of stripes on this
5725 * device we have to walk to find the data, and stripe_index is
5726 * the number of our device in the stripe array
5727 */
5728 num_stripes = 1;
5729 stripe_index = 0;
5730 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5731 BTRFS_BLOCK_GROUP_RAID10)) {
5732 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5733 sub_stripes = 1;
5734 else
5735 sub_stripes = map->sub_stripes;
5736
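		/*
		 * factor is the number of independent stripe positions in
		 * one rotation over the devices; RAID10 groups sub_stripes
		 * mirrored devices into a single position.
		 */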
5737 factor = map->num_stripes / sub_stripes;
5738 num_stripes = min_t(u64, map->num_stripes,
5739 sub_stripes * stripe_cnt);
5740 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5741 stripe_index *= sub_stripes;
5742 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5743 &remaining_stripes);
5744 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5745 last_stripe *= sub_stripes;
5746 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5747 BTRFS_BLOCK_GROUP_DUP)) {
5748 num_stripes = map->num_stripes;
5749 } else {
5750 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5751 &stripe_index);
5752 }
5753
5754 bbio = alloc_btrfs_bio(num_stripes, 0);
5755 if (!bbio) {
5756 ret = -ENOMEM;
5757 goto out;
5758 }
5759
5760 for (i = 0; i < num_stripes; i++) {
5761 bbio->stripes[i].physical =
5762 map->stripes[stripe_index].physical +
5763 stripe_offset + stripe_nr * map->stripe_len;
5764 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5765
5766 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5767 BTRFS_BLOCK_GROUP_RAID10)) {
5768 bbio->stripes[i].length = stripes_per_dev *
5769 map->stripe_len;
5770
5771 if (i / sub_stripes < remaining_stripes)
5772 bbio->stripes[i].length +=
5773 map->stripe_len;
5774
5775 /*
5776 * Special for the first stripe and
5777 * the last stripe:
5778 *
5779 * |-------|...|-------|
5780 * |----------|
5781 * off end_off
5782 */
5783 if (i < sub_stripes)
5784 bbio->stripes[i].length -=
5785 stripe_offset;
5786
5787 if (stripe_index >= last_stripe &&
5788 stripe_index <= (last_stripe +
5789 sub_stripes - 1))
5790 bbio->stripes[i].length -=
5791 stripe_end_offset;
5792
5793 if (i == sub_stripes - 1)
5794 stripe_offset = 0;
5795 } else {
5796 bbio->stripes[i].length = length;
5797 }
5798
5799 stripe_index++;
5800 if (stripe_index == map->num_stripes) {
5801 stripe_index = 0;
5802 stripe_nr++;
5803 }
5804 }
5805
5806 *bbio_ret = bbio;
5807 bbio->map_type = map->type;
5808 bbio->num_stripes = num_stripes;
5809 out:
5810 free_extent_map(em);
5811 return ret;
5812 }
5813
5814 /*
5815 * In dev-replace case, for repair case (that's the only case where the mirror
5816 * is selected explicitly when calling btrfs_map_block), blocks left of the
5817 * left cursor can also be read from the target drive.
5818 *
5819 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5820 * array of stripes.
5821 * For READ, it also needs to be supported using the same mirror number.
5822 *
5823 * If the requested block is not left of the left cursor, EIO is returned. This
5824 * can happen because btrfs_num_copies() returns one more in the dev-replace
5825 * case.
5826 */
5827 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5828 u64 logical, u64 length,
5829 u64 srcdev_devid, int *mirror_num,
5830 u64 *physical)
5831 {
5832 struct btrfs_bio *bbio = NULL;
5833 int num_stripes;
5834 int index_srcdev = 0;
5835 int found = 0;
5836 u64 physical_of_found = 0;
5837 int i;
5838 int ret = 0;
5839
5840 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5841 logical, &length, &bbio, 0, 0);
5842 if (ret) {
5843 ASSERT(bbio == NULL);
5844 return ret;
5845 }
5846
5847 num_stripes = bbio->num_stripes;
5848 if (*mirror_num > num_stripes) {
5849 /*
5850 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5851 * that means that the requested area is not left of the left
5852 * cursor
5853 */
5854 btrfs_put_bbio(bbio);
5855 return -EIO;
5856 }
5857
5858 /*
5859 * process the rest of the function using the mirror_num of the source
5860 * drive. Therefore look it up first. At the end, patch the device
5861 * pointer to the one of the target drive.
5862 */
5863 for (i = 0; i < num_stripes; i++) {
5864 if (bbio->stripes[i].dev->devid != srcdev_devid)
5865 continue;
5866
5867 /*
5868 * In case of DUP, in order to keep it simple, only add the
5869 * mirror with the lowest physical address
5870 */
5871 if (found &&
5872 physical_of_found <= bbio->stripes[i].physical)
5873 continue;
5874
5875 index_srcdev = i;
5876 found = 1;
5877 physical_of_found = bbio->stripes[i].physical;
5878 }
5879
5880 btrfs_put_bbio(bbio);
5881
5882 ASSERT(found);
5883 if (!found)
5884 return -EIO;
5885
5886 *mirror_num = index_srcdev + 1;
5887 *physical = physical_of_found;
5888 return ret;
5889 }
5890
5891 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5892 struct btrfs_bio **bbio_ret,
5893 struct btrfs_dev_replace *dev_replace,
5894 int *num_stripes_ret, int *max_errors_ret)
5895 {
5896 struct btrfs_bio *bbio = *bbio_ret;
5897 u64 srcdev_devid = dev_replace->srcdev->devid;
5898 int tgtdev_indexes = 0;
5899 int num_stripes = *num_stripes_ret;
5900 int max_errors = *max_errors_ret;
5901 int i;
5902
5903 if (op == BTRFS_MAP_WRITE) {
5904 int index_where_to_add;
5905
5906 /*
5907 * duplicate the write operations while the dev replace
5908 * procedure is running. Since the copying of the old disk to
5909 * the new disk takes place at run time while the filesystem is
5910 * mounted writable, the regular write operations to the old
5911 * disk have to be duplicated to go to the new disk as well.
5912 *
5913 * Note that device->missing is handled by the caller, and that
5914 * the write to the old disk is already set up in the stripes
5915 * array.
5916 */
5917 index_where_to_add = num_stripes;
5918 for (i = 0; i < num_stripes; i++) {
5919 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5920 /* write to new disk, too */
5921 struct btrfs_bio_stripe *new =
5922 bbio->stripes + index_where_to_add;
5923 struct btrfs_bio_stripe *old =
5924 bbio->stripes + i;
5925
5926 new->physical = old->physical;
5927 new->length = old->length;
5928 new->dev = dev_replace->tgtdev;
5929 bbio->tgtdev_map[i] = index_where_to_add;
5930 index_where_to_add++;
5931 max_errors++;
5932 tgtdev_indexes++;
5933 }
5934 }
5935 num_stripes = index_where_to_add;
5936 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5937 int index_srcdev = 0;
5938 int found = 0;
5939 u64 physical_of_found = 0;
5940
5941 /*
5942 * During the dev-replace procedure, the target drive can also
5943 * be used to read data in case it is needed to repair a corrupt
5944 * block elsewhere. This is possible if the requested area is
5945 * left of the left cursor. In this area, the target drive is a
5946 * full copy of the source drive.
5947 */
5948 for (i = 0; i < num_stripes; i++) {
5949 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5950 /*
5951 * In case of DUP, in order to keep it simple,
5952 * only add the mirror with the lowest physical
5953 * address
5954 */
5955 if (found &&
5956 physical_of_found <=
5957 bbio->stripes[i].physical)
5958 continue;
5959 index_srcdev = i;
5960 found = 1;
5961 physical_of_found = bbio->stripes[i].physical;
5962 }
5963 }
5964 if (found) {
5965 struct btrfs_bio_stripe *tgtdev_stripe =
5966 bbio->stripes + num_stripes;
5967
5968 tgtdev_stripe->physical = physical_of_found;
5969 tgtdev_stripe->length =
5970 bbio->stripes[index_srcdev].length;
5971 tgtdev_stripe->dev = dev_replace->tgtdev;
5972 bbio->tgtdev_map[index_srcdev] = num_stripes;
5973
5974 tgtdev_indexes++;
5975 num_stripes++;
5976 }
5977 }
5978
5979 *num_stripes_ret = num_stripes;
5980 *max_errors_ret = max_errors;
5981 bbio->num_tgtdevs = tgtdev_indexes;
5982 *bbio_ret = bbio;
5983 }
5984
5985 static bool need_full_stripe(enum btrfs_map_op op)
5986 {
5987 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5988 }
5989
5990 /*
5991 * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
5992 * tuple. This information is used to calculate how big a
5993 * particular bio can get before it straddles a stripe.
5994 *
5995 * @fs_info - the filesystem
5996 * @logical - address that we want to figure out the geometry of
5997 * @len - the length of IO we are going to perform, starting at @logical
5998 * @op - type of operation - write or read
5999 * @io_geom - pointer used to return values
6000 *
6001 * Returns < 0 in case a chunk for the given logical address cannot be found,
6002 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
6003 */
6004 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6005 u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
6006 {
6007 struct extent_map *em;
6008 struct map_lookup *map;
6009 u64 offset;
6010 u64 stripe_offset;
6011 u64 stripe_nr;
6012 u64 stripe_len;
6013 u64 raid56_full_stripe_start = (u64)-1;
6014 int data_stripes;
6015 int ret = 0;
6016
6017 ASSERT(op != BTRFS_MAP_DISCARD);
6018
6019 em = btrfs_get_chunk_map(fs_info, logical, len);
6020 if (IS_ERR(em))
6021 return PTR_ERR(em);
6022
6023 map = em->map_lookup;
6024 /* Offset of this logical address in the chunk */
6025 offset = logical - em->start;
6026 /* Len of a stripe in a chunk */
6027 stripe_len = map->stripe_len;
6028 /* Stripe where this block falls in */
6029 stripe_nr = div64_u64(offset, stripe_len);
6030 /* Offset of stripe in the chunk */
6031 stripe_offset = stripe_nr * stripe_len;
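	/* This should only happen if the 64-bit stripe math went wrong */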
6032 if (offset < stripe_offset) {
6033 btrfs_crit(fs_info,
6034 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6035 stripe_offset, offset, em->start, logical, stripe_len);
6036 ret = -EINVAL;
6037 goto out;
6038 }
6039
6040 /* stripe_offset is the offset of this block in its stripe */
6041 stripe_offset = offset - stripe_offset;
6042 data_stripes = nr_data_stripes(map);
6043
6044 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6045 u64 max_len = stripe_len - stripe_offset;
6046
6047 /*
6048 * In case of raid56, we need to know the stripe aligned start
6049 */
6050 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6051 unsigned long full_stripe_len = stripe_len * data_stripes;
6052 raid56_full_stripe_start = offset;
6053
6054 /*
6055 * Allow a write of a full stripe, but make sure we
6056 * don't allow straddling of stripes
6057 */
6058 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6059 full_stripe_len);
6060 raid56_full_stripe_start *= full_stripe_len;
6061
6062 /*
6063 * For writes to RAID[56], allow a full stripeset across
6064 * all disks. For other RAID types and for RAID[56]
6065 * reads, just allow a single stripe (on a single disk).
6066 */
6067 if (op == BTRFS_MAP_WRITE) {
6068 max_len = stripe_len * data_stripes -
6069 (offset - raid56_full_stripe_start);
6070 }
6071 }
6072 len = min_t(u64, em->len - offset, max_len);
6073 } else {
6074 len = em->len - offset;
6075 }
6076
6077 io_geom->len = len;
6078 io_geom->offset = offset;
6079 io_geom->stripe_len = stripe_len;
6080 io_geom->stripe_nr = stripe_nr;
6081 io_geom->stripe_offset = stripe_offset;
6082 io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6083
6084 out:
6085 /* once for us */
6086 free_extent_map(em);
6087 return ret;
6088 }
6089
6090 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6091 enum btrfs_map_op op,
6092 u64 logical, u64 *length,
6093 struct btrfs_bio **bbio_ret,
6094 int mirror_num, int need_raid_map)
6095 {
6096 struct extent_map *em;
6097 struct map_lookup *map;
6098 u64 stripe_offset;
6099 u64 stripe_nr;
6100 u64 stripe_len;
6101 u32 stripe_index;
6102 int data_stripes;
6103 int i;
6104 int ret = 0;
6105 int num_stripes;
6106 int max_errors = 0;
6107 int tgtdev_indexes = 0;
6108 struct btrfs_bio *bbio = NULL;
6109 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6110 int dev_replace_is_ongoing = 0;
6111 int num_alloc_stripes;
6112 int patch_the_first_stripe_for_dev_replace = 0;
6113 u64 physical_to_patch_in_first_stripe = 0;
6114 u64 raid56_full_stripe_start = (u64)-1;
6115 struct btrfs_io_geometry geom;
6116
6117 ASSERT(bbio_ret);
6118 ASSERT(op != BTRFS_MAP_DISCARD);
6119
6120 ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
6121 if (ret < 0)
6122 return ret;
6123
6124 em = btrfs_get_chunk_map(fs_info, logical, *length);
6125 ASSERT(!IS_ERR(em));
6126 map = em->map_lookup;
6127
6128 *length = geom.len;
6129 stripe_len = geom.stripe_len;
6130 stripe_nr = geom.stripe_nr;
6131 stripe_offset = geom.stripe_offset;
6132 raid56_full_stripe_start = geom.raid56_stripe_offset;
6133 data_stripes = nr_data_stripes(map);
6134
6135 down_read(&dev_replace->rwsem);
6136 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6137 /*
6138 * Hold the semaphore for read during the whole operation, write is
6139 * requested at commit time but must wait.
6140 */
6141 if (!dev_replace_is_ongoing)
6142 up_read(&dev_replace->rwsem);
6143
6144 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6145 !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6146 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6147 dev_replace->srcdev->devid,
6148 &mirror_num,
6149 &physical_to_patch_in_first_stripe);
6150 if (ret)
6151 goto out;
6152 else
6153 patch_the_first_stripe_for_dev_replace = 1;
6154 } else if (mirror_num > map->num_stripes) {
6155 mirror_num = 0;
6156 }
6157
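	/*
	 * Translate stripe_nr and mirror_num into the starting stripe_index
	 * and the number of stripes the bio must span, per RAID profile.
	 */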
6158 num_stripes = 1;
6159 stripe_index = 0;
6160 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6161 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6162 &stripe_index);
6163 if (!need_full_stripe(op))
6164 mirror_num = 1;
6165 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6166 if (need_full_stripe(op))
6167 num_stripes = map->num_stripes;
6168 else if (mirror_num)
6169 stripe_index = mirror_num - 1;
6170 else {
6171 stripe_index = find_live_mirror(fs_info, map, 0,
6172 dev_replace_is_ongoing);
6173 mirror_num = stripe_index + 1;
6174 }
6175
6176 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6177 if (need_full_stripe(op)) {
6178 num_stripes = map->num_stripes;
6179 } else if (mirror_num) {
6180 stripe_index = mirror_num - 1;
6181 } else {
6182 mirror_num = 1;
6183 }
6184
6185 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6186 u32 factor = map->num_stripes / map->sub_stripes;
6187
6188 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6189 stripe_index *= map->sub_stripes;
6190
6191 if (need_full_stripe(op))
6192 num_stripes = map->sub_stripes;
6193 else if (mirror_num)
6194 stripe_index += mirror_num - 1;
6195 else {
6196 int old_stripe_index = stripe_index;
6197 stripe_index = find_live_mirror(fs_info, map,
6198 stripe_index,
6199 dev_replace_is_ongoing);
6200 mirror_num = stripe_index - old_stripe_index + 1;
6201 }
6202
6203 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6204 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6205 /* push stripe_nr back to the start of the full stripe */
6206 stripe_nr = div64_u64(raid56_full_stripe_start,
6207 stripe_len * data_stripes);
6208
6209 /* RAID[56] write or recovery. Return all stripes */
6210 num_stripes = map->num_stripes;
6211 max_errors = nr_parity_stripes(map);
6212
6213 *length = map->stripe_len;
6214 stripe_index = 0;
6215 stripe_offset = 0;
6216 } else {
6217 /*
6218 * Mirror #0 or #1 means the original data block.
6219 * Mirror #2 is RAID5 parity block.
6220 * Mirror #3 is RAID6 Q block.
6221 */
6222 stripe_nr = div_u64_rem(stripe_nr,
6223 data_stripes, &stripe_index);
6224 if (mirror_num > 1)
6225 stripe_index = data_stripes + mirror_num - 2;
6226
6227 /* We distribute the parity blocks across stripes */
6228 div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6229 &stripe_index);
6230 if (!need_full_stripe(op) && mirror_num <= 1)
6231 mirror_num = 1;
6232 }
6233 } else {
6234 /*
6235 * after this, stripe_nr is the number of stripes on this
6236 * device we have to walk to find the data, and stripe_index is
6237 * the number of our device in the stripe array
6238 */
6239 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6240 &stripe_index);
6241 mirror_num = stripe_index + 1;
6242 }
6243 if (stripe_index >= map->num_stripes) {
6244 btrfs_crit(fs_info,
6245 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6246 stripe_index, map->num_stripes);
6247 ret = -EINVAL;
6248 goto out;
6249 }
6250
6251 num_alloc_stripes = num_stripes;
6252 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6253 if (op == BTRFS_MAP_WRITE)
6254 num_alloc_stripes <<= 1;
6255 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6256 num_alloc_stripes++;
6257 tgtdev_indexes = num_stripes;
6258 }
6259
6260 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6261 if (!bbio) {
6262 ret = -ENOMEM;
6263 goto out;
6264 }
6265
6266 for (i = 0; i < num_stripes; i++) {
6267 bbio->stripes[i].physical = map->stripes[stripe_index].physical +
6268 stripe_offset + stripe_nr * map->stripe_len;
6269 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6270 stripe_index++;
6271 }
6272
6273 /* build raid_map */
6274 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6275 (need_full_stripe(op) || mirror_num > 1)) {
6276 u64 tmp;
6277 unsigned rot;
6278
6279 /* Work out the disk rotation on this stripe-set */
6280 div_u64_rem(stripe_nr, num_stripes, &rot);
6281
6282 /* Fill in the logical address of each stripe */
6283 tmp = stripe_nr * data_stripes;
6284 for (i = 0; i < data_stripes; i++)
6285 bbio->raid_map[(i+rot) % num_stripes] =
6286 em->start + (tmp + i) * map->stripe_len;
6287
6288 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
6289 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6290 bbio->raid_map[(i+rot+1) % num_stripes] =
6291 RAID6_Q_STRIPE;
6292
6293 sort_parity_stripes(bbio, num_stripes);
6294 }
6295
6296 if (need_full_stripe(op))
6297 max_errors = btrfs_chunk_max_errors(map);
6298
6299 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6300 need_full_stripe(op)) {
6301 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
6302 &max_errors);
6303 }
6304
6305 *bbio_ret = bbio;
6306 bbio->map_type = map->type;
6307 bbio->num_stripes = num_stripes;
6308 bbio->max_errors = max_errors;
6309 bbio->mirror_num = mirror_num;
6310
6311 /*
6312 * This is the case that REQ_READ && dev_replace_is_ongoing &&
6313 * mirror_num == num_stripes + 1 && the dev_replace target drive
6314 * is available as a mirror.
6315 */
6316 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6317 WARN_ON(num_stripes > 1);
6318 bbio->stripes[0].dev = dev_replace->tgtdev;
6319 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6320 bbio->mirror_num = map->num_stripes + 1;
6321 }
6322 out:
6323 if (dev_replace_is_ongoing) {
6324 lockdep_assert_held(&dev_replace->rwsem);
6325 /* Unlock and let waiting writers proceed */
6326 up_read(&dev_replace->rwsem);
6327 }
6328 free_extent_map(em);
6329 return ret;
6330 }
6331
6332 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6333 u64 logical, u64 *length,
6334 struct btrfs_bio **bbio_ret, int mirror_num)
6335 {
6336 if (op == BTRFS_MAP_DISCARD)
6337 return __btrfs_map_block_for_discard(fs_info, logical,
6338 length, bbio_ret);
6339
6340 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6341 mirror_num, 0);
6342 }
6343
6344 /* For Scrub/replace */
6345 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6346 u64 logical, u64 *length,
6347 struct btrfs_bio **bbio_ret)
6348 {
6349 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6350 }
6351
6352 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6353 {
6354 bio->bi_private = bbio->private;
6355 bio->bi_end_io = bbio->end_io;
6356 bio_endio(bio);
6357
6358 btrfs_put_bbio(bbio);
6359 }
6360
6361 static void btrfs_end_bio(struct bio *bio)
6362 {
6363 struct btrfs_bio *bbio = bio->bi_private;
6364 int is_orig_bio = 0;
6365
6366 if (bio->bi_status) {
6367 atomic_inc(&bbio->error);
6368 if (bio->bi_status == BLK_STS_IOERR ||
6369 bio->bi_status == BLK_STS_TARGET) {
6370 struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6371
6372 ASSERT(dev->bdev);
6373 if (bio_op(bio) == REQ_OP_WRITE)
6374 btrfs_dev_stat_inc_and_print(dev,
6375 BTRFS_DEV_STAT_WRITE_ERRS);
6376 else if (!(bio->bi_opf & REQ_RAHEAD))
6377 btrfs_dev_stat_inc_and_print(dev,
6378 BTRFS_DEV_STAT_READ_ERRS);
6379 if (bio->bi_opf & REQ_PREFLUSH)
6380 btrfs_dev_stat_inc_and_print(dev,
6381 BTRFS_DEV_STAT_FLUSH_ERRS);
6382 }
6383 }
6384
6385 if (bio == bbio->orig_bio)
6386 is_orig_bio = 1;
6387
6388 btrfs_bio_counter_dec(bbio->fs_info);
6389
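	/* The bio completing the last pending stripe ends the original bio */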
6390 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6391 if (!is_orig_bio) {
6392 bio_put(bio);
6393 bio = bbio->orig_bio;
6394 }
6395
6396 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6397 /* Only send an error to the higher layers if it is
6398 * beyond the tolerance of the btrfs bio.
6399 */
6400 if (atomic_read(&bbio->error) > bbio->max_errors) {
6401 bio->bi_status = BLK_STS_IOERR;
6402 } else {
6403 /*
6404 * this bio is actually up to date, we didn't
6405 * go over the max number of errors
6406 */
6407 bio->bi_status = BLK_STS_OK;
6408 }
6409
6410 btrfs_end_bbio(bbio, bio);
6411 } else if (!is_orig_bio) {
6412 bio_put(bio);
6413 }
6414 }
6415
6416 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6417 u64 physical, struct btrfs_device *dev)
6418 {
6419 struct btrfs_fs_info *fs_info = bbio->fs_info;
6420
6421 bio->bi_private = bbio;
6422 btrfs_io_bio(bio)->device = dev;
6423 bio->bi_end_io = btrfs_end_bio;
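	/* bi_sector is in 512 byte units while physical is in bytes */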
6424 bio->bi_iter.bi_sector = physical >> 9;
6425 btrfs_debug_in_rcu(fs_info,
6426 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6427 bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6428 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6429 dev->devid, bio->bi_iter.bi_size);
6430 bio_set_dev(bio, dev->bdev);
6431
6432 btrfs_bio_counter_inc_noblocked(fs_info);
6433
6434 btrfsic_submit_bio(bio);
6435 }
6436
6437 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6438 {
6439 atomic_inc(&bbio->error);
6440 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6441 /* Should be the original bio. */
6442 WARN_ON(bio != bbio->orig_bio);
6443
6444 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6445 bio->bi_iter.bi_sector = logical >> 9;
6446 if (atomic_read(&bbio->error) > bbio->max_errors)
6447 bio->bi_status = BLK_STS_IOERR;
6448 else
6449 bio->bi_status = BLK_STS_OK;
6450 btrfs_end_bbio(bbio, bio);
6451 }
6452 }
6453
6454 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6455 int mirror_num)
6456 {
6457 struct btrfs_device *dev;
6458 struct bio *first_bio = bio;
6459 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6460 u64 length = 0;
6461 u64 map_length;
6462 int ret;
6463 int dev_nr;
6464 int total_devs;
6465 struct btrfs_bio *bbio = NULL;
6466
6467 length = bio->bi_iter.bi_size;
6468 map_length = length;
6469
6470 btrfs_bio_counter_inc_blocked(fs_info);
6471 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6472 &map_length, &bbio, mirror_num, 1);
6473 if (ret) {
6474 btrfs_bio_counter_dec(fs_info);
6475 return errno_to_blk_status(ret);
6476 }
6477
6478 total_devs = bbio->num_stripes;
6479 bbio->orig_bio = first_bio;
6480 bbio->private = first_bio->bi_private;
6481 bbio->end_io = first_bio->bi_end_io;
6482 bbio->fs_info = fs_info;
6483 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6484
6485 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6486 ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6487 /* In this case, map_length has been set to the length of
6488 * a single stripe, not the whole write */
6489 if (bio_op(bio) == REQ_OP_WRITE) {
6490 ret = raid56_parity_write(fs_info, bio, bbio,
6491 map_length);
6492 } else {
6493 ret = raid56_parity_recover(fs_info, bio, bbio,
6494 map_length, mirror_num, 1);
6495 }
6496
6497 btrfs_bio_counter_dec(fs_info);
6498 return errno_to_blk_status(ret);
6499 }
6500
6501 if (map_length < length) {
6502 btrfs_crit(fs_info,
6503 "mapping failed logical %llu bio len %llu len %llu",
6504 logical, length, map_length);
6505 BUG();
6506 }
6507
6508 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6509 dev = bbio->stripes[dev_nr].dev;
6510 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6511 &dev->dev_state) ||
6512 (bio_op(first_bio) == REQ_OP_WRITE &&
6513 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6514 bbio_error(bbio, first_bio, logical);
6515 continue;
6516 }
6517
6518 if (dev_nr < total_devs - 1)
6519 bio = btrfs_bio_clone(first_bio);
6520 else
6521 bio = first_bio;
6522
6523 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6524 }
6525 btrfs_bio_counter_dec(fs_info);
6526 return BLK_STS_OK;
6527 }
6528
6529 /*
6530 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6531 * return NULL.
6532 *
6533 * If devid and uuid are both specified, the match must be exact, otherwise
6534 * only devid is used.
6535 *
6536 * If @seed is true, traverse through the seed devices.
6537 */
6538 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6539 u64 devid, u8 *uuid, u8 *fsid,
6540 bool seed)
6541 {
6542 struct btrfs_device *device;
6543 struct btrfs_fs_devices *seed_devs;
6544
6545 if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6546 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6547 if (device->devid == devid &&
6548 (!uuid || memcmp(device->uuid, uuid,
6549 BTRFS_UUID_SIZE) == 0))
6550 return device;
6551 }
6552 }
6553
6554 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6555 if (!fsid ||
6556 !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6557 list_for_each_entry(device, &seed_devs->devices,
6558 dev_list) {
6559 if (device->devid == devid &&
6560 (!uuid || memcmp(device->uuid, uuid,
6561 BTRFS_UUID_SIZE) == 0))
6562 return device;
6563 }
6564 }
6565 }
6566
6567 return NULL;
6568 }
6569
6570 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6571 u64 devid, u8 *dev_uuid)
6572 {
6573 struct btrfs_device *device;
6574 unsigned int nofs_flag;
6575
6576 /*
6577 * We call this under the chunk_mutex, so we want to use NOFS for this
6578 * allocation, however we don't want to change btrfs_alloc_device() to
6579 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6580 * places.
6581 */
6582 nofs_flag = memalloc_nofs_save();
6583 device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6584 memalloc_nofs_restore(nofs_flag);
6585 if (IS_ERR(device))
6586 return device;
6587
6588 list_add(&device->dev_list, &fs_devices->devices);
6589 device->fs_devices = fs_devices;
6590 fs_devices->num_devices++;
6591
6592 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6593 fs_devices->missing_devices++;
6594
6595 return device;
6596 }
6597
6598 /**
6599 * btrfs_alloc_device - allocate struct btrfs_device
6600 * @fs_info: used only for generating a new devid, can be NULL if
6601 * devid is provided (i.e. @devid != NULL).
6602 * @devid: a pointer to devid for this device. If NULL a new devid
6603 * is generated.
6604 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6605 * is generated.
6606 *
6607 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6608 * on error. Returned struct is not linked onto any lists and must be
6609 * destroyed with btrfs_free_device.
6610 */
6611 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6612 const u64 *devid,
6613 const u8 *uuid)
6614 {
6615 struct btrfs_device *dev;
6616 u64 tmp;
6617
6618 if (WARN_ON(!devid && !fs_info))
6619 return ERR_PTR(-EINVAL);
6620
6621 dev = __alloc_device(fs_info);
6622 if (IS_ERR(dev))
6623 return dev;
6624
6625 if (devid)
6626 tmp = *devid;
6627 else {
6628 int ret;
6629
6630 ret = find_next_devid(fs_info, &tmp);
6631 if (ret) {
6632 btrfs_free_device(dev);
6633 return ERR_PTR(ret);
6634 }
6635 }
6636 dev->devid = tmp;
6637
6638 if (uuid)
6639 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6640 else
6641 generate_random_uuid(dev->uuid);
6642
6643 return dev;
6644 }
6645
6646 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6647 u64 devid, u8 *uuid, bool error)
6648 {
6649 if (error)
6650 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6651 devid, uuid);
6652 else
6653 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6654 devid, uuid);
6655 }
6656
6657 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6658 {
6659 int index = btrfs_bg_flags_to_raid_index(type);
6660 int ncopies = btrfs_raid_array[index].ncopies;
6661 const int nparity = btrfs_raid_array[index].nparity;
6662 int data_stripes;
6663
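	/*
	 * Parity profiles (RAID5/6) dedicate nparity stripes to parity and
	 * the rest to data, while mirrored profiles split the stripes into
	 * ncopies identical copies.
	 */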
6664 if (nparity)
6665 data_stripes = num_stripes - nparity;
6666 else
6667 data_stripes = num_stripes / ncopies;
6668
6669 return div_u64(chunk_len, data_stripes);
6670 }
6671
6672 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6673 struct btrfs_chunk *chunk)
6674 {
6675 struct btrfs_fs_info *fs_info = leaf->fs_info;
6676 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6677 struct map_lookup *map;
6678 struct extent_map *em;
6679 u64 logical;
6680 u64 length;
6681 u64 devid;
6682 u8 uuid[BTRFS_UUID_SIZE];
6683 int num_stripes;
6684 int ret;
6685 int i;
6686
6687 logical = key->offset;
6688 length = btrfs_chunk_length(leaf, chunk);
6689 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6690
6691 /*
6692 * Only need to verify chunk item if we're reading from sys chunk array,
6693 * as chunk item in tree block is already verified by tree-checker.
6694 */
6695 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6696 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6697 if (ret)
6698 return ret;
6699 }
6700
6701 read_lock(&map_tree->lock);
6702 em = lookup_extent_mapping(map_tree, logical, 1);
6703 read_unlock(&map_tree->lock);
6704
6705 /* already mapped? */
6706 if (em && em->start <= logical && em->start + em->len > logical) {
6707 free_extent_map(em);
6708 return 0;
6709 } else if (em) {
6710 free_extent_map(em);
6711 }
6712
6713 em = alloc_extent_map();
6714 if (!em)
6715 return -ENOMEM;
6716 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6717 if (!map) {
6718 free_extent_map(em);
6719 return -ENOMEM;
6720 }
6721
6722 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6723 em->map_lookup = map;
6724 em->start = logical;
6725 em->len = length;
6726 em->orig_start = 0;
6727 em->block_start = 0;
6728 em->block_len = em->len;
6729
6730 map->num_stripes = num_stripes;
6731 map->io_width = btrfs_chunk_io_width(leaf, chunk);
6732 map->io_align = btrfs_chunk_io_align(leaf, chunk);
6733 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6734 map->type = btrfs_chunk_type(leaf, chunk);
6735 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6736 map->verified_stripes = 0;
6737 em->orig_block_len = calc_stripe_length(map->type, em->len,
6738 map->num_stripes);
6739 for (i = 0; i < num_stripes; i++) {
6740 map->stripes[i].physical =
6741 btrfs_stripe_offset_nr(leaf, chunk, i);
6742 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6743 read_extent_buffer(leaf, uuid, (unsigned long)
6744 btrfs_stripe_dev_uuid_nr(chunk, i),
6745 BTRFS_UUID_SIZE);
6746 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6747 devid, uuid, NULL, true);
6748 if (!map->stripes[i].dev &&
6749 !btrfs_test_opt(fs_info, DEGRADED)) {
6750 free_extent_map(em);
6751 btrfs_report_missing_device(fs_info, devid, uuid, true);
6752 return -ENOENT;
6753 }
6754 if (!map->stripes[i].dev) {
6755 map->stripes[i].dev =
6756 add_missing_dev(fs_info->fs_devices, devid,
6757 uuid);
6758 if (IS_ERR(map->stripes[i].dev)) {
6759 free_extent_map(em);
6760 btrfs_err(fs_info,
6761 "failed to init missing dev %llu: %ld",
6762 devid, PTR_ERR(map->stripes[i].dev));
6763 return PTR_ERR(map->stripes[i].dev);
6764 }
6765 btrfs_report_missing_device(fs_info, devid, uuid, false);
6766 }
6767 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6768 &(map->stripes[i].dev->dev_state));
6770 }
6771
6772 write_lock(&map_tree->lock);
6773 ret = add_extent_mapping(map_tree, em, 0);
6774 write_unlock(&map_tree->lock);
6775 if (ret < 0) {
6776 btrfs_err(fs_info,
6777 "failed to add chunk map, start=%llu len=%llu: %d",
6778 em->start, em->len, ret);
6779 }
6780 free_extent_map(em);
6781
6782 return ret;
6783 }
6784
6785 static void fill_device_from_item(struct extent_buffer *leaf,
6786 struct btrfs_dev_item *dev_item,
6787 struct btrfs_device *device)
6788 {
6789 unsigned long ptr;
6790
6791 device->devid = btrfs_device_id(leaf, dev_item);
6792 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6793 device->total_bytes = device->disk_total_bytes;
6794 device->commit_total_bytes = device->disk_total_bytes;
6795 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6796 device->commit_bytes_used = device->bytes_used;
6797 device->type = btrfs_device_type(leaf, dev_item);
6798 device->io_align = btrfs_device_io_align(leaf, dev_item);
6799 device->io_width = btrfs_device_io_width(leaf, dev_item);
6800 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6801 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6802 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6803
6804 ptr = btrfs_device_uuid(dev_item);
6805 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6806 }
6807
6808 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6809 u8 *fsid)
6810 {
6811 struct btrfs_fs_devices *fs_devices;
6812 int ret;
6813
6814 lockdep_assert_held(&uuid_mutex);
6815 ASSERT(fsid);
6816
6817 /* This will match only for multi-device seed fs */
6818 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
6819 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6820 return fs_devices;
6821
6823 fs_devices = find_fsid(fsid, NULL);
6824 if (!fs_devices) {
6825 if (!btrfs_test_opt(fs_info, DEGRADED))
6826 return ERR_PTR(-ENOENT);
6827
6828 fs_devices = alloc_fs_devices(fsid, NULL);
6829 if (IS_ERR(fs_devices))
6830 return fs_devices;
6831
6832 fs_devices->seeding = true;
6833 fs_devices->opened = 1;
6834 return fs_devices;
6835 }
6836
6837 /*
6838 * Upon first call for a seed fs fsid, just create a private copy of the
6839 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
6840 */
6841 fs_devices = clone_fs_devices(fs_devices);
6842 if (IS_ERR(fs_devices))
6843 return fs_devices;
6844
6845 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6846 if (ret) {
6847 free_fs_devices(fs_devices);
6848 return ERR_PTR(ret);
6849 }
6850
6851 if (!fs_devices->seeding) {
6852 close_fs_devices(fs_devices);
6853 free_fs_devices(fs_devices);
6854 return ERR_PTR(-EINVAL);
6855 }
6856
6857 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
6858
6859 return fs_devices;
6860 }
6861
6862 static int read_one_dev(struct extent_buffer *leaf,
6863 struct btrfs_dev_item *dev_item)
6864 {
6865 struct btrfs_fs_info *fs_info = leaf->fs_info;
6866 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6867 struct btrfs_device *device;
6868 u64 devid;
6870 u8 fs_uuid[BTRFS_FSID_SIZE];
6871 u8 dev_uuid[BTRFS_UUID_SIZE];
6872
6873 devid = btrfs_device_id(leaf, dev_item);
6874 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6875 BTRFS_UUID_SIZE);
6876 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6877 BTRFS_FSID_SIZE);
6878
6879 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
6880 fs_devices = open_seed_devices(fs_info, fs_uuid);
6881 if (IS_ERR(fs_devices))
6882 return PTR_ERR(fs_devices);
6883 }
6884
6885 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6886 fs_uuid, true);
6887 if (!device) {
6888 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6889 btrfs_report_missing_device(fs_info, devid,
6890 dev_uuid, true);
6891 return -ENOENT;
6892 }
6893
6894 device = add_missing_dev(fs_devices, devid, dev_uuid);
6895 if (IS_ERR(device)) {
6896 btrfs_err(fs_info,
6897 "failed to add missing dev %llu: %ld",
6898 devid, PTR_ERR(device));
6899 return PTR_ERR(device);
6900 }
6901 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6902 } else {
6903 if (!device->bdev) {
6904 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6905 btrfs_report_missing_device(fs_info,
6906 devid, dev_uuid, true);
6907 return -ENOENT;
6908 }
6909 btrfs_report_missing_device(fs_info, devid,
6910 dev_uuid, false);
6911 }
6912
6913 if (!device->bdev &&
6914 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6915 /*
6916 * This happens when a device that was properly set up
6917 * in the device info lists suddenly goes bad.
6918 * device->bdev is NULL, so we have to set the
6919 * BTRFS_DEV_STATE_MISSING bit here.
6920 */
6921 device->fs_devices->missing_devices++;
6922 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6923 }
6924
6925 /* Move the device to its own fs_devices */
6926 if (device->fs_devices != fs_devices) {
6927 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6928 &device->dev_state));
6929
6930 list_move(&device->dev_list, &fs_devices->devices);
6931 device->fs_devices->num_devices--;
6932 fs_devices->num_devices++;
6933
6934 device->fs_devices->missing_devices--;
6935 fs_devices->missing_devices++;
6936
6937 device->fs_devices = fs_devices;
6938 }
6939 }
6940
6941 if (device->fs_devices != fs_info->fs_devices) {
6942 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6943 if (device->generation !=
6944 btrfs_device_generation(leaf, dev_item))
6945 return -EINVAL;
6946 }
6947
6948 fill_device_from_item(leaf, dev_item, device);
6949 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6950 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6951 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6952 device->fs_devices->total_rw_bytes += device->total_bytes;
6953 atomic64_add(device->total_bytes - device->bytes_used,
6954 &fs_info->free_chunk_space);
6955 }
6956 return 0;
6958 }
6959
6960 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6961 {
6962 struct btrfs_root *root = fs_info->tree_root;
6963 struct btrfs_super_block *super_copy = fs_info->super_copy;
6964 struct extent_buffer *sb;
6965 struct btrfs_disk_key *disk_key;
6966 struct btrfs_chunk *chunk;
6967 u8 *array_ptr;
6968 unsigned long sb_array_offset;
6969 int ret = 0;
6970 u32 num_stripes;
6971 u32 array_size;
6972 u32 len = 0;
6973 u32 cur_offset;
6974 u64 type;
6975 struct btrfs_key key;
6976
6977 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6978 /*
6979 * This will create an extent buffer of nodesize; the superblock size is
6980 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6981 * overallocate but we can keep it as-is, only the first page is used.
6982 */
6983 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
6984 if (IS_ERR(sb))
6985 return PTR_ERR(sb);
6986 set_extent_buffer_uptodate(sb);
6987 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6988 /*
6989 * The sb extent buffer is artificial and just used to read the system array.
6990 * set_extent_buffer_uptodate() call does not properly mark all its
6991 * pages up-to-date when the page is larger: extent does not cover the
6992 * whole page and consequently check_page_uptodate does not find all
6993 * the page's extents up-to-date (the hole beyond sb),
6994 * write_extent_buffer then triggers a WARN_ON.
6995 *
6996 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6997 * but sb spans only this function. Add an explicit SetPageUptodate call
6998 * to silence the warning eg. on PowerPC 64.
6999 */
7000 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7001 SetPageUptodate(sb->pages[0]);
7002
7003 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7004 array_size = btrfs_super_sys_array_size(super_copy);
7005
7006 array_ptr = super_copy->sys_chunk_array;
7007 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7008 cur_offset = 0;
7009
7010 while (cur_offset < array_size) {
7011 disk_key = (struct btrfs_disk_key *)array_ptr;
7012 len = sizeof(*disk_key);
7013 if (cur_offset + len > array_size)
7014 goto out_short_read;
7015
7016 btrfs_disk_key_to_cpu(&key, disk_key);
7017
7018 array_ptr += len;
7019 sb_array_offset += len;
7020 cur_offset += len;
7021
7022 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7023 btrfs_err(fs_info,
7024 "unexpected item type %u in sys_array at offset %u",
7025 (u32)key.type, cur_offset);
7026 ret = -EIO;
7027 break;
7028 }
7029
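		/*
		 * sb_array_offset is an offset into the extent buffer; the
		 * btrfs_chunk_*() accessors interpret the pointer value as
		 * that offset rather than dereferencing it directly.
		 */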
7030 chunk = (struct btrfs_chunk *)sb_array_offset;
7031 /*
7032 * At least one btrfs_chunk with one stripe must be present,
7033 * exact stripe count check comes afterwards
7034 */
7035 len = btrfs_chunk_item_size(1);
7036 if (cur_offset + len > array_size)
7037 goto out_short_read;
7038
7039 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7040 if (!num_stripes) {
7041 btrfs_err(fs_info,
7042 "invalid number of stripes %u in sys_array at offset %u",
7043 num_stripes, cur_offset);
7044 ret = -EIO;
7045 break;
7046 }
7047
7048 type = btrfs_chunk_type(sb, chunk);
7049 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7050 btrfs_err(fs_info,
7051 "invalid chunk type %llu in sys_array at offset %u",
7052 type, cur_offset);
7053 ret = -EIO;
7054 break;
7055 }
7056
7057 len = btrfs_chunk_item_size(num_stripes);
7058 if (cur_offset + len > array_size)
7059 goto out_short_read;
7060
7061 ret = read_one_chunk(&key, sb, chunk);
7062 if (ret)
7063 break;
7064
7065 array_ptr += len;
7066 sb_array_offset += len;
7067 cur_offset += len;
7068 }
7069 clear_extent_buffer_uptodate(sb);
7070 free_extent_buffer_stale(sb);
7071 return ret;
7072
7073 out_short_read:
7074 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7075 len, cur_offset);
7076 clear_extent_buffer_uptodate(sb);
7077 free_extent_buffer_stale(sb);
7078 return -EIO;
7079 }
7080
7081 /*
7082 * Check if all chunks in the fs are OK for read-write degraded mount
7083 *
7084 * If the @failing_dev is specified, it's accounted as missing.
7085 *
7086 * Return true if all chunks meet the minimal RW mount requirements.
7087 * Return false if any chunk doesn't meet the minimal RW mount requirements.
7088 */
7089 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7090 struct btrfs_device *failing_dev)
7091 {
7092 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7093 struct extent_map *em;
7094 u64 next_start = 0;
7095 bool ret = true;
7096
7097 read_lock(&map_tree->lock);
7098 em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7099 read_unlock(&map_tree->lock);
7100 /* No chunk at all? Return false anyway */
7101 if (!em) {
7102 ret = false;
7103 goto out;
7104 }
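	/* Walk all chunk maps and count missing or failed stripe devices */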
7105 while (em) {
7106 struct map_lookup *map;
7107 int missing = 0;
7108 int max_tolerated;
7109 int i;
7110
7111 map = em->map_lookup;
7112 max_tolerated =
7113 btrfs_get_num_tolerated_disk_barrier_failures(
7114 map->type);
7115 for (i = 0; i < map->num_stripes; i++) {
7116 struct btrfs_device *dev = map->stripes[i].dev;
7117
7118 if (!dev || !dev->bdev ||
7119 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7120 dev->last_flush_error)
7121 missing++;
7122 else if (failing_dev && failing_dev == dev)
7123 missing++;
7124 }
7125 if (missing > max_tolerated) {
7126 if (!failing_dev)
7127 btrfs_warn(fs_info,
7128 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7129 em->start, missing, max_tolerated);
7130 free_extent_map(em);
7131 ret = false;
7132 goto out;
7133 }
7134 next_start = extent_map_end(em);
7135 free_extent_map(em);
7136
7137 read_lock(&map_tree->lock);
7138 em = lookup_extent_mapping(map_tree, next_start,
7139 (u64)(-1) - next_start);
7140 read_unlock(&map_tree->lock);
7141 }
7142 out:
7143 return ret;
7144 }
7145
7146 static void readahead_tree_node_children(struct extent_buffer *node)
7147 {
7148 int i;
7149 const int nr_items = btrfs_header_nritems(node);
7150
7151 for (i = 0; i < nr_items; i++) {
7152 u64 start;
7153
7154 start = btrfs_node_blockptr(node, i);
7155 readahead_tree_block(node->fs_info, start);
7156 }
7157 }
7158
7159 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7160 {
7161 struct btrfs_root *root = fs_info->chunk_root;
7162 struct btrfs_path *path;
7163 struct extent_buffer *leaf;
7164 struct btrfs_key key;
7165 struct btrfs_key found_key;
7166 int ret;
7167 int slot;
7168 u64 total_dev = 0;
7169 u64 last_ra_node = 0;
7170
7171 path = btrfs_alloc_path();
7172 if (!path)
7173 return -ENOMEM;
7174
7175 /*
7176 * uuid_mutex is needed only if we are mounting a sprout FS;
7177 * otherwise it is not required.
7178 */
7179 mutex_lock(&uuid_mutex);
7180
	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		struct extent_buffer *node;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		/*
		 * The nodes on level 1 are not locked, but we don't need to
		 * lock them during mount time as nothing else can access the
		 * tree.
		 */
		node = path->nodes[1];
		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			mutex_lock(&fs_info->chunk_mutex);
			ret = read_one_chunk(&found_key, leaf, chunk);
			mutex_unlock(&fs_info->chunk_mutex);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading the chunk tree we have all device information,
	 * so do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_warn(fs_info,
"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
			   btrfs_super_num_devices(fs_info->super_copy),
			   total_dev);
		fs_info->fs_devices->total_devices = total_dev;
		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

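/*
 * Point every known device, including the devices of all seed filesystems,
 * back at the fs_info once it exists.
 */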
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

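/*
 * Read values[index] from a dev_stats item. @ptr is the offset of the item
 * inside @eb, so the counter is located at
 * (unsigned long)ptr + offsetof(struct btrfs_dev_stats_item, values) +
 * index * sizeof(u64).
 */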
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			   ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

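/* Counterpart of btrfs_dev_stats_value(): write values[index] into the item. */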
static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

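/*
 * Load the persistent error counters of one device from its dev_stats item.
 * If the item does not exist yet (e.g. on a freshly created filesystem), all
 * counters start at zero and the stats are still marked valid.
 */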
static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		/* Items written by older kernels may hold fewer counters */
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

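/*
 * Load the persistent error counters of all devices, including seed devices,
 * at mount time.
 */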
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}

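/*
 * Write the in-memory error counters of one device into its dev_stats item,
 * first replacing an existing item that is too small to hold all counters.
 */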
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called during transaction commit; writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values, which
		 * requires reading the in-memory counters. Such control
		 * dependencies require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

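/* Bump one error counter and print the (ratelimited) per-device summary. */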
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

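/*
 * Print all error counters of a device, ratelimited. Nothing is printed
 * before the counters have been loaded from disk.
 */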
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

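/*
 * Print the error counters once when they are loaded at mount time, unless
 * every counter is zero.
 */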
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

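/*
 * Fill @stats with the counters of the device given by stats->devid,
 * optionally resetting them when BTRFS_DEV_STATS_RESET is set in
 * stats->flags. Backs the dev stats ioctls.
 */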
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
				true);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}

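/*
 * Cross-check one dev extent against the chunk mapping: the referenced chunk
 * must exist, the extent length must match the chunk's per-device stripe
 * length, the (devid, physical offset) pair must be one of the chunk's
 * stripes, and the extent must not run past the end of the device.
 */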
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond the device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy standing in for a seed device */
	if (dev->disk_total_bytes == 0) {
		struct btrfs_fs_devices *devs;

		devs = list_first_entry(&fs_info->fs_devices->seed_list,
					struct btrfs_fs_devices, seed_list);
		dev = btrfs_find_device(devs, devid, NULL, NULL, false);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}

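/*
 * Companion to verify_one_dev_extent(): after all dev extents were checked,
 * make sure every chunk had all of its stripes covered by a dev extent.
 */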
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This iterates through the whole device tree, which should be about
 * the same size as the chunk tree, so it only slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/* Start from the smallest possible devid (1) and the first extent */
	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}
