1 /*
2  * Copyright (C) 2011-2012 Red Hat, Inc.
3  *
4  * This file is released under the GPL.
5  */
6 
7 #include "dm-thin-metadata.h"
8 #include "persistent-data/dm-btree.h"
9 #include "persistent-data/dm-space-map.h"
10 #include "persistent-data/dm-space-map-disk.h"
11 #include "persistent-data/dm-transaction-manager.h"
12 
13 #include <linux/list.h>
14 #include <linux/device-mapper.h>
15 #include <linux/workqueue.h>
16 
17 /*--------------------------------------------------------------------------
18  * As far as the metadata goes, there is:
19  *
20  * - A superblock in block zero, taking up fewer than 512 bytes for
21  *   atomic writes.
22  *
23  * - A space map managing the metadata blocks.
24  *
25  * - A space map managing the data blocks.
26  *
27  * - A btree mapping our internal thin dev ids onto struct disk_device_details.
28  *
29  * - A hierarchical btree, with 2 levels which effectively maps (thin
30  *   dev id, virtual block) -> block_time.  Block time is a 64-bit
31  *   field holding the time in the low 24 bits, and block in the top 40
32  *   bits.
33  *
34  * BTrees consist solely of btree_nodes, each of which fills a block.
35  * Some are internal nodes, so their values are __le64s pointing to other
36  * nodes.  Leaf nodes can store data of any reasonable size (i.e. much
37  * smaller than the block size).  The nodes consist of the header,
38  * followed by an array of keys, followed by an array of values.  We
39  * binary search on the keys, so they're all held together to help the
40  * cpu cache.
41  *
42  * Space maps have 2 btrees:
43  *
44  * - One maps a uint64_t onto a struct index_entry, which points to a
45  *   bitmap block and records details such as how many free entries
46  *   there are.
47  *
48  * - The bitmap blocks have a header (for the checksum).  The rest of
49  *   the block is pairs of bits, with the following meaning:
50  *
51  *   0 - ref count is 0
52  *   1 - ref count is 1
53  *   2 - ref count is 2
54  *   3 - ref count is higher than 2
55  *
56  * - If the count is higher than 2 then the ref count is entered in a
57  *   second btree that directly maps the block_address to a uint32_t ref
58  *   count.
59  *
60  * The space map metadata variant doesn't have a bitmaps btree.  Instead
61  * it has a single block's worth of index_entries.  This avoids
62  * recursive issues with the bitmap btree needing to allocate space in
63  * order to insert.  With a small data block size such as 64k the
64  * metadata supports data devices that are hundreds of terabytes.
65  *
66  * The space maps allocate space linearly from front to back.  Space that
67  * is freed in a transaction is never recycled within that transaction.
68  * To try and avoid fragmenting _free_ space the allocator always goes
69  * back and fills in gaps.
70  *
71  * All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
72  * from the block manager.
73  *--------------------------------------------------------------------------*/
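
/*
 * Illustrative example (added for clarity, not part of the original
 * source): with block = 1000 and time = 5, the packed block_time value
 * described above is (1000 << 24) | 5 = 0x3E8000005 -- the low 24 bits
 * carry the time and the top 40 bits carry the block, matching the
 * pack_block_time()/unpack_block_time() helpers further down.
 */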
74 
75 #define DM_MSG_PREFIX   "thin metadata"
76 
77 #define THIN_SUPERBLOCK_MAGIC 27022010
78 #define THIN_SUPERBLOCK_LOCATION 0
79 #define THIN_VERSION 2
80 #define SECTOR_TO_BLOCK_SHIFT 3
81 
82 /*
83  * For btree insert:
84  *  3 for btree insert +
85  *  2 for btree lookup used within space map
86  * For btree remove:
87  *  2 for shadow spine +
88  *  4 for rebalancing 3 child nodes; the larger path (2 + 4 = 6) sets the limit below
89  */
90 #define THIN_MAX_CONCURRENT_LOCKS 6
91 
92 /* This should be plenty */
93 #define SPACE_MAP_ROOT_SIZE 128
94 
95 /*
96  * Little endian on-disk superblock and device details.
97  */
98 struct thin_disk_superblock {
99 	__le32 csum;	/* Checksum of superblock except for this field. */
100 	__le32 flags;
101 	__le64 blocknr;	/* This block number, dm_block_t. */
102 
103 	__u8 uuid[16];
104 	__le64 magic;
105 	__le32 version;
106 	__le32 time;
107 
108 	__le64 trans_id;
109 
110 	/*
111 	 * Root held by userspace transactions.
112 	 */
113 	__le64 held_root;
114 
115 	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
116 	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
117 
118 	/*
119 	 * 2-level btree mapping (dev_id, dev block) -> (data block, time)
120 	 */
121 	__le64 data_mapping_root;
122 
123 	/*
124 	 * Device detail root mapping dev_id -> device_details
125 	 */
126 	__le64 device_details_root;
127 
128 	__le32 data_block_size;		/* In 512-byte sectors. */
129 
130 	__le32 metadata_block_size;	/* In 512-byte sectors. */
131 	__le64 metadata_nr_blocks;
132 
133 	__le32 compat_flags;
134 	__le32 compat_ro_flags;
135 	__le32 incompat_flags;
136 } __packed;
137 
138 struct disk_device_details {
139 	__le64 mapped_blocks;
140 	__le64 transaction_id;		/* When created. */
141 	__le32 creation_time;
142 	__le32 snapshotted_time;
143 } __packed;
144 
145 struct dm_pool_metadata {
146 	struct hlist_node hash;
147 
148 	struct block_device *bdev;
149 	struct dm_block_manager *bm;
150 	struct dm_space_map *metadata_sm;
151 	struct dm_space_map *data_sm;
152 	struct dm_transaction_manager *tm;
153 	struct dm_transaction_manager *nb_tm;
154 
155 	/*
156 	 * Two-level btree.
157 	 * First level holds thin_dev_t.
158 	 * Second level holds mappings.
159 	 */
160 	struct dm_btree_info info;
161 
162 	/*
163 	 * Non-blocking version of the above.
164 	 */
165 	struct dm_btree_info nb_info;
166 
167 	/*
168 	 * Just the top level for deleting whole devices.
169 	 */
170 	struct dm_btree_info tl_info;
171 
172 	/*
173 	 * Just the bottom level for creating new devices.
174 	 */
175 	struct dm_btree_info bl_info;
176 
177 	/*
178 	 * Describes the device details btree.
179 	 */
180 	struct dm_btree_info details_info;
181 
182 	struct rw_semaphore root_lock;
183 	uint32_t time;
184 	dm_block_t root;
185 	dm_block_t details_root;
186 	struct list_head thin_devices;
187 	uint64_t trans_id;
188 	unsigned long flags;
189 	sector_t data_block_size;
190 
191 	/*
192 	 * Pre-commit callback.
193 	 *
194 	 * This allows the thin provisioning target to run a callback before
195 	 * the metadata are committed.
196 	 */
197 	dm_pool_pre_commit_fn pre_commit_fn;
198 	void *pre_commit_context;
199 
200 	/*
201 	 * We reserve a section of the metadata for commit overhead.
202 	 * All reported space does *not* include this.
203 	 */
204 	dm_block_t metadata_reserve;
205 
206 	/*
207 	 * Set if a transaction has to be aborted but the attempt to roll back
208 	 * to the previous (good) transaction failed.  The only pool metadata
209 	 * operation possible in this state is the closing of the device.
210 	 */
211 	bool fail_io:1;
212 
213 	/*
214 	 * Set once a thin-pool has been accessed through one of the interfaces
215 	 * that imply the pool is in-service (e.g. thin devices created/deleted,
216 	 * thin-pool message, metadata snapshots, etc).
217 	 */
218 	bool in_service:1;
219 
220 	/*
221 	 * Reading the space map roots can fail, so we read it into these
222 	 * buffers before the superblock is locked and updated.
223 	 */
224 	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
225 	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
226 };
227 
228 struct dm_thin_device {
229 	struct list_head list;
230 	struct dm_pool_metadata *pmd;
231 	dm_thin_id id;
232 
233 	int open_count;
234 	bool changed:1;
235 	bool aborted_with_changes:1;
236 	uint64_t mapped_blocks;
237 	uint64_t transaction_id;
238 	uint32_t creation_time;
239 	uint32_t snapshotted_time;
240 };
241 
242 /*----------------------------------------------------------------
243  * superblock validator
244  *--------------------------------------------------------------*/
245 
246 #define SUPERBLOCK_CSUM_XOR 160774
247 
248 static void sb_prepare_for_write(struct dm_block_validator *v,
249 				 struct dm_block *b,
250 				 size_t block_size)
251 {
252 	struct thin_disk_superblock *disk_super = dm_block_data(b);
253 
254 	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
255 	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
256 						      block_size - sizeof(__le32),
257 						      SUPERBLOCK_CSUM_XOR));
258 }
259 
260 static int sb_check(struct dm_block_validator *v,
261 		    struct dm_block *b,
262 		    size_t block_size)
263 {
264 	struct thin_disk_superblock *disk_super = dm_block_data(b);
265 	__le32 csum_le;
266 
267 	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
268 		DMERR("sb_check failed: blocknr %llu: wanted %llu",
269 		      le64_to_cpu(disk_super->blocknr),
270 		      (unsigned long long)dm_block_location(b));
271 		return -ENOTBLK;
272 	}
273 
274 	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
275 		DMERR("sb_check failed: magic %llu: wanted %llu",
276 		      le64_to_cpu(disk_super->magic),
277 		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
278 		return -EILSEQ;
279 	}
280 
281 	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
282 					     block_size - sizeof(__le32),
283 					     SUPERBLOCK_CSUM_XOR));
284 	if (csum_le != disk_super->csum) {
285 		DMERR("sb_check failed: csum %u: wanted %u",
286 		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
287 		return -EILSEQ;
288 	}
289 
290 	return 0;
291 }
292 
293 static struct dm_block_validator sb_validator = {
294 	.name = "superblock",
295 	.prepare_for_write = sb_prepare_for_write,
296 	.check = sb_check
297 };
298 
299 /*----------------------------------------------------------------
300  * Methods for the btree value types
301  *--------------------------------------------------------------*/
302 
303 static uint64_t pack_block_time(dm_block_t b, uint32_t t)
304 {
305 	return (b << 24) | t;
306 }
307 
308 static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
309 {
310 	*b = v >> 24;
311 	*t = v & ((1 << 24) - 1);
312 }
313 
314 /*
315  * It's more efficient to call dm_sm_{inc,dec}_blocks as few times as
316  * possible.  'with_runs' reads contiguous runs of blocks, and calls the
317  * given sm function.
318  */
319 typedef int (*run_fn)(struct dm_space_map *, dm_block_t, dm_block_t);
320 
321 static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned int count, run_fn fn)
322 {
323 	uint64_t b, begin, end;
324 	uint32_t t;
325 	bool in_run = false;
326 	unsigned int i;
327 
328 	for (i = 0; i < count; i++, value_le++) {
329 		/* We know value_le is 8 byte aligned */
330 		unpack_block_time(le64_to_cpu(*value_le), &b, &t);
331 
332 		if (in_run) {
333 			if (b == end) {
334 				end++;
335 			} else {
336 				fn(sm, begin, end);
337 				begin = b;
338 				end = b + 1;
339 			}
340 		} else {
341 			in_run = true;
342 			begin = b;
343 			end = b + 1;
344 		}
345 	}
346 
347 	if (in_run)
348 		fn(sm, begin, end);
349 }
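
/*
 * Illustrative example (values assumed for the sake of the sketch): if the
 * array unpacks to blocks 100, 101, 102, 200, with_runs() calls
 * fn(sm, 100, 103) and then fn(sm, 200, 201) -- one call per contiguous
 * run rather than one call per block.
 */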
350 
351 static void data_block_inc(void *context, const void *value_le, unsigned int count)
352 {
353 	with_runs((struct dm_space_map *) context,
354 		  (const __le64 *) value_le, count, dm_sm_inc_blocks);
355 }
356 
357 static void data_block_dec(void *context, const void *value_le, unsigned int count)
358 {
359 	with_runs((struct dm_space_map *) context,
360 		  (const __le64 *) value_le, count, dm_sm_dec_blocks);
361 }
362 
363 static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
364 {
365 	__le64 v1_le, v2_le;
366 	uint64_t b1, b2;
367 	uint32_t t;
368 
369 	memcpy(&v1_le, value1_le, sizeof(v1_le));
370 	memcpy(&v2_le, value2_le, sizeof(v2_le));
371 	unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
372 	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);
373 
374 	return b1 == b2;
375 }
376 
377 static void subtree_inc(void *context, const void *value, unsigned int count)
378 {
379 	struct dm_btree_info *info = context;
380 	const __le64 *root_le = value;
381 	unsigned int i;
382 
383 	for (i = 0; i < count; i++, root_le++)
384 		dm_tm_inc(info->tm, le64_to_cpu(*root_le));
385 }
386 
387 static void subtree_dec(void *context, const void *value, unsigned int count)
388 {
389 	struct dm_btree_info *info = context;
390 	const __le64 *root_le = value;
391 	unsigned int i;
392 
393 	for (i = 0; i < count; i++, root_le++)
394 		if (dm_btree_del(info, le64_to_cpu(*root_le)))
395 			DMERR("btree delete failed");
396 }
397 
398 static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
399 {
400 	__le64 v1_le, v2_le;
401 	memcpy(&v1_le, value1_le, sizeof(v1_le));
402 	memcpy(&v2_le, value2_le, sizeof(v2_le));
403 
404 	return v1_le == v2_le;
405 }
406 
407 /*----------------------------------------------------------------*/
408 
409 /*
410  * Variant that is used for in-core only changes or code that
411  * shouldn't put the pool in service on its own (e.g. commit).
412  */
413 static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd)
414 	__acquires(pmd->root_lock)
415 {
416 	down_write(&pmd->root_lock);
417 }
418 
419 static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
420 {
421 	pmd_write_lock_in_core(pmd);
422 	if (unlikely(!pmd->in_service))
423 		pmd->in_service = true;
424 }
425 
426 static inline void pmd_write_unlock(struct dm_pool_metadata *pmd)
427 	__releases(pmd->root_lock)
428 {
429 	up_write(&pmd->root_lock);
430 }
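
/*
 * Typical usage, sketched for illustration; __some_locked_op() is a
 * hypothetical helper, not a function in this file:
 *
 *	pmd_write_lock(pmd);
 *	if (!pmd->fail_io)
 *		r = __some_locked_op(pmd);
 *	pmd_write_unlock(pmd);
 *
 * Paths that must not put the pool in service (e.g. commit and close)
 * take pmd_write_lock_in_core() instead.
 */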
431 
432 /*----------------------------------------------------------------*/
433 
434 static int superblock_lock_zero(struct dm_pool_metadata *pmd,
435 				struct dm_block **sblock)
436 {
437 	return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
438 				     &sb_validator, sblock);
439 }
440 
441 static int superblock_lock(struct dm_pool_metadata *pmd,
442 			   struct dm_block **sblock)
443 {
444 	return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
445 				&sb_validator, sblock);
446 }
447 
448 static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
449 {
450 	int r;
451 	unsigned int i;
452 	struct dm_block *b;
453 	__le64 *data_le, zero = cpu_to_le64(0);
454 	unsigned int block_size = dm_bm_block_size(bm) / sizeof(__le64);
455 
456 	/*
457 	 * We can't use a validator here - it may be all zeroes.
458 	 */
459 	r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
460 	if (r)
461 		return r;
462 
463 	data_le = dm_block_data(b);
464 	*result = 1;
465 	for (i = 0; i < block_size; i++) {
466 		if (data_le[i] != zero) {
467 			*result = 0;
468 			break;
469 		}
470 	}
471 
472 	dm_bm_unlock(b);
473 
474 	return 0;
475 }
476 
477 static void __setup_btree_details(struct dm_pool_metadata *pmd)
478 {
479 	pmd->info.tm = pmd->tm;
480 	pmd->info.levels = 2;
481 	pmd->info.value_type.context = pmd->data_sm;
482 	pmd->info.value_type.size = sizeof(__le64);
483 	pmd->info.value_type.inc = data_block_inc;
484 	pmd->info.value_type.dec = data_block_dec;
485 	pmd->info.value_type.equal = data_block_equal;
486 
487 	memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
488 	pmd->nb_info.tm = pmd->nb_tm;
489 
490 	pmd->tl_info.tm = pmd->tm;
491 	pmd->tl_info.levels = 1;
492 	pmd->tl_info.value_type.context = &pmd->bl_info;
493 	pmd->tl_info.value_type.size = sizeof(__le64);
494 	pmd->tl_info.value_type.inc = subtree_inc;
495 	pmd->tl_info.value_type.dec = subtree_dec;
496 	pmd->tl_info.value_type.equal = subtree_equal;
497 
498 	pmd->bl_info.tm = pmd->tm;
499 	pmd->bl_info.levels = 1;
500 	pmd->bl_info.value_type.context = pmd->data_sm;
501 	pmd->bl_info.value_type.size = sizeof(__le64);
502 	pmd->bl_info.value_type.inc = data_block_inc;
503 	pmd->bl_info.value_type.dec = data_block_dec;
504 	pmd->bl_info.value_type.equal = data_block_equal;
505 
506 	pmd->details_info.tm = pmd->tm;
507 	pmd->details_info.levels = 1;
508 	pmd->details_info.value_type.context = NULL;
509 	pmd->details_info.value_type.size = sizeof(struct disk_device_details);
510 	pmd->details_info.value_type.inc = NULL;
511 	pmd->details_info.value_type.dec = NULL;
512 	pmd->details_info.value_type.equal = NULL;
513 }
514 
515 static int save_sm_roots(struct dm_pool_metadata *pmd)
516 {
517 	int r;
518 	size_t len;
519 
520 	r = dm_sm_root_size(pmd->metadata_sm, &len);
521 	if (r < 0)
522 		return r;
523 
524 	r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
525 	if (r < 0)
526 		return r;
527 
528 	r = dm_sm_root_size(pmd->data_sm, &len);
529 	if (r < 0)
530 		return r;
531 
532 	return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
533 }
534 
535 static void copy_sm_roots(struct dm_pool_metadata *pmd,
536 			  struct thin_disk_superblock *disk)
537 {
538 	memcpy(&disk->metadata_space_map_root,
539 	       &pmd->metadata_space_map_root,
540 	       sizeof(pmd->metadata_space_map_root));
541 
542 	memcpy(&disk->data_space_map_root,
543 	       &pmd->data_space_map_root,
544 	       sizeof(pmd->data_space_map_root));
545 }
546 
547 static int __write_initial_superblock(struct dm_pool_metadata *pmd)
548 {
549 	int r;
550 	struct dm_block *sblock;
551 	struct thin_disk_superblock *disk_super;
552 	sector_t bdev_size = bdev_nr_sectors(pmd->bdev);
553 
554 	if (bdev_size > THIN_METADATA_MAX_SECTORS)
555 		bdev_size = THIN_METADATA_MAX_SECTORS;
556 
557 	r = dm_sm_commit(pmd->data_sm);
558 	if (r < 0)
559 		return r;
560 
561 	r = dm_tm_pre_commit(pmd->tm);
562 	if (r < 0)
563 		return r;
564 
565 	r = save_sm_roots(pmd);
566 	if (r < 0)
567 		return r;
568 
569 	r = superblock_lock_zero(pmd, &sblock);
570 	if (r)
571 		return r;
572 
573 	disk_super = dm_block_data(sblock);
574 	disk_super->flags = 0;
575 	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
576 	disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
577 	disk_super->version = cpu_to_le32(THIN_VERSION);
578 	disk_super->time = 0;
579 	disk_super->trans_id = 0;
580 	disk_super->held_root = 0;
581 
582 	copy_sm_roots(pmd, disk_super);
583 
584 	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
585 	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
586 	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
587 	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
588 	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
589 
590 	return dm_tm_commit(pmd->tm, sblock);
591 }
592 
593 static int __format_metadata(struct dm_pool_metadata *pmd)
594 {
595 	int r;
596 
597 	r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
598 				 &pmd->tm, &pmd->metadata_sm);
599 	if (r < 0) {
600 		pmd->tm = NULL;
601 		pmd->metadata_sm = NULL;
602 		DMERR("tm_create_with_sm failed");
603 		return r;
604 	}
605 
606 	pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
607 	if (IS_ERR(pmd->data_sm)) {
608 		DMERR("sm_disk_create failed");
609 		r = PTR_ERR(pmd->data_sm);
610 		pmd->data_sm = NULL;
611 		goto bad_cleanup_tm;
612 	}
613 
614 	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
615 	if (!pmd->nb_tm) {
616 		DMERR("could not create non-blocking clone tm");
617 		r = -ENOMEM;
618 		goto bad_cleanup_data_sm;
619 	}
620 
621 	__setup_btree_details(pmd);
622 
623 	r = dm_btree_empty(&pmd->info, &pmd->root);
624 	if (r < 0)
625 		goto bad_cleanup_nb_tm;
626 
627 	r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
628 	if (r < 0) {
629 		DMERR("couldn't create devices root");
630 		goto bad_cleanup_nb_tm;
631 	}
632 
633 	r = __write_initial_superblock(pmd);
634 	if (r)
635 		goto bad_cleanup_nb_tm;
636 
637 	return 0;
638 
639 bad_cleanup_nb_tm:
640 	dm_tm_destroy(pmd->nb_tm);
641 	pmd->nb_tm = NULL;
642 bad_cleanup_data_sm:
643 	dm_sm_destroy(pmd->data_sm);
644 	pmd->data_sm = NULL;
645 bad_cleanup_tm:
646 	dm_tm_destroy(pmd->tm);
647 	pmd->tm = NULL;
648 	dm_sm_destroy(pmd->metadata_sm);
649 	pmd->metadata_sm = NULL;
650 
651 	return r;
652 }
653 
654 static int __check_incompat_features(struct thin_disk_superblock *disk_super,
655 				     struct dm_pool_metadata *pmd)
656 {
657 	uint32_t features;
658 
659 	features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
660 	if (features) {
661 		DMERR("could not access metadata due to unsupported optional features (%lx).",
662 		      (unsigned long)features);
663 		return -EINVAL;
664 	}
665 
666 	/*
667 	 * Check for read-only metadata to skip the following RDWR checks.
668 	 */
669 	if (bdev_read_only(pmd->bdev))
670 		return 0;
671 
672 	features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
673 	if (features) {
674 		DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
675 		      (unsigned long)features);
676 		return -EINVAL;
677 	}
678 
679 	return 0;
680 }
681 
682 static int __open_metadata(struct dm_pool_metadata *pmd)
683 {
684 	int r;
685 	struct dm_block *sblock;
686 	struct thin_disk_superblock *disk_super;
687 
688 	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
689 			    &sb_validator, &sblock);
690 	if (r < 0) {
691 		DMERR("couldn't read superblock");
692 		return r;
693 	}
694 
695 	disk_super = dm_block_data(sblock);
696 
697 	/* Verify the data block size hasn't changed */
698 	if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
699 		DMERR("changing the data block size (from %u to %llu) is not supported",
700 		      le32_to_cpu(disk_super->data_block_size),
701 		      (unsigned long long)pmd->data_block_size);
702 		r = -EINVAL;
703 		goto bad_unlock_sblock;
704 	}
705 
706 	r = __check_incompat_features(disk_super, pmd);
707 	if (r < 0)
708 		goto bad_unlock_sblock;
709 
710 	r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
711 			       disk_super->metadata_space_map_root,
712 			       sizeof(disk_super->metadata_space_map_root),
713 			       &pmd->tm, &pmd->metadata_sm);
714 	if (r < 0) {
715 		pmd->tm = NULL;
716 		pmd->metadata_sm = NULL;
717 		DMERR("tm_open_with_sm failed");
718 		goto bad_unlock_sblock;
719 	}
720 
721 	pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
722 				       sizeof(disk_super->data_space_map_root));
723 	if (IS_ERR(pmd->data_sm)) {
724 		DMERR("sm_disk_open failed");
725 		r = PTR_ERR(pmd->data_sm);
726 		pmd->data_sm = NULL;
727 		goto bad_cleanup_tm;
728 	}
729 
730 	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
731 	if (!pmd->nb_tm) {
732 		DMERR("could not create non-blocking clone tm");
733 		r = -ENOMEM;
734 		goto bad_cleanup_data_sm;
735 	}
736 
737 	/*
738 	 * When opening the pool metadata, setting the roots here is redundant
739 	 * because they will be set again in __begin_transaction().  But the
740 	 * pool-abort path really needs the last transaction's roots here, to
741 	 * avoid accessing a broken btree.
742 	 */
743 	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
744 	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
745 
746 	__setup_btree_details(pmd);
747 	dm_bm_unlock(sblock);
748 
749 	return 0;
750 
751 bad_cleanup_data_sm:
752 	dm_sm_destroy(pmd->data_sm);
753 	pmd->data_sm = NULL;
754 bad_cleanup_tm:
755 	dm_tm_destroy(pmd->tm);
756 	pmd->tm = NULL;
757 	dm_sm_destroy(pmd->metadata_sm);
758 	pmd->metadata_sm = NULL;
759 bad_unlock_sblock:
760 	dm_bm_unlock(sblock);
761 
762 	return r;
763 }
764 
765 static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
766 {
767 	int r, unformatted;
768 
769 	r = __superblock_all_zeroes(pmd->bm, &unformatted);
770 	if (r)
771 		return r;
772 
773 	if (unformatted)
774 		return format_device ? __format_metadata(pmd) : -EPERM;
775 
776 	return __open_metadata(pmd);
777 }
778 
779 static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
780 {
781 	int r;
782 
783 	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
784 					  THIN_MAX_CONCURRENT_LOCKS);
785 	if (IS_ERR(pmd->bm)) {
786 		DMERR("could not create block manager");
787 		r = PTR_ERR(pmd->bm);
788 		pmd->bm = NULL;
789 		return r;
790 	}
791 
792 	r = __open_or_format_metadata(pmd, format_device);
793 	if (r) {
794 		dm_block_manager_destroy(pmd->bm);
795 		pmd->bm = NULL;
796 	}
797 
798 	return r;
799 }
800 
801 static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
802 					      bool destroy_bm)
803 {
804 	dm_sm_destroy(pmd->data_sm);
805 	pmd->data_sm = NULL;
806 	dm_sm_destroy(pmd->metadata_sm);
807 	pmd->metadata_sm = NULL;
808 	dm_tm_destroy(pmd->nb_tm);
809 	pmd->nb_tm = NULL;
810 	dm_tm_destroy(pmd->tm);
811 	pmd->tm = NULL;
812 	if (destroy_bm)
813 		dm_block_manager_destroy(pmd->bm);
814 }
815 
816 static int __begin_transaction(struct dm_pool_metadata *pmd)
817 {
818 	int r;
819 	struct thin_disk_superblock *disk_super;
820 	struct dm_block *sblock;
821 
822 	/*
823 	 * We re-read the superblock every time.  Shouldn't need to do this
824 	 * really.
825 	 */
826 	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
827 			    &sb_validator, &sblock);
828 	if (r)
829 		return r;
830 
831 	disk_super = dm_block_data(sblock);
832 	pmd->time = le32_to_cpu(disk_super->time);
833 	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
834 	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
835 	pmd->trans_id = le64_to_cpu(disk_super->trans_id);
836 	pmd->flags = le32_to_cpu(disk_super->flags);
837 	pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
838 
839 	dm_bm_unlock(sblock);
840 	return 0;
841 }
842 
843 static int __write_changed_details(struct dm_pool_metadata *pmd)
844 {
845 	int r;
846 	struct dm_thin_device *td, *tmp;
847 	struct disk_device_details details;
848 	uint64_t key;
849 
850 	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
851 		if (!td->changed)
852 			continue;
853 
854 		key = td->id;
855 
856 		details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
857 		details.transaction_id = cpu_to_le64(td->transaction_id);
858 		details.creation_time = cpu_to_le32(td->creation_time);
859 		details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
860 		__dm_bless_for_disk(&details);
861 
862 		r = dm_btree_insert(&pmd->details_info, pmd->details_root,
863 				    &key, &details, &pmd->details_root);
864 		if (r)
865 			return r;
866 
867 		if (td->open_count)
868 			td->changed = false;
869 		else {
870 			list_del(&td->list);
871 			kfree(td);
872 		}
873 	}
874 
875 	return 0;
876 }
877 
878 static int __commit_transaction(struct dm_pool_metadata *pmd)
879 {
880 	int r;
881 	struct thin_disk_superblock *disk_super;
882 	struct dm_block *sblock;
883 
884 	/*
885 	 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
886 	 */
887 	BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
888 	BUG_ON(!rwsem_is_locked(&pmd->root_lock));
889 
890 	if (unlikely(!pmd->in_service))
891 		return 0;
892 
893 	if (pmd->pre_commit_fn) {
894 		r = pmd->pre_commit_fn(pmd->pre_commit_context);
895 		if (r < 0) {
896 			DMERR("pre-commit callback failed");
897 			return r;
898 		}
899 	}
900 
901 	r = __write_changed_details(pmd);
902 	if (r < 0)
903 		return r;
904 
905 	r = dm_sm_commit(pmd->data_sm);
906 	if (r < 0)
907 		return r;
908 
909 	r = dm_tm_pre_commit(pmd->tm);
910 	if (r < 0)
911 		return r;
912 
913 	r = save_sm_roots(pmd);
914 	if (r < 0)
915 		return r;
916 
917 	r = superblock_lock(pmd, &sblock);
918 	if (r)
919 		return r;
920 
921 	disk_super = dm_block_data(sblock);
922 	disk_super->time = cpu_to_le32(pmd->time);
923 	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
924 	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
925 	disk_super->trans_id = cpu_to_le64(pmd->trans_id);
926 	disk_super->flags = cpu_to_le32(pmd->flags);
927 
928 	copy_sm_roots(pmd, disk_super);
929 
930 	return dm_tm_commit(pmd->tm, sblock);
931 }
932 
933 static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
934 {
935 	int r;
936 	dm_block_t total;
937 	dm_block_t max_blocks = 4096; /* 16M */
938 
939 	r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
940 	if (r) {
941 		DMERR("could not get size of metadata device");
942 		pmd->metadata_reserve = max_blocks;
943 	} else
944 		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
945 }
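
/*
 * Worked example (illustrative figures, assuming 4KiB metadata blocks): a
 * 1GiB metadata device has 262144 blocks, so the reserve is
 * min(4096, 262144 / 10) = 4096 blocks, i.e. 16MiB.  Only metadata devices
 * smaller than ~160MiB reserve less than that cap.
 */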
946 
947 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
948 					       sector_t data_block_size,
949 					       bool format_device)
950 {
951 	int r;
952 	struct dm_pool_metadata *pmd;
953 
954 	pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
955 	if (!pmd) {
956 		DMERR("could not allocate metadata struct");
957 		return ERR_PTR(-ENOMEM);
958 	}
959 
960 	init_rwsem(&pmd->root_lock);
961 	pmd->time = 0;
962 	INIT_LIST_HEAD(&pmd->thin_devices);
963 	pmd->fail_io = false;
964 	pmd->in_service = false;
965 	pmd->bdev = bdev;
966 	pmd->data_block_size = data_block_size;
967 	pmd->pre_commit_fn = NULL;
968 	pmd->pre_commit_context = NULL;
969 
970 	r = __create_persistent_data_objects(pmd, format_device);
971 	if (r) {
972 		kfree(pmd);
973 		return ERR_PTR(r);
974 	}
975 
976 	r = __begin_transaction(pmd);
977 	if (r < 0) {
978 		if (dm_pool_metadata_close(pmd) < 0)
979 			DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
980 		return ERR_PTR(r);
981 	}
982 
983 	__set_metadata_reserve(pmd);
984 
985 	return pmd;
986 }
987 
988 int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
989 {
990 	int r;
991 	unsigned int open_devices = 0;
992 	struct dm_thin_device *td, *tmp;
993 
994 	down_read(&pmd->root_lock);
995 	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
996 		if (td->open_count)
997 			open_devices++;
998 		else {
999 			list_del(&td->list);
1000 			kfree(td);
1001 		}
1002 	}
1003 	up_read(&pmd->root_lock);
1004 
1005 	if (open_devices) {
1006 		DMERR("attempt to close pmd when %u device(s) are still open",
1007 		       open_devices);
1008 		return -EBUSY;
1009 	}
1010 
1011 	pmd_write_lock_in_core(pmd);
1012 	if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
1013 		r = __commit_transaction(pmd);
1014 		if (r < 0)
1015 			DMWARN("%s: __commit_transaction() failed, error = %d",
1016 			       __func__, r);
1017 	}
1018 	pmd_write_unlock(pmd);
1019 	__destroy_persistent_data_objects(pmd, true);
1020 
1021 	kfree(pmd);
1022 	return 0;
1023 }
1024 
1025 /*
1026  * __open_device: Returns @td corresponding to device with id @dev,
1027  * creating it if @create is set and incrementing @td->open_count.
1028  * On failure, @td is undefined.
1029  */
1030 static int __open_device(struct dm_pool_metadata *pmd,
1031 			 dm_thin_id dev, int create,
1032 			 struct dm_thin_device **td)
1033 {
1034 	int r, changed = 0;
1035 	struct dm_thin_device *td2;
1036 	uint64_t key = dev;
1037 	struct disk_device_details details_le;
1038 
1039 	/*
1040 	 * If the device is already open, return it.
1041 	 */
1042 	list_for_each_entry(td2, &pmd->thin_devices, list)
1043 		if (td2->id == dev) {
1044 			/*
1045 			 * May not create an already-open device.
1046 			 */
1047 			if (create)
1048 				return -EEXIST;
1049 
1050 			td2->open_count++;
1051 			*td = td2;
1052 			return 0;
1053 		}
1054 
1055 	/*
1056 	 * Check the device exists.
1057 	 */
1058 	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
1059 			    &key, &details_le);
1060 	if (r) {
1061 		if (r != -ENODATA || !create)
1062 			return r;
1063 
1064 		/*
1065 		 * Create new device.
1066 		 */
1067 		changed = 1;
1068 		details_le.mapped_blocks = 0;
1069 		details_le.transaction_id = cpu_to_le64(pmd->trans_id);
1070 		details_le.creation_time = cpu_to_le32(pmd->time);
1071 		details_le.snapshotted_time = cpu_to_le32(pmd->time);
1072 	}
1073 
1074 	*td = kmalloc(sizeof(**td), GFP_NOIO);
1075 	if (!*td)
1076 		return -ENOMEM;
1077 
1078 	(*td)->pmd = pmd;
1079 	(*td)->id = dev;
1080 	(*td)->open_count = 1;
1081 	(*td)->changed = changed;
1082 	(*td)->aborted_with_changes = false;
1083 	(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
1084 	(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
1085 	(*td)->creation_time = le32_to_cpu(details_le.creation_time);
1086 	(*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);
1087 
1088 	list_add(&(*td)->list, &pmd->thin_devices);
1089 
1090 	return 0;
1091 }
1092 
1093 static void __close_device(struct dm_thin_device *td)
1094 {
1095 	--td->open_count;
1096 }
1097 
1098 static int __create_thin(struct dm_pool_metadata *pmd,
1099 			 dm_thin_id dev)
1100 {
1101 	int r;
1102 	dm_block_t dev_root;
1103 	uint64_t key = dev;
1104 	struct dm_thin_device *td;
1105 	__le64 value;
1106 
1107 	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
1108 			    &key, NULL);
1109 	if (!r)
1110 		return -EEXIST;
1111 
1112 	/*
1113 	 * Create an empty btree for the mappings.
1114 	 */
1115 	r = dm_btree_empty(&pmd->bl_info, &dev_root);
1116 	if (r)
1117 		return r;
1118 
1119 	/*
1120 	 * Insert it into the main mapping tree.
1121 	 */
1122 	value = cpu_to_le64(dev_root);
1123 	__dm_bless_for_disk(&value);
1124 	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
1125 	if (r) {
1126 		dm_btree_del(&pmd->bl_info, dev_root);
1127 		return r;
1128 	}
1129 
1130 	r = __open_device(pmd, dev, 1, &td);
1131 	if (r) {
1132 		dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1133 		dm_btree_del(&pmd->bl_info, dev_root);
1134 		return r;
1135 	}
1136 	__close_device(td);
1137 
1138 	return r;
1139 }
1140 
1141 int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
1142 {
1143 	int r = -EINVAL;
1144 
1145 	pmd_write_lock(pmd);
1146 	if (!pmd->fail_io)
1147 		r = __create_thin(pmd, dev);
1148 	pmd_write_unlock(pmd);
1149 
1150 	return r;
1151 }
1152 
1153 static int __set_snapshot_details(struct dm_pool_metadata *pmd,
1154 				  struct dm_thin_device *snap,
1155 				  dm_thin_id origin, uint32_t time)
1156 {
1157 	int r;
1158 	struct dm_thin_device *td;
1159 
1160 	r = __open_device(pmd, origin, 0, &td);
1161 	if (r)
1162 		return r;
1163 
1164 	td->changed = true;
1165 	td->snapshotted_time = time;
1166 
1167 	snap->mapped_blocks = td->mapped_blocks;
1168 	snap->snapshotted_time = time;
1169 	__close_device(td);
1170 
1171 	return 0;
1172 }
1173 
1174 static int __create_snap(struct dm_pool_metadata *pmd,
1175 			 dm_thin_id dev, dm_thin_id origin)
1176 {
1177 	int r;
1178 	dm_block_t origin_root;
1179 	uint64_t key = origin, dev_key = dev;
1180 	struct dm_thin_device *td;
1181 	__le64 value;
1182 
1183 	/* check this device is unused */
1184 	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
1185 			    &dev_key, NULL);
1186 	if (!r)
1187 		return -EEXIST;
1188 
1189 	/* find the mapping tree for the origin */
1190 	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
1191 	if (r)
1192 		return r;
1193 	origin_root = le64_to_cpu(value);
1194 
1195 	/* clone the origin, an inc will do */
1196 	dm_tm_inc(pmd->tm, origin_root);
1197 
1198 	/* insert into the main mapping tree */
1199 	value = cpu_to_le64(origin_root);
1200 	__dm_bless_for_disk(&value);
1201 	key = dev;
1202 	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
1203 	if (r) {
1204 		dm_tm_dec(pmd->tm, origin_root);
1205 		return r;
1206 	}
1207 
1208 	pmd->time++;
1209 
1210 	r = __open_device(pmd, dev, 1, &td);
1211 	if (r)
1212 		goto bad;
1213 
1214 	r = __set_snapshot_details(pmd, td, origin, pmd->time);
1215 	__close_device(td);
1216 
1217 	if (r)
1218 		goto bad;
1219 
1220 	return 0;
1221 
1222 bad:
1223 	dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1224 	dm_btree_remove(&pmd->details_info, pmd->details_root,
1225 			&key, &pmd->details_root);
1226 	return r;
1227 }
1228 
1229 int dm_pool_create_snap(struct dm_pool_metadata *pmd,
1230 				 dm_thin_id dev,
1231 				 dm_thin_id origin)
1232 {
1233 	int r = -EINVAL;
1234 
1235 	pmd_write_lock(pmd);
1236 	if (!pmd->fail_io)
1237 		r = __create_snap(pmd, dev, origin);
1238 	pmd_write_unlock(pmd);
1239 
1240 	return r;
1241 }
1242 
1243 static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
1244 {
1245 	int r;
1246 	uint64_t key = dev;
1247 	struct dm_thin_device *td;
1248 
1249 	/* TODO: failure should mark the transaction invalid */
1250 	r = __open_device(pmd, dev, 0, &td);
1251 	if (r)
1252 		return r;
1253 
1254 	if (td->open_count > 1) {
1255 		__close_device(td);
1256 		return -EBUSY;
1257 	}
1258 
1259 	list_del(&td->list);
1260 	kfree(td);
1261 	r = dm_btree_remove(&pmd->details_info, pmd->details_root,
1262 			    &key, &pmd->details_root);
1263 	if (r)
1264 		return r;
1265 
1266 	r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1267 	if (r)
1268 		return r;
1269 
1270 	return 0;
1271 }
1272 
1273 int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
1274 			       dm_thin_id dev)
1275 {
1276 	int r = -EINVAL;
1277 
1278 	pmd_write_lock(pmd);
1279 	if (!pmd->fail_io)
1280 		r = __delete_device(pmd, dev);
1281 	pmd_write_unlock(pmd);
1282 
1283 	return r;
1284 }
1285 
1286 int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
1287 					uint64_t current_id,
1288 					uint64_t new_id)
1289 {
1290 	int r = -EINVAL;
1291 
1292 	pmd_write_lock(pmd);
1293 
1294 	if (pmd->fail_io)
1295 		goto out;
1296 
1297 	if (pmd->trans_id != current_id) {
1298 		DMERR("mismatched transaction id");
1299 		goto out;
1300 	}
1301 
1302 	pmd->trans_id = new_id;
1303 	r = 0;
1304 
1305 out:
1306 	pmd_write_unlock(pmd);
1307 
1308 	return r;
1309 }
1310 
1311 int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
1312 					uint64_t *result)
1313 {
1314 	int r = -EINVAL;
1315 
1316 	down_read(&pmd->root_lock);
1317 	if (!pmd->fail_io) {
1318 		*result = pmd->trans_id;
1319 		r = 0;
1320 	}
1321 	up_read(&pmd->root_lock);
1322 
1323 	return r;
1324 }
1325 
1326 static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1327 {
1328 	int r, inc;
1329 	struct thin_disk_superblock *disk_super;
1330 	struct dm_block *copy, *sblock;
1331 	dm_block_t held_root;
1332 
1333 	/*
1334 	 * We commit to ensure the btree roots which we increment in a
1335 	 * moment are up to date.
1336 	 */
1337 	r = __commit_transaction(pmd);
1338 	if (r < 0) {
1339 		DMWARN("%s: __commit_transaction() failed, error = %d",
1340 		       __func__, r);
1341 		return r;
1342 	}
1343 
1344 	/*
1345 	 * Copy the superblock.
1346 	 */
1347 	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
1348 	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
1349 			       &sb_validator, &copy, &inc);
1350 	if (r)
1351 		return r;
1352 
1353 	BUG_ON(!inc);
1354 
1355 	held_root = dm_block_location(copy);
1356 	disk_super = dm_block_data(copy);
1357 
1358 	if (le64_to_cpu(disk_super->held_root)) {
1359 		DMWARN("Pool metadata snapshot already exists: release this before taking another.");
1360 
1361 		dm_tm_dec(pmd->tm, held_root);
1362 		dm_tm_unlock(pmd->tm, copy);
1363 		return -EBUSY;
1364 	}
1365 
1366 	/*
1367 	 * Wipe the space map roots since we're not publishing this.
1368 	 */
1369 	memset(&disk_super->data_space_map_root, 0,
1370 	       sizeof(disk_super->data_space_map_root));
1371 	memset(&disk_super->metadata_space_map_root, 0,
1372 	       sizeof(disk_super->metadata_space_map_root));
1373 
1374 	/*
1375 	 * Increment the data structures that need to be preserved.
1376 	 */
1377 	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
1378 	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
1379 	dm_tm_unlock(pmd->tm, copy);
1380 
1381 	/*
1382 	 * Write the held root into the superblock.
1383 	 */
1384 	r = superblock_lock(pmd, &sblock);
1385 	if (r) {
1386 		dm_tm_dec(pmd->tm, held_root);
1387 		return r;
1388 	}
1389 
1390 	disk_super = dm_block_data(sblock);
1391 	disk_super->held_root = cpu_to_le64(held_root);
1392 	dm_bm_unlock(sblock);
1393 	return 0;
1394 }
1395 
1396 int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
1397 {
1398 	int r = -EINVAL;
1399 
1400 	pmd_write_lock(pmd);
1401 	if (!pmd->fail_io)
1402 		r = __reserve_metadata_snap(pmd);
1403 	pmd_write_unlock(pmd);
1404 
1405 	return r;
1406 }
1407 
1408 static int __release_metadata_snap(struct dm_pool_metadata *pmd)
1409 {
1410 	int r;
1411 	struct thin_disk_superblock *disk_super;
1412 	struct dm_block *sblock, *copy;
1413 	dm_block_t held_root;
1414 
1415 	r = superblock_lock(pmd, &sblock);
1416 	if (r)
1417 		return r;
1418 
1419 	disk_super = dm_block_data(sblock);
1420 	held_root = le64_to_cpu(disk_super->held_root);
1421 	disk_super->held_root = cpu_to_le64(0);
1422 
1423 	dm_bm_unlock(sblock);
1424 
1425 	if (!held_root) {
1426 		DMWARN("No pool metadata snapshot found: nothing to release.");
1427 		return -EINVAL;
1428 	}
1429 
1430 	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
1431 	if (r)
1432 		return r;
1433 
1434 	disk_super = dm_block_data(copy);
1435 	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
1436 	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
1437 	dm_sm_dec_block(pmd->metadata_sm, held_root);
1438 
1439 	dm_tm_unlock(pmd->tm, copy);
1440 
1441 	return 0;
1442 }
1443 
1444 int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
1445 {
1446 	int r = -EINVAL;
1447 
1448 	pmd_write_lock(pmd);
1449 	if (!pmd->fail_io)
1450 		r = __release_metadata_snap(pmd);
1451 	pmd_write_unlock(pmd);
1452 
1453 	return r;
1454 }
1455 
1456 static int __get_metadata_snap(struct dm_pool_metadata *pmd,
1457 			       dm_block_t *result)
1458 {
1459 	int r;
1460 	struct thin_disk_superblock *disk_super;
1461 	struct dm_block *sblock;
1462 
1463 	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
1464 			    &sb_validator, &sblock);
1465 	if (r)
1466 		return r;
1467 
1468 	disk_super = dm_block_data(sblock);
1469 	*result = le64_to_cpu(disk_super->held_root);
1470 
1471 	dm_bm_unlock(sblock);
1472 
1473 	return 0;
1474 }
1475 
1476 int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
1477 			      dm_block_t *result)
1478 {
1479 	int r = -EINVAL;
1480 
1481 	down_read(&pmd->root_lock);
1482 	if (!pmd->fail_io)
1483 		r = __get_metadata_snap(pmd, result);
1484 	up_read(&pmd->root_lock);
1485 
1486 	return r;
1487 }
1488 
1489 int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
1490 			     struct dm_thin_device **td)
1491 {
1492 	int r = -EINVAL;
1493 
1494 	pmd_write_lock_in_core(pmd);
1495 	if (!pmd->fail_io)
1496 		r = __open_device(pmd, dev, 0, td);
1497 	pmd_write_unlock(pmd);
1498 
1499 	return r;
1500 }
1501 
1502 int dm_pool_close_thin_device(struct dm_thin_device *td)
1503 {
1504 	pmd_write_lock_in_core(td->pmd);
1505 	__close_device(td);
1506 	pmd_write_unlock(td->pmd);
1507 
1508 	return 0;
1509 }
1510 
1511 dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
1512 {
1513 	return td->id;
1514 }
1515 
1516 /*
1517  * Check whether @time (of block creation) is older than @td's last snapshot.
1518  * If so then the associated block is shared with the last snapshot device.
1519  * Any block on a device created *after* the device last got snapshotted is
1520  * necessarily not shared.
1521  */
1522 static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
1523 {
1524 	return td->snapshotted_time > time;
1525 }
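
/*
 * Example of the check above (illustrative values): if td->snapshotted_time
 * is 5, a block created at time 3 predates the last snapshot and is
 * therefore shared with it, whereas a block created at time 7 is not.
 */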
1526 
1527 static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
1528 				 struct dm_thin_lookup_result *result)
1529 {
1530 	uint64_t block_time = 0;
1531 	dm_block_t exception_block;
1532 	uint32_t exception_time;
1533 
1534 	block_time = le64_to_cpu(value);
1535 	unpack_block_time(block_time, &exception_block, &exception_time);
1536 	result->block = exception_block;
1537 	result->shared = __snapshotted_since(td, exception_time);
1538 }
1539 
1540 static int __find_block(struct dm_thin_device *td, dm_block_t block,
1541 			int can_issue_io, struct dm_thin_lookup_result *result)
1542 {
1543 	int r;
1544 	__le64 value;
1545 	struct dm_pool_metadata *pmd = td->pmd;
1546 	dm_block_t keys[2] = { td->id, block };
1547 	struct dm_btree_info *info;
1548 
1549 	if (can_issue_io) {
1550 		info = &pmd->info;
1551 	} else
1552 		info = &pmd->nb_info;
1553 
1554 	r = dm_btree_lookup(info, pmd->root, keys, &value);
1555 	if (!r)
1556 		unpack_lookup_result(td, value, result);
1557 
1558 	return r;
1559 }
1560 
1561 int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
1562 		       int can_issue_io, struct dm_thin_lookup_result *result)
1563 {
1564 	int r;
1565 	struct dm_pool_metadata *pmd = td->pmd;
1566 
1567 	down_read(&pmd->root_lock);
1568 	if (pmd->fail_io) {
1569 		up_read(&pmd->root_lock);
1570 		return -EINVAL;
1571 	}
1572 
1573 	r = __find_block(td, block, can_issue_io, result);
1574 
1575 	up_read(&pmd->root_lock);
1576 	return r;
1577 }
1578 
1579 static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
1580 					  dm_block_t *vblock,
1581 					  struct dm_thin_lookup_result *result)
1582 {
1583 	int r;
1584 	__le64 value;
1585 	struct dm_pool_metadata *pmd = td->pmd;
1586 	dm_block_t keys[2] = { td->id, block };
1587 
1588 	r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
1589 	if (!r)
1590 		unpack_lookup_result(td, value, result);
1591 
1592 	return r;
1593 }
1594 
1595 static int __find_mapped_range(struct dm_thin_device *td,
1596 			       dm_block_t begin, dm_block_t end,
1597 			       dm_block_t *thin_begin, dm_block_t *thin_end,
1598 			       dm_block_t *pool_begin, bool *maybe_shared)
1599 {
1600 	int r;
1601 	dm_block_t pool_end;
1602 	struct dm_thin_lookup_result lookup;
1603 
1604 	if (end < begin)
1605 		return -ENODATA;
1606 
1607 	r = __find_next_mapped_block(td, begin, &begin, &lookup);
1608 	if (r)
1609 		return r;
1610 
1611 	if (begin >= end)
1612 		return -ENODATA;
1613 
1614 	*thin_begin = begin;
1615 	*pool_begin = lookup.block;
1616 	*maybe_shared = lookup.shared;
1617 
1618 	begin++;
1619 	pool_end = *pool_begin + 1;
1620 	while (begin != end) {
1621 		r = __find_block(td, begin, true, &lookup);
1622 		if (r) {
1623 			if (r == -ENODATA)
1624 				break;
1625 			else
1626 				return r;
1627 		}
1628 
1629 		if ((lookup.block != pool_end) ||
1630 		    (lookup.shared != *maybe_shared))
1631 			break;
1632 
1633 		pool_end++;
1634 		begin++;
1635 	}
1636 
1637 	*thin_end = begin;
1638 	return 0;
1639 }
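
/*
 * Illustrative example (hypothetical mappings): if thin blocks 10..13 map
 * to contiguous pool blocks 50..53 with the same shared flag and thin
 * block 14 is unmapped, a call with begin = 8 and end = 20 returns
 * *thin_begin = 10, *thin_end = 14 and *pool_begin = 50.
 */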
1640 
1641 int dm_thin_find_mapped_range(struct dm_thin_device *td,
1642 			      dm_block_t begin, dm_block_t end,
1643 			      dm_block_t *thin_begin, dm_block_t *thin_end,
1644 			      dm_block_t *pool_begin, bool *maybe_shared)
1645 {
1646 	int r = -EINVAL;
1647 	struct dm_pool_metadata *pmd = td->pmd;
1648 
1649 	down_read(&pmd->root_lock);
1650 	if (!pmd->fail_io) {
1651 		r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
1652 					pool_begin, maybe_shared);
1653 	}
1654 	up_read(&pmd->root_lock);
1655 
1656 	return r;
1657 }
1658 
1659 static int __insert(struct dm_thin_device *td, dm_block_t block,
1660 		    dm_block_t data_block)
1661 {
1662 	int r, inserted;
1663 	__le64 value;
1664 	struct dm_pool_metadata *pmd = td->pmd;
1665 	dm_block_t keys[2] = { td->id, block };
1666 
1667 	value = cpu_to_le64(pack_block_time(data_block, pmd->time));
1668 	__dm_bless_for_disk(&value);
1669 
1670 	r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
1671 				   &pmd->root, &inserted);
1672 	if (r)
1673 		return r;
1674 
1675 	td->changed = true;
1676 	if (inserted)
1677 		td->mapped_blocks++;
1678 
1679 	return 0;
1680 }
1681 
1682 int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
1683 			 dm_block_t data_block)
1684 {
1685 	int r = -EINVAL;
1686 
1687 	pmd_write_lock(td->pmd);
1688 	if (!td->pmd->fail_io)
1689 		r = __insert(td, block, data_block);
1690 	pmd_write_unlock(td->pmd);
1691 
1692 	return r;
1693 }
1694 
1695 static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
1696 {
1697 	int r;
1698 	unsigned int count, total_count = 0;
1699 	struct dm_pool_metadata *pmd = td->pmd;
1700 	dm_block_t keys[1] = { td->id };
1701 	__le64 value;
1702 	dm_block_t mapping_root;
1703 
1704 	/*
1705 	 * Find the mapping tree
1706 	 */
1707 	r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);
1708 	if (r)
1709 		return r;
1710 
1711 	/*
1712 	 * Remove from the mapping tree, taking care to inc the
1713 	 * ref count so it doesn't get deleted.
1714 	 */
1715 	mapping_root = le64_to_cpu(value);
1716 	dm_tm_inc(pmd->tm, mapping_root);
1717 	r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);
1718 	if (r)
1719 		return r;
1720 
1721 	/*
1722 	 * dm_btree_remove_leaves() stops at the first unmapped entry, so we
1723 	 * have to loop round finding mapped ranges.
1724 	 */
1725 	while (begin < end) {
1726 		r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
1727 		if (r == -ENODATA)
1728 			break;
1729 
1730 		if (r)
1731 			return r;
1732 
1733 		if (begin >= end)
1734 			break;
1735 
1736 		r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
1737 		if (r)
1738 			return r;
1739 
1740 		total_count += count;
1741 	}
1742 
1743 	td->mapped_blocks -= total_count;
1744 	td->changed = true;
1745 
1746 	/*
1747 	 * Reinsert the mapping tree.
1748 	 */
1749 	value = cpu_to_le64(mapping_root);
1750 	__dm_bless_for_disk(&value);
1751 	return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
1752 }
1753 
1754 int dm_thin_remove_range(struct dm_thin_device *td,
1755 			 dm_block_t begin, dm_block_t end)
1756 {
1757 	int r = -EINVAL;
1758 
1759 	pmd_write_lock(td->pmd);
1760 	if (!td->pmd->fail_io)
1761 		r = __remove_range(td, begin, end);
1762 	pmd_write_unlock(td->pmd);
1763 
1764 	return r;
1765 }
1766 
1767 int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1768 {
1769 	int r = -EINVAL;
1770 	uint32_t ref_count;
1771 
1772 	down_read(&pmd->root_lock);
1773 	if (!pmd->fail_io) {
1774 		r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
1775 		if (!r)
1776 			*result = (ref_count > 1);
1777 	}
1778 	up_read(&pmd->root_lock);
1779 
1780 	return r;
1781 }
1782 
1783 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
1784 {
1785 	int r = -EINVAL;
1786 
1787 	pmd_write_lock(pmd);
1788 	if (!pmd->fail_io)
1789 		r = dm_sm_inc_blocks(pmd->data_sm, b, e);
1790 	pmd_write_unlock(pmd);
1791 
1792 	return r;
1793 }
1794 
1795 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
1796 {
1797 	int r = -EINVAL;
1798 
1799 	pmd_write_lock(pmd);
1800 	if (!pmd->fail_io)
1801 		r = dm_sm_dec_blocks(pmd->data_sm, b, e);
1802 	pmd_write_unlock(pmd);
1803 
1804 	return r;
1805 }
1806 
1807 bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
1808 {
1809 	int r;
1810 
1811 	down_read(&td->pmd->root_lock);
1812 	r = td->changed;
1813 	up_read(&td->pmd->root_lock);
1814 
1815 	return r;
1816 }
1817 
1818 bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
1819 {
1820 	bool r = false;
1821 	struct dm_thin_device *td, *tmp;
1822 
1823 	down_read(&pmd->root_lock);
1824 	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
1825 		if (td->changed) {
1826 			r = td->changed;
1827 			break;
1828 		}
1829 	}
1830 	up_read(&pmd->root_lock);
1831 
1832 	return r;
1833 }
1834 
1835 bool dm_thin_aborted_changes(struct dm_thin_device *td)
1836 {
1837 	bool r;
1838 
1839 	down_read(&td->pmd->root_lock);
1840 	r = td->aborted_with_changes;
1841 	up_read(&td->pmd->root_lock);
1842 
1843 	return r;
1844 }
1845 
1846 int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
1847 {
1848 	int r = -EINVAL;
1849 
1850 	pmd_write_lock(pmd);
1851 	if (!pmd->fail_io)
1852 		r = dm_sm_new_block(pmd->data_sm, result);
1853 	pmd_write_unlock(pmd);
1854 
1855 	return r;
1856 }
1857 
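/*
 * Commit the current transaction and immediately open the next one.
 * Nothing written through the other helpers in this file is durable
 * until this succeeds.
 *
 * Illustrative sketch only (not a caller in this file): a pool target
 * provisioning a new block might do something like
 *
 *	dm_block_t data_block;
 *
 *	r = dm_pool_alloc_data_block(pmd, &data_block);
 *	if (!r)
 *		r = dm_thin_insert_block(td, virt_block, data_block);
 *	if (!r)
 *		r = dm_pool_commit_metadata(pmd);
 *
 * where dm_thin_insert_block() is the mapping-insert helper declared in
 * dm-thin-metadata.h and virt_block is a hypothetical virtual block
 * number supplied by the caller.
 */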
1858 int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
1859 {
1860 	int r = -EINVAL;
1861 
1862 	/*
1863 	 * Care is taken that the commit itself is never what puts the
1864 	 * thin-pool in-service (hence the in-core-only write lock).
1865 	 */
1866 	pmd_write_lock_in_core(pmd);
1867 	if (pmd->fail_io)
1868 		goto out;
1869 
1870 	r = __commit_transaction(pmd);
1871 	if (r < 0)
1872 		goto out;
1873 
1874 	/*
1875 	 * Open the next transaction.
1876 	 */
1877 	r = __begin_transaction(pmd);
1878 out:
1879 	pmd_write_unlock(pmd);
1880 	return r;
1881 }
1882 
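/*
 * Remember, per thin device, whether it had uncommitted changes at the
 * point the transaction was aborted; dm_thin_aborted_changes() reports
 * this back to the caller.
 */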
1883 static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
1884 {
1885 	struct dm_thin_device *td;
1886 
1887 	list_for_each_entry(td, &pmd->thin_devices, list)
1888 		td->aborted_with_changes = td->changed;
1889 }
1890 
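/*
 * Abort the current transaction: note which devices still had
 * uncommitted changes, destroy the persistent-data objects, reset the
 * block manager and reopen the metadata.  If the reopen fails the
 * metadata is left in fail_io mode.
 */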
1891 int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
1892 {
1893 	int r = -EINVAL;
1894 
1895 	/* fail_io is double-checked with pmd->root_lock held below */
1896 	if (unlikely(pmd->fail_io))
1897 		return r;
1898 
1899 	pmd_write_lock(pmd);
1900 	if (pmd->fail_io) {
1901 		pmd_write_unlock(pmd);
1902 		return r;
1903 	}
1904 	__set_abort_with_changes_flags(pmd);
1905 
1906 	/* destroy data_sm/metadata_sm/nb_tm/tm */
1907 	__destroy_persistent_data_objects(pmd, false);
1908 
1909 	/* reset bm */
1910 	dm_block_manager_reset(pmd->bm);
1911 
1912 	/* rebuild data_sm/metadata_sm/nb_tm/tm */
1913 	r = __open_or_format_metadata(pmd, false);
1914 	if (r)
1915 		pmd->fail_io = true;
1916 	pmd_write_unlock(pmd);
1917 	return r;
1918 }
1919 
1920 int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
1921 {
1922 	int r = -EINVAL;
1923 
1924 	down_read(&pmd->root_lock);
1925 	if (!pmd->fail_io)
1926 		r = dm_sm_get_nr_free(pmd->data_sm, result);
1927 	up_read(&pmd->root_lock);
1928 
1929 	return r;
1930 }
1931 
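/*
 * The free metadata block count is reported net of pmd->metadata_reserve,
 * clamped at zero, so callers never see the reserved blocks as available.
 */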
1932 int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
1933 					  dm_block_t *result)
1934 {
1935 	int r = -EINVAL;
1936 
1937 	down_read(&pmd->root_lock);
1938 	if (!pmd->fail_io)
1939 		r = dm_sm_get_nr_free(pmd->metadata_sm, result);
1940 
1941 	if (!r) {
1942 		if (*result < pmd->metadata_reserve)
1943 			*result = 0;
1944 		else
1945 			*result -= pmd->metadata_reserve;
1946 	}
1947 	up_read(&pmd->root_lock);
1948 
1949 	return r;
1950 }
1951 
1952 int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
1953 				  dm_block_t *result)
1954 {
1955 	int r = -EINVAL;
1956 
1957 	down_read(&pmd->root_lock);
1958 	if (!pmd->fail_io)
1959 		r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
1960 	up_read(&pmd->root_lock);
1961 
1962 	return r;
1963 }
1964 
1965 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
1966 {
1967 	int r = -EINVAL;
1968 
1969 	down_read(&pmd->root_lock);
1970 	if (!pmd->fail_io)
1971 		r = dm_sm_get_nr_blocks(pmd->data_sm, result);
1972 	up_read(&pmd->root_lock);
1973 
1974 	return r;
1975 }
1976 
1977 int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
1978 {
1979 	int r = -EINVAL;
1980 	struct dm_pool_metadata *pmd = td->pmd;
1981 
1982 	down_read(&pmd->root_lock);
1983 	if (!pmd->fail_io) {
1984 		*result = td->mapped_blocks;
1985 		r = 0;
1986 	}
1987 	up_read(&pmd->root_lock);
1988 
1989 	return r;
1990 }
1991 
1992 static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
1993 {
1994 	int r;
1995 	__le64 value_le;
1996 	dm_block_t thin_root;
1997 	struct dm_pool_metadata *pmd = td->pmd;
1998 
1999 	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
2000 	if (r)
2001 		return r;
2002 
2003 	thin_root = le64_to_cpu(value_le);
2004 
2005 	return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
2006 }
2007 
2008 int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
2009 				     dm_block_t *result)
2010 {
2011 	int r = -EINVAL;
2012 	struct dm_pool_metadata *pmd = td->pmd;
2013 
2014 	down_read(&pmd->root_lock);
2015 	if (!pmd->fail_io)
2016 		r = __highest_block(td, result);
2017 	up_read(&pmd->root_lock);
2018 
2019 	return r;
2020 }
2021 
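/*
 * Space maps can only grow; a request to shrink one is rejected with
 * -EINVAL.
 */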
2022 static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
2023 {
2024 	int r;
2025 	dm_block_t old_count;
2026 
2027 	r = dm_sm_get_nr_blocks(sm, &old_count);
2028 	if (r)
2029 		return r;
2030 
2031 	if (new_count == old_count)
2032 		return 0;
2033 
2034 	if (new_count < old_count) {
2035 		DMERR("cannot reduce size of space map");
2036 		return -EINVAL;
2037 	}
2038 
2039 	return dm_sm_extend(sm, new_count - old_count);
2040 }
2041 
2042 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
2043 {
2044 	int r = -EINVAL;
2045 
2046 	pmd_write_lock(pmd);
2047 	if (!pmd->fail_io)
2048 		r = __resize_space_map(pmd->data_sm, new_count);
2049 	pmd_write_unlock(pmd);
2050 
2051 	return r;
2052 }
2053 
2054 int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
2055 {
2056 	int r = -EINVAL;
2057 
2058 	pmd_write_lock(pmd);
2059 	if (!pmd->fail_io) {
2060 		r = __resize_space_map(pmd->metadata_sm, new_count);
2061 		if (!r)
2062 			__set_metadata_reserve(pmd);
2063 	}
2064 	pmd_write_unlock(pmd);
2065 
2066 	return r;
2067 }
2068 
2069 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
2070 {
2071 	pmd_write_lock_in_core(pmd);
2072 	dm_bm_set_read_only(pmd->bm);
2073 	pmd_write_unlock(pmd);
2074 }
2075 
2076 void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
2077 {
2078 	pmd_write_lock_in_core(pmd);
2079 	dm_bm_set_read_write(pmd->bm);
2080 	pmd_write_unlock(pmd);
2081 }
2082 
2083 int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
2084 					dm_block_t threshold,
2085 					dm_sm_threshold_fn fn,
2086 					void *context)
2087 {
2088 	int r = -EINVAL;
2089 
2090 	pmd_write_lock_in_core(pmd);
2091 	if (!pmd->fail_io) {
2092 		r = dm_sm_register_threshold_callback(pmd->metadata_sm,
2093 						      threshold, fn, context);
2094 	}
2095 	pmd_write_unlock(pmd);
2096 
2097 	return r;
2098 }
2099 
2100 void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
2101 					  dm_pool_pre_commit_fn fn,
2102 					  void *context)
2103 {
2104 	pmd_write_lock_in_core(pmd);
2105 	pmd->pre_commit_fn = fn;
2106 	pmd->pre_commit_context = context;
2107 	pmd_write_unlock(pmd);
2108 }
2109 
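/*
 * Record THIN_METADATA_NEEDS_CHECK_FLAG both in core and in the on-disk
 * superblock, presumably so that offline tooling such as thin_check can
 * see that the metadata needs attention.
 */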
2110 int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
2111 {
2112 	int r = -EINVAL;
2113 	struct dm_block *sblock;
2114 	struct thin_disk_superblock *disk_super;
2115 
2116 	pmd_write_lock(pmd);
2117 	if (pmd->fail_io)
2118 		goto out;
2119 
2120 	pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
2121 
2122 	r = superblock_lock(pmd, &sblock);
2123 	if (r) {
2124 		DMERR("couldn't lock superblock");
2125 		goto out;
2126 	}
2127 
2128 	disk_super = dm_block_data(sblock);
2129 	disk_super->flags = cpu_to_le32(pmd->flags);
2130 
2131 	dm_bm_unlock(sblock);
2132 out:
2133 	pmd_write_unlock(pmd);
2134 	return r;
2135 }
2136 
2137 bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
2138 {
2139 	bool needs_check;
2140 
2141 	down_read(&pmd->root_lock);
2142 	needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
2143 	up_read(&pmd->root_lock);
2144 
2145 	return needs_check;
2146 }
2147 
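/*
 * Ask the transaction manager to issue prefetches for metadata blocks.
 * This is only an optimisation, so it is quietly skipped once fail_io
 * is set.
 */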
2148 void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
2149 {
2150 	down_read(&pmd->root_lock);
2151 	if (!pmd->fail_io)
2152 		dm_tm_issue_prefetches(pmd->tm);
2153 	up_read(&pmd->root_lock);
2154 }
2155