/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"
#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
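
/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12),
 * (1UL << 20) >> 12 == 256 pages per snapshot.  The "?:" GNU
 * extension falls back to 1 page should the shift ever yield 0.
 */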

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Slab caches for the completed and pending exception structures.
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
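
/*
 * Usage note (summarising the flow in this file): snapshot_map()
 * calls track_chunk() for reads it redirects to the origin, and
 * snapshot_end_io() calls stop_tracking_chunk() when each such read
 * completes.  pending_complete() polls __chunk_is_tracked() so a
 * chunk is never remapped while one of those reads is still in
 * flight.
 */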

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect.
 * The table and the snapshot lists hanging off it are protected
 * by _origins_lock.
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
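
/*
 * Illustrative example: the hash is simply the low byte of the device
 * number, so bd_dev 0xfe05 maps to bucket 0x05.  Devices that agree
 * in the low 8 bits share a bucket and are distinguished by
 * bdev_equal() during lookup.
 */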

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
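
/*
 * Worked example: with hash_shift == 2 and hash_mask == 0xff, chunks
 * 8, 9, 10 and 11 all hash to (chunk >> 2) & 0xff == 2, so a run of
 * consecutive chunks lands in the same bucket and can be merged into
 * a single dm_snap_exception (see insert_completed_exception() below).
 */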

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
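
/*
 * Worked example of the merge above: suppose an existing exception e
 * has old_chunk 10, new_chunk 20 and a consecutive count of 1, i.e.
 * it covers old chunks 10-11 mapped to new chunks 20-21.  A new_e
 * with old_chunk 12 and new_chunk 22 satisfies the "insert after"
 * test (12 == 10 + 1 + 1 and 22 == 20 + 1 + 1), so the count is
 * bumped to 2 and new_e is freed rather than inserted.
 */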

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}
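
/*
 * For example, on a typical 64-bit build sizeof(struct list_head) is
 * 16 bytes, so this caps the table at 2 MiB / 16 == 131072 buckets.
 */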

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}
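
/*
 * E.g. round_up(5, 8) == (5 + 7) & ~7 == 8, while round_up(16, 8)
 * stays 16; values already on the boundary are unchanged.
 */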

static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}
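
/*
 * Worked example: a chunk size argument of "16" (sectors, i.e. 8 KiB)
 * yields chunk_size == 16, chunk_mask == 15 and chunk_shift == 4, so
 * sector_to_chunk() is a right shift by 4 and the offset within a
 * chunk is sector & 15.
 */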

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
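/*
 * An illustrative dmsetup table line (hypothetical device names):
 *
 *   0 409600 snapshot /dev/vg0/base /dev/vg0/cow P 16
 *
 * i.e. persistent ('P') exceptions with 16-sector (8 KiB) chunks.
 */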
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->ti = ti;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		r = -ENOMEM;	/* don't fall through with r still 0 */
		goto bad6;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		r = -ENOMEM;	/* don't fall through with r still 0 */
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store, dm_add_exception, (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

 bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

 bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		    &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception(s);
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
			 (chunk - e->old_chunk)) +
			 (bio->bi_sector & s->chunk_mask);
}
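
/*
 * Worked example: with 16-sector chunks (chunk_mask == 15), a bio at
 * sector 165 falls in old chunk 10 at offset 5.  If that chunk is
 * remapped by an exception whose new_chunk number is 3, the bio is
 * redirected to sector 3 * 16 + 5 == 53 on the COW device.
 */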

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					(unsigned long long)numerator,
					(unsigned long long)denominator);
			} else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
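/*
 * An illustrative dmsetup table line (hypothetical device name):
 *
 *   0 409600 snapshot-origin /dev/vg0/base
 */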
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
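/*
 * e.g. min_not_zero(0, 8) == 8, min_not_zero(4, 0) == 4 and
 * min_not_zero(4, 8) == 4: a zero value never masks a real minimum.
 */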

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");