/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
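
/*
 * Example geometry (illustrative, assuming a 64-byte L1 cache line and
 * an 8-byte sector_t): NODE_SIZE is 64, KEYS_PER_NODE is 8 and
 * CHILDREN_PER_NODE is 9, so even a table with millions of targets
 * stays far below MAX_DEPTH levels of index.
 */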

/*
 * The table always has exactly one reference, from either
 * mapped_device->map or hash_cell->new_map. This reference is not
 * counted in table->holders. The dm_table_create()/dm_table_destroy()
 * pair is used for table creation/destruction.
 *
 * Temporary references from other code increase table->holders. The
 * dm_table_get()/dm_table_put() pair is used to manipulate it.
 *
 * When the table is about to be destroyed, we wait for table->holders
 * to drop to zero.
 */

struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	unsigned barriers_supported:1;

	/*
	 * Indicates the rw permissions for the new logical device.
	 * This should be a combination of FMODE_READ and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the targets;
	 * some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceiling(log_base(n)).
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
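
/*
 * For example (illustrative): int_log(1000, 9) iterates
 * 1000 -> 112 -> 13 -> 2 -> 1 and returns 4, matching
 * ceil(log_9(1000)).
 */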

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
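
/*
 * E.g. (illustrative): min_not_zero(0, 1024) is 1024 and
 * min_not_zero(512, 1024) is 512; a zero limit means "unset",
 * not "smallest".
 */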

/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->max_hw_sectors =
		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);

	lhs->no_cluster |= rhs->no_cluster;
}
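
/*
 * Note (added): hardsect_size deliberately takes the *maximum*, since
 * the combined device must not issue I/O smaller than the largest
 * hardware sector of any component; every other limit is capped at
 * the smallest non-zero value.
 */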

/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
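
/*
 * Layout of the single dm_vcalloc() block above (added note): the
 * first num sector_t slots hold the "highs" offsets and the target
 * array starts immediately after them. Unused slots are filled with
 * (sector_t) -1, the largest possible key, so a lookup of a sector
 * beyond the loaded targets lands on an empty entry that the caller
 * can detect.
 */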

int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 0);
	t->barriers_supported = 1;

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
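
/*
 * Typical load sequence (illustrative sketch, not a real caller; "md",
 * "len" and "params" are assumed inputs):
 *
 *	struct dm_table *t;
 *	int r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 0, md);
 *	if (!r)
 *		r = dm_table_add_target(t, "linear", 0, len, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		dm_table_destroy(t);
 */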

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		kfree(dd);
	}
}

void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	while (atomic_read(&t->holders))
		msleep(1);
	smp_mb();

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}
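
/*
 * Note (added): destruction busy-waits for temporary references to
 * drain rather than using a completion; holders are expected to be
 * short-lived, so the msleep(1) loop above is a simple, rarely-taken
 * path.
 */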

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	smp_mb__before_atomic_dec();
	atomic_dec(&t->holders);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev.bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
		    struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;
	int r;

	BUG_ON(d->dm_dev.bdev);

	bdev = open_by_devnum(dev, d->dm_dev.mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev, d->dm_dev.mode);
	else
		d->dm_dev.bdev = bdev;

	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
	if (!d->dm_dev.bdev)
		return;

	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
	d->dm_dev.bdev = NULL;
}

/*
 * If possible, this checks that an area of the destination device is
 * valid; a device that reports zero size cannot be checked.
 */
static int check_device_area(struct dm_dev_internal *dd, sector_t start,
			     sector_t len)
{
	sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 1;

	return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already open dm_dev, being careful to
 * leave things as they were if we fail to reopen the device.
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev_internal dd_copy;
	dev_t dev = dd->dm_dev.bdev->bd_dev;

	dd_copy = *dd;

	dd->dm_dev.mode |= new_mode;
	dd->dm_dev.bdev = NULL;
	r = open_dev(dd, dev, md);
	if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}
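
/*
 * Note (added): the new descriptor is opened before the old one is
 * closed, so the device stays claimed by device-mapper throughout the
 * upgrade; on failure the saved copy is restored and the original
 * open is untouched.
 */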

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      fmode_t mode, struct dm_dev **result)
{
	int r;
	dev_t uninitialized_var(dev);
	struct dm_dev_internal *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		struct block_device *bdev = lookup_bdev(path);

		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->dm_dev.mode = mode;
		dd->dm_dev.bdev = NULL;

		r = open_dev(dd, dev, t->md);
		if (r) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->dm_dev.name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, &dd->dm_dev);
		return -EINVAL;
	}

	*result = &dd->dm_dev;

	return 0;
}

void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return;
	}

	/*
	 * Combine the device limits low.
	 *
	 * FIXME: if we move an io_restriction struct
	 *        into q this would just be a call to
	 *        combine_restrictions_low()
	 */
	rs->max_sectors =
		min_not_zero(rs->max_sectors, q->max_sectors);

	/*
	 * Check if merge fn is supported.
	 * If not we'll force DM to use PAGE_SIZE or
	 * smaller I/O, just to be safe.
	 */
	if (q->merge_bvec_fn && !ti->type->merge)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     q->max_phys_segments);

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, q->max_hw_segments);

	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);

	rs->max_hw_sectors =
		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     q->seg_boundary_mask);

	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, fmode_t mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);

	if (!r)
		dm_set_device_limits(ti, (*result)->bdev);

	return r;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
						  dm_dev);

	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}
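
/*
 * Typical use from a target constructor (illustrative sketch; "lc" is
 * a hypothetical per-target private struct):
 *
 *	r = dm_get_device(ti, argv[0], 0, ti->len,
 *			  dm_table_get_mode(ti->table), &lc->dev);
 *	if (r) {
 *		ti->error = "device lookup failed";
 *		return r;
 *	}
 *	...
 *	dm_put_device(ti, lc->dev);	(in the destructor)
 */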

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any backslash escapes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
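
/*
 * Example (illustrative): given the modifiable string
 * "linear /dev/sdb1 0", dm_split_args() yields argc == 3 with
 * argv = { "linear", "/dev/sdb1", "0" }; a backslash escapes the
 * following character, so "a\ b" becomes the single token "a b".
 */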

static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_hw_sectors)
		rs->max_hw_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (!rs->bounce_pfn)
		rs->bounce_pfn = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	r = check_space(t);
	if (r)
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);

	if (!(tgt->type->features & DM_TARGET_SUPPORTS_BARRIERS))
		t->barriers_supported = 0;

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/*
	 * We only support barriers if there is exactly one underlying device.
	 */
	if (!list_is_singular(&t->devices))
		t->barriers_supported = 0;

	/* how many indexes will the btree have? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
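
/*
 * Worked example (illustrative, assuming KEYS_PER_NODE == 8 and
 * CHILDREN_PER_NODE == 9): a table with 20 targets needs
 * dm_div_up(20, 8) == 3 leaf nodes, so depth = 1 + int_log(3, 9) == 2;
 * setup_indexes() then builds one internal node whose keys are the
 * highest sectors of the three leaves.
 */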

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
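
/*
 * Walk-through (illustrative): at each level the node's keys are the
 * highest sector mapped under each child, so the first key >= sector
 * selects the child to descend into; after the leaf level, n and k
 * together index straight into the flat targets array.
 */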

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub-device
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->max_hw_sectors = t->limits.max_hw_sectors;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	q->bounce_pfn = t->limits.bounce_pfn;

	if (t->limits.no_cluster)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			blk_unplug(q);
		else
			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_get(t->md);

	return t->md;
}

int dm_table_barrier_ok(struct dm_table *t)
{
	return t->barriers_supported;
}
EXPORT_SYMBOL(dm_table_barrier_ok);

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);