1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm-core.h"
9 
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/string.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/mutex.h>
19 #include <linux/delay.h>
20 #include <linux/atomic.h>
21 #include <linux/blk-mq.h>
22 #include <linux/mount.h>
23 #include <linux/dax.h>
24 #include <linux/bio.h>
25 #include <linux/keyslot-manager.h>
26 
27 #define DM_MSG_PREFIX "table"
28 
29 #define MAX_DEPTH 16
30 #define NODE_SIZE L1_CACHE_BYTES
31 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
32 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
33 
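/*
 * Worked example (added for illustration, assuming a 64-byte L1 cache line
 * and an 8-byte sector_t): NODE_SIZE = 64, so KEYS_PER_NODE = 64 / 8 = 8
 * and CHILDREN_PER_NODE = 9.  A table with 200 targets then needs
 * dm_div_up(200, 8) = 25 leaf nodes and a btree of depth
 * 1 + int_log(25, 9) = 3 (see dm_table_build_index() below).
 */
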
34 struct dm_table {
35 	struct mapped_device *md;
36 	enum dm_queue_mode type;
37 
38 	/* btree table */
39 	unsigned int depth;
40 	unsigned int counts[MAX_DEPTH];	/* in nodes */
41 	sector_t *index[MAX_DEPTH];
42 
43 	unsigned int num_targets;
44 	unsigned int num_allocated;
45 	sector_t *highs;
46 	struct dm_target *targets;
47 
48 	struct target_type *immutable_target_type;
49 
50 	bool integrity_supported:1;
51 	bool singleton:1;
52 	unsigned integrity_added:1;
53 
54 	/*
55 	 * Indicates the rw permissions for the new logical
56 	 * device.  This should be a combination of FMODE_READ
57 	 * and FMODE_WRITE.
58 	 */
59 	fmode_t mode;
60 
61 	/* a list of devices used by this table */
62 	struct list_head devices;
63 
64 	/* events get handed up using this callback */
65 	void (*event_fn)(void *);
66 	void *event_context;
67 
68 	struct dm_md_mempools *mempools;
69 
70 	struct list_head target_callbacks;
71 };
72 
73 /*
74  * Similar to ceiling(log_base(n))
75  */
76 static unsigned int int_log(unsigned int n, unsigned int base)
77 {
78 	int result = 0;
79 
80 	while (n > 1) {
81 		n = dm_div_up(n, base);
82 		result++;
83 	}
84 
85 	return result;
86 }
87 
88 /*
89  * Calculate the index of the child node of the n'th node's k'th key.
90  */
91 static inline unsigned int get_child(unsigned int n, unsigned int k)
92 {
93 	return (n * CHILDREN_PER_NODE) + k;
94 }
95 
96 /*
97  * Return the n'th node of level l from table t.
98  */
99 static inline sector_t *get_node(struct dm_table *t,
100 				 unsigned int l, unsigned int n)
101 {
102 	return t->index[l] + (n * KEYS_PER_NODE);
103 }
104 
105 /*
106  * Return the highest key that you could look up from the n'th
107  * node on level l of the btree.
108  */
109 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
110 {
111 	for (; l < t->depth - 1; l++)
112 		n = get_child(n, CHILDREN_PER_NODE - 1);
113 
114 	if (n >= t->counts[l])
115 		return (sector_t) - 1;
116 
117 	return get_node(t, l, n)[KEYS_PER_NODE - 1];
118 }
119 
120 /*
121  * Fills in a level of the btree based on the highs of the level
122  * below it.
123  */
124 static int setup_btree_index(unsigned int l, struct dm_table *t)
125 {
126 	unsigned int n, k;
127 	sector_t *node;
128 
129 	for (n = 0U; n < t->counts[l]; n++) {
130 		node = get_node(t, l, n);
131 
132 		for (k = 0U; k < KEYS_PER_NODE; k++)
133 			node[k] = high(t, l + 1, get_child(n, k));
134 	}
135 
136 	return 0;
137 }
138 
139 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
140 {
141 	unsigned long size;
142 	void *addr;
143 
144 	/*
145 	 * Check that we're not going to overflow.
146 	 */
147 	if (nmemb > (ULONG_MAX / elem_size))
148 		return NULL;
149 
150 	size = nmemb * elem_size;
151 	addr = vzalloc(size);
152 
153 	return addr;
154 }
155 EXPORT_SYMBOL(dm_vcalloc);
156 
157 /*
158  * highs and targets are managed as dynamic arrays during a
159  * table load.
160  */
161 static int alloc_targets(struct dm_table *t, unsigned int num)
162 {
163 	sector_t *n_highs;
164 	struct dm_target *n_targets;
165 
166 	/*
167 	 * Allocate both the target array and offset array at once.
168 	 */
169 	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
170 					  sizeof(sector_t));
171 	if (!n_highs)
172 		return -ENOMEM;
173 
174 	n_targets = (struct dm_target *) (n_highs + num);
175 
176 	memset(n_highs, -1, sizeof(*n_highs) * num);
177 	vfree(t->highs);
178 
179 	t->num_allocated = num;
180 	t->highs = n_highs;
181 	t->targets = n_targets;
182 
183 	return 0;
184 }
185 
186 int dm_table_create(struct dm_table **result, fmode_t mode,
187 		    unsigned num_targets, struct mapped_device *md)
188 {
189 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
190 
191 	if (!t)
192 		return -ENOMEM;
193 
194 	INIT_LIST_HEAD(&t->devices);
195 	INIT_LIST_HEAD(&t->target_callbacks);
196 
197 	if (!num_targets)
198 		num_targets = KEYS_PER_NODE;
199 
200 	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
201 
202 	if (!num_targets) {
203 		kfree(t);
204 		return -ENOMEM;
205 	}
206 
207 	if (alloc_targets(t, num_targets)) {
208 		kfree(t);
209 		return -ENOMEM;
210 	}
211 
212 	t->type = DM_TYPE_NONE;
213 	t->mode = mode;
214 	t->md = md;
215 	*result = t;
216 	return 0;
217 }
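
/*
 * Usage sketch (illustrative only, not part of the kernel source): the
 * table-load ioctl path builds a table roughly as below before binding it
 * to the mapped device.  example_load_table and the target line are
 * hypothetical; error handling is abbreviated.
 */
#if 0
static int example_load_table(struct mapped_device *md)
{
	struct dm_table *t;
	char params[] = "/dev/sda 0";	/* writable: dm_split_args() modifies it */
	int r;

	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
	if (r)
		return r;

	/* one linear target covering sectors 0..409599 */
	r = dm_table_add_target(t, "linear", 0, 409600, params);
	if (!r)
		r = dm_table_complete(t);
	if (r) {
		dm_table_destroy(t);
		return r;
	}

	/* a real caller would now bind the completed table to md (see dm.c) */
	return 0;
}
#endif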
218 
219 static void free_devices(struct list_head *devices, struct mapped_device *md)
220 {
221 	struct list_head *tmp, *next;
222 
223 	list_for_each_safe(tmp, next, devices) {
224 		struct dm_dev_internal *dd =
225 		    list_entry(tmp, struct dm_dev_internal, list);
226 		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
227 		       dm_device_name(md), dd->dm_dev->name);
228 		dm_put_table_device(md, dd->dm_dev);
229 		kfree(dd);
230 	}
231 }
232 
233 void dm_table_destroy(struct dm_table *t)
234 {
235 	unsigned int i;
236 
237 	if (!t)
238 		return;
239 
240 	/* free the indexes */
241 	if (t->depth >= 2)
242 		vfree(t->index[t->depth - 2]);
243 
244 	/* free the targets */
245 	for (i = 0; i < t->num_targets; i++) {
246 		struct dm_target *tgt = t->targets + i;
247 
248 		if (tgt->type->dtr)
249 			tgt->type->dtr(tgt);
250 
251 		dm_put_target_type(tgt->type);
252 	}
253 
254 	vfree(t->highs);
255 
256 	/* free the device list */
257 	free_devices(&t->devices, t->md);
258 
259 	dm_free_md_mempools(t->mempools);
260 
261 	kfree(t);
262 }
263 
264 /*
265  * See if we've already got a device in the list.
266  */
267 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
268 {
269 	struct dm_dev_internal *dd;
270 
271 	list_for_each_entry (dd, l, list)
272 		if (dd->dm_dev->bdev->bd_dev == dev)
273 			return dd;
274 
275 	return NULL;
276 }
277 
278 /*
279  * If possible, this checks whether an area of a destination device is invalid.
280  */
281 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
282 				  sector_t start, sector_t len, void *data)
283 {
284 	struct request_queue *q;
285 	struct queue_limits *limits = data;
286 	struct block_device *bdev = dev->bdev;
287 	sector_t dev_size =
288 		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
289 	unsigned short logical_block_size_sectors =
290 		limits->logical_block_size >> SECTOR_SHIFT;
291 	char b[BDEVNAME_SIZE];
292 
293 	/*
294 	 * Some devices exist without request functions,
295 	 * such as loop devices not yet bound to backing files.
296 	 * Forbid the use of such devices.
297 	 */
298 	q = bdev_get_queue(bdev);
299 	if (!q || !q->make_request_fn) {
300 		DMWARN("%s: %s is not yet initialised: "
301 		       "start=%llu, len=%llu, dev_size=%llu",
302 		       dm_device_name(ti->table->md), bdevname(bdev, b),
303 		       (unsigned long long)start,
304 		       (unsigned long long)len,
305 		       (unsigned long long)dev_size);
306 		return 1;
307 	}
308 
309 	if (!dev_size)
310 		return 0;
311 
312 	if ((start >= dev_size) || (start + len > dev_size)) {
313 		DMWARN("%s: %s too small for target: "
314 		       "start=%llu, len=%llu, dev_size=%llu",
315 		       dm_device_name(ti->table->md), bdevname(bdev, b),
316 		       (unsigned long long)start,
317 		       (unsigned long long)len,
318 		       (unsigned long long)dev_size);
319 		return 1;
320 	}
321 
322 	/*
323 	 * If the target is mapped to zoned block device(s), check
324 	 * that the zones are not partially mapped.
325 	 */
326 	if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
327 		unsigned int zone_sectors = bdev_zone_sectors(bdev);
328 
329 		if (start & (zone_sectors - 1)) {
330 			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
331 			       dm_device_name(ti->table->md),
332 			       (unsigned long long)start,
333 			       zone_sectors, bdevname(bdev, b));
334 			return 1;
335 		}
336 
337 		/*
338 		 * Note: The last zone of a zoned block device may be smaller
339 		 * than other zones. So for a target mapping the end of a
340 		 * zoned block device with such a zone, len would not be zone
341 		 * aligned. We do not allow such last smaller zone to be part
342 		 * of the mapping here to ensure that mappings with multiple
343 		 * devices do not end up with a smaller zone in the middle of
344 		 * the sector range.
345 		 */
346 		if (len & (zone_sectors - 1)) {
347 			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
348 			       dm_device_name(ti->table->md),
349 			       (unsigned long long)len,
350 			       zone_sectors, bdevname(bdev, b));
351 			return 1;
352 		}
353 	}
354 
355 	if (logical_block_size_sectors <= 1)
356 		return 0;
357 
358 	if (start & (logical_block_size_sectors - 1)) {
359 		DMWARN("%s: start=%llu not aligned to h/w "
360 		       "logical block size %u of %s",
361 		       dm_device_name(ti->table->md),
362 		       (unsigned long long)start,
363 		       limits->logical_block_size, bdevname(bdev, b));
364 		return 1;
365 	}
366 
367 	if (len & (logical_block_size_sectors - 1)) {
368 		DMWARN("%s: len=%llu not aligned to h/w "
369 		       "logical block size %u of %s",
370 		       dm_device_name(ti->table->md),
371 		       (unsigned long long)len,
372 		       limits->logical_block_size, bdevname(bdev, b));
373 		return 1;
374 	}
375 
376 	return 0;
377 }
378 
379 /*
380  * This upgrades the mode on an already open dm_dev, being
381  * careful to leave things as they were if we fail to reopen the
382  * device and not to touch the existing bdev field in case
383  * it is accessed concurrently inside dm_table_any_congested().
384  */
385 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
386 			struct mapped_device *md)
387 {
388 	int r;
389 	struct dm_dev *old_dev, *new_dev;
390 
391 	old_dev = dd->dm_dev;
392 
393 	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
394 				dd->dm_dev->mode | new_mode, &new_dev);
395 	if (r)
396 		return r;
397 
398 	dd->dm_dev = new_dev;
399 	dm_put_table_device(md, old_dev);
400 
401 	return 0;
402 }
403 
404 /*
405  * Convert the path to a device
406  */
407 dev_t dm_get_dev_t(const char *path)
408 {
409 	dev_t dev;
410 	struct block_device *bdev;
411 
412 	bdev = lookup_bdev(path);
413 	if (IS_ERR(bdev))
414 		dev = name_to_dev_t(path);
415 	else {
416 		dev = bdev->bd_dev;
417 		bdput(bdev);
418 	}
419 
420 	return dev;
421 }
422 EXPORT_SYMBOL_GPL(dm_get_dev_t);
423 
424 /*
425  * Add a device to the list, or just increment the usage count if
426  * it's already present.
427  */
428 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
429 		  struct dm_dev **result)
430 {
431 	int r;
432 	dev_t dev;
433 	struct dm_dev_internal *dd;
434 	struct dm_table *t = ti->table;
435 
436 	BUG_ON(!t);
437 
438 	dev = dm_get_dev_t(path);
439 	if (!dev)
440 		return -ENODEV;
441 
442 	dd = find_device(&t->devices, dev);
443 	if (!dd) {
444 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
445 		if (!dd)
446 			return -ENOMEM;
447 
448 		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
449 			kfree(dd);
450 			return r;
451 		}
452 
453 		refcount_set(&dd->count, 1);
454 		list_add(&dd->list, &t->devices);
455 		goto out;
456 
457 	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
458 		r = upgrade_mode(dd, mode, t->md);
459 		if (r)
460 			return r;
461 	}
462 	refcount_inc(&dd->count);
463 out:
464 	*result = dd->dm_dev;
465 	return 0;
466 }
467 EXPORT_SYMBOL(dm_get_device);
468 
469 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
470 				sector_t start, sector_t len, void *data)
471 {
472 	struct queue_limits *limits = data;
473 	struct block_device *bdev = dev->bdev;
474 	struct request_queue *q = bdev_get_queue(bdev);
475 	char b[BDEVNAME_SIZE];
476 
477 	if (unlikely(!q)) {
478 		DMWARN("%s: Cannot set limits for nonexistent device %s",
479 		       dm_device_name(ti->table->md), bdevname(bdev, b));
480 		return 0;
481 	}
482 
483 	if (bdev_stack_limits(limits, bdev, start) < 0)
484 		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
485 		       "physical_block_size=%u, logical_block_size=%u, "
486 		       "alignment_offset=%u, start=%llu",
487 		       dm_device_name(ti->table->md), bdevname(bdev, b),
488 		       q->limits.physical_block_size,
489 		       q->limits.logical_block_size,
490 		       q->limits.alignment_offset,
491 		       (unsigned long long) start << SECTOR_SHIFT);
492 
493 	limits->zoned = blk_queue_zoned_model(q);
494 
495 	return 0;
496 }
497 
498 /*
499  * Decrement a device's use count and remove it if necessary.
500  */
501 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
502 {
503 	int found = 0;
504 	struct list_head *devices = &ti->table->devices;
505 	struct dm_dev_internal *dd;
506 
507 	list_for_each_entry(dd, devices, list) {
508 		if (dd->dm_dev == d) {
509 			found = 1;
510 			break;
511 		}
512 	}
513 	if (!found) {
514 		DMWARN("%s: device %s not in table devices list",
515 		       dm_device_name(ti->table->md), d->name);
516 		return;
517 	}
518 	if (refcount_dec_and_test(&dd->count)) {
519 		dm_put_table_device(ti->table->md, d);
520 		list_del(&dd->list);
521 		kfree(dd);
522 	}
523 }
524 EXPORT_SYMBOL(dm_put_device);
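
/*
 * Illustrative sketch (hypothetical target, not part of the kernel source):
 * the typical constructor/destructor pairing around dm_get_device() and
 * dm_put_device().  example_ctx, example_ctr and example_dtr are invented
 * names.
 */
#if 0
struct example_ctx {
	struct dm_dev *dev;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_ctx *ec;
	int r;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ec = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec)
		return -ENOMEM;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev);
	if (r) {
		ti->error = "Device lookup failed";
		kfree(ec);
		return r;
	}

	ti->private = ec;
	return 0;
}

static void example_dtr(struct dm_target *ti)
{
	struct example_ctx *ec = ti->private;

	dm_put_device(ti, ec->dev);
	kfree(ec);
}
#endif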
525 
526 /*
527  * Checks to see if the target joins onto the end of the table.
528  */
529 static int adjoin(struct dm_table *table, struct dm_target *ti)
530 {
531 	struct dm_target *prev;
532 
533 	if (!table->num_targets)
534 		return !ti->begin;
535 
536 	prev = &table->targets[table->num_targets - 1];
537 	return (ti->begin == (prev->begin + prev->len));
538 }
539 
540 /*
541  * Used to dynamically allocate the arg array.
542  *
543  * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
544  * process messages even if some device is suspended. These messages have a
545  * small fixed number of arguments.
546  *
547  * On the other hand, dm-switch needs to process bulk data using messages and
548  * excessive use of GFP_NOIO could cause trouble.
549  */
550 static char **realloc_argv(unsigned *size, char **old_argv)
551 {
552 	char **argv;
553 	unsigned new_size;
554 	gfp_t gfp;
555 
556 	if (*size) {
557 		new_size = *size * 2;
558 		gfp = GFP_KERNEL;
559 	} else {
560 		new_size = 8;
561 		gfp = GFP_NOIO;
562 	}
563 	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
564 	if (argv && old_argv) {
565 		memcpy(argv, old_argv, *size * sizeof(*argv));
566 		*size = new_size;
567 	}
568 
569 	kfree(old_argv);
570 	return argv;
571 }
572 
573 /*
574  * Destructively splits up the argument list to pass to ctr.
575  */
576 int dm_split_args(int *argc, char ***argvp, char *input)
577 {
578 	char *start, *end = input, *out, **argv = NULL;
579 	unsigned array_size = 0;
580 
581 	*argc = 0;
582 
583 	if (!input) {
584 		*argvp = NULL;
585 		return 0;
586 	}
587 
588 	argv = realloc_argv(&array_size, argv);
589 	if (!argv)
590 		return -ENOMEM;
591 
592 	while (1) {
593 		/* Skip whitespace */
594 		start = skip_spaces(end);
595 
596 		if (!*start)
597 			break;	/* success, we hit the end */
598 
599 		/* 'out' is used to remove any backslash quoting */
600 		end = out = start;
601 		while (*end) {
602 			/* Everything apart from '\0' can be quoted */
603 			if (*end == '\\' && *(end + 1)) {
604 				*out++ = *(end + 1);
605 				end += 2;
606 				continue;
607 			}
608 
609 			if (isspace(*end))
610 				break;	/* end of token */
611 
612 			*out++ = *end++;
613 		}
614 
615 		/* have we already filled the array ? */
616 		if ((*argc + 1) > array_size) {
617 			argv = realloc_argv(&array_size, argv);
618 			if (!argv)
619 				return -ENOMEM;
620 		}
621 
622 		/* we know this is whitespace */
623 		if (*end)
624 			end++;
625 
626 		/* terminate the string and put it in the array */
627 		*out = '\0';
628 		argv[*argc] = start;
629 		(*argc)++;
630 	}
631 
632 	*argvp = argv;
633 	return 0;
634 }
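
/*
 * Example (illustrative): given the writable string "2 /dev/sda /dev/sdb",
 * dm_split_args() sets *argc to 3 and argv to { "2", "/dev/sda", "/dev/sdb" }.
 * A backslash quotes the following character, so "a\ b" is returned as the
 * single token "a b".
 */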
635 
636 /*
637  * Impose necessary and sufficient conditions on a device's table such
638  * that any incoming bio which respects its logical_block_size can be
639  * processed successfully.  If it falls across the boundary between
640  * two or more targets, the size of each piece it gets split into must
641  * be compatible with the logical_block_size of the target processing it.
642  */
643 static int validate_hardware_logical_block_alignment(struct dm_table *table,
644 						 struct queue_limits *limits)
645 {
646 	/*
647 	 * This function uses arithmetic modulo the logical_block_size
648 	 * (in units of 512-byte sectors).
649 	 */
650 	unsigned short device_logical_block_size_sects =
651 		limits->logical_block_size >> SECTOR_SHIFT;
652 
653 	/*
654 	 * Offset of the start of the next table entry, mod logical_block_size.
655 	 */
656 	unsigned short next_target_start = 0;
657 
658 	/*
659 	 * Given an aligned bio that extends beyond the end of a
660 	 * target, how many sectors must the next target handle?
661 	 */
662 	unsigned short remaining = 0;
663 
664 	struct dm_target *uninitialized_var(ti);
665 	struct queue_limits ti_limits;
666 	unsigned i;
667 
668 	/*
669 	 * Check each entry in the table in turn.
670 	 */
671 	for (i = 0; i < dm_table_get_num_targets(table); i++) {
672 		ti = dm_table_get_target(table, i);
673 
674 		blk_set_stacking_limits(&ti_limits);
675 
676 		/* combine all target devices' limits */
677 		if (ti->type->iterate_devices)
678 			ti->type->iterate_devices(ti, dm_set_device_limits,
679 						  &ti_limits);
680 
681 		/*
682 		 * If the remaining sectors fall entirely within this
683 		 * table entry are they compatible with its logical_block_size?
684 		 */
685 		if (remaining < ti->len &&
686 		    remaining & ((ti_limits.logical_block_size >>
687 				  SECTOR_SHIFT) - 1))
688 			break;	/* Error */
689 
690 		next_target_start =
691 		    (unsigned short) ((next_target_start + ti->len) &
692 				      (device_logical_block_size_sects - 1));
693 		remaining = next_target_start ?
694 		    device_logical_block_size_sects - next_target_start : 0;
695 	}
696 
697 	if (remaining) {
698 		DMWARN("%s: table line %u (start sect %llu len %llu) "
699 		       "not aligned to h/w logical block size %u",
700 		       dm_device_name(table->md), i,
701 		       (unsigned long long) ti->begin,
702 		       (unsigned long long) ti->len,
703 		       limits->logical_block_size);
704 		return -EINVAL;
705 	}
706 
707 	return 0;
708 }
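
/*
 * Worked example (illustrative): with a 4096-byte logical_block_size
 * (8 sectors), a first target of 1000 sectors gives
 * next_target_start = 1000 % 8 = 0 and remaining = 0, so no aligned bio can
 * straddle the boundary.  A first target of 1001 sectors instead gives
 * next_target_start = 1 and remaining = 7: the next target can only accept
 * that 7-sector tail if its own logical_block_size in sectors divides 7,
 * i.e. if it is 512 bytes; otherwise the table is rejected with -EINVAL.
 */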
709 
710 int dm_table_add_target(struct dm_table *t, const char *type,
711 			sector_t start, sector_t len, char *params)
712 {
713 	int r = -EINVAL, argc;
714 	char **argv;
715 	struct dm_target *tgt;
716 
717 	if (t->singleton) {
718 		DMERR("%s: target type %s must appear alone in table",
719 		      dm_device_name(t->md), t->targets->type->name);
720 		return -EINVAL;
721 	}
722 
723 	BUG_ON(t->num_targets >= t->num_allocated);
724 
725 	tgt = t->targets + t->num_targets;
726 	memset(tgt, 0, sizeof(*tgt));
727 
728 	if (!len) {
729 		DMERR("%s: zero-length target", dm_device_name(t->md));
730 		return -EINVAL;
731 	}
732 
733 	tgt->type = dm_get_target_type(type);
734 	if (!tgt->type) {
735 		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
736 		return -EINVAL;
737 	}
738 
739 	if (dm_target_needs_singleton(tgt->type)) {
740 		if (t->num_targets) {
741 			tgt->error = "singleton target type must appear alone in table";
742 			goto bad;
743 		}
744 		t->singleton = true;
745 	}
746 
747 	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
748 		tgt->error = "target type may not be included in a read-only table";
749 		goto bad;
750 	}
751 
752 	if (t->immutable_target_type) {
753 		if (t->immutable_target_type != tgt->type) {
754 			tgt->error = "immutable target type cannot be mixed with other target types";
755 			goto bad;
756 		}
757 	} else if (dm_target_is_immutable(tgt->type)) {
758 		if (t->num_targets) {
759 			tgt->error = "immutable target type cannot be mixed with other target types";
760 			goto bad;
761 		}
762 		t->immutable_target_type = tgt->type;
763 	}
764 
765 	if (dm_target_has_integrity(tgt->type))
766 		t->integrity_added = 1;
767 
768 	tgt->table = t;
769 	tgt->begin = start;
770 	tgt->len = len;
771 	tgt->error = "Unknown error";
772 
773 	/*
774 	 * Does this target adjoin the previous one ?
775 	 */
776 	if (!adjoin(t, tgt)) {
777 		tgt->error = "Gap in table";
778 		goto bad;
779 	}
780 
781 	r = dm_split_args(&argc, &argv, params);
782 	if (r) {
783 		tgt->error = "couldn't split parameters (insufficient memory)";
784 		goto bad;
785 	}
786 
787 	r = tgt->type->ctr(tgt, argc, argv);
788 	kfree(argv);
789 	if (r)
790 		goto bad;
791 
792 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
793 
794 	if (!tgt->num_discard_bios && tgt->discards_supported)
795 		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
796 		       dm_device_name(t->md), type);
797 
798 	return 0;
799 
800  bad:
801 	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
802 	dm_put_target_type(tgt->type);
803 	return r;
804 }
805 
806 /*
807  * Target argument parsing helpers.
808  */
809 static int validate_next_arg(const struct dm_arg *arg,
810 			     struct dm_arg_set *arg_set,
811 			     unsigned *value, char **error, unsigned grouped)
812 {
813 	const char *arg_str = dm_shift_arg(arg_set);
814 	char dummy;
815 
816 	if (!arg_str ||
817 	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
818 	    (*value < arg->min) ||
819 	    (*value > arg->max) ||
820 	    (grouped && arg_set->argc < *value)) {
821 		*error = arg->error;
822 		return -EINVAL;
823 	}
824 
825 	return 0;
826 }
827 
828 int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
829 		unsigned *value, char **error)
830 {
831 	return validate_next_arg(arg, arg_set, value, error, 0);
832 }
833 EXPORT_SYMBOL(dm_read_arg);
834 
835 int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
836 		      unsigned *value, char **error)
837 {
838 	return validate_next_arg(arg, arg_set, value, error, 1);
839 }
840 EXPORT_SYMBOL(dm_read_arg_group);
841 
842 const char *dm_shift_arg(struct dm_arg_set *as)
843 {
844 	char *r;
845 
846 	if (as->argc) {
847 		as->argc--;
848 		r = *as->argv;
849 		as->argv++;
850 		return r;
851 	}
852 
853 	return NULL;
854 }
855 EXPORT_SYMBOL(dm_shift_arg);
856 
857 void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
858 {
859 	BUG_ON(as->argc < num_args);
860 	as->argc -= num_args;
861 	as->argv += num_args;
862 }
863 EXPORT_SYMBOL(dm_consume_args);
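
/*
 * Illustrative sketch (hypothetical target, not part of the kernel source):
 * the common pattern for parsing a counted group of optional feature
 * arguments with dm_read_arg_group() and dm_shift_arg().
 * example_parse_features and the feature names are invented.
 */
#if 0
static int example_parse_features(struct dm_arg_set *as, struct dm_target *ti)
{
	static const struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature arguments"},
	};
	unsigned argc;
	const char *arg;
	int r;

	/* No optional feature arguments is fine. */
	if (!as->argc)
		return 0;

	/* Reads the count, checks its range and that enough args remain. */
	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc--) {
		arg = dm_shift_arg(as);

		if (!strcasecmp(arg, "example_feature_a"))
			continue;	/* would enable hypothetical feature a */

		if (!strcasecmp(arg, "example_feature_b"))
			continue;	/* would enable hypothetical feature b */

		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	return 0;
}
#endif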
864 
865 static bool __table_type_bio_based(enum dm_queue_mode table_type)
866 {
867 	return (table_type == DM_TYPE_BIO_BASED ||
868 		table_type == DM_TYPE_DAX_BIO_BASED ||
869 		table_type == DM_TYPE_NVME_BIO_BASED);
870 }
871 
872 static bool __table_type_request_based(enum dm_queue_mode table_type)
873 {
874 	return table_type == DM_TYPE_REQUEST_BASED;
875 }
876 
877 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
878 {
879 	t->type = type;
880 }
881 EXPORT_SYMBOL_GPL(dm_table_set_type);
882 
883 /* validate the dax capability of the target device span */
884 int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
885 			sector_t start, sector_t len, void *data)
886 {
887 	int blocksize = *(int *) data;
888 
889 	return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
890 				       start, len);
891 }
892 
893 /* Check devices support synchronous DAX */
894 static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
895 				  sector_t start, sector_t len, void *data)
896 {
897 	return dev->dax_dev && dax_synchronous(dev->dax_dev);
898 }
899 
900 bool dm_table_supports_dax(struct dm_table *t,
901 			   iterate_devices_callout_fn iterate_fn, int *blocksize)
902 {
903 	struct dm_target *ti;
904 	unsigned i;
905 
906 	/* Ensure that all targets support DAX. */
907 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
908 		ti = dm_table_get_target(t, i);
909 
910 		if (!ti->type->direct_access)
911 			return false;
912 
913 		if (!ti->type->iterate_devices ||
914 		    !ti->type->iterate_devices(ti, iterate_fn, blocksize))
915 			return false;
916 	}
917 
918 	return true;
919 }
920 
921 static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
922 
923 struct verify_rq_based_data {
924 	unsigned sq_count;
925 	unsigned mq_count;
926 };
927 
928 static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
929 			      sector_t start, sector_t len, void *data)
930 {
931 	struct request_queue *q = bdev_get_queue(dev->bdev);
932 	struct verify_rq_based_data *v = data;
933 
934 	if (queue_is_mq(q))
935 		v->mq_count++;
936 	else
937 		v->sq_count++;
938 
939 	return queue_is_mq(q);
940 }
941 
942 static int dm_table_determine_type(struct dm_table *t)
943 {
944 	unsigned i;
945 	unsigned bio_based = 0, request_based = 0, hybrid = 0;
946 	struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
947 	struct dm_target *tgt;
948 	struct list_head *devices = dm_table_get_devices(t);
949 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
950 	int page_size = PAGE_SIZE;
951 
952 	if (t->type != DM_TYPE_NONE) {
953 		/* target already set the table's type */
954 		if (t->type == DM_TYPE_BIO_BASED) {
955 			/* possibly upgrade to a variant of bio-based */
956 			goto verify_bio_based;
957 		}
958 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
959 		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
960 		goto verify_rq_based;
961 	}
962 
963 	for (i = 0; i < t->num_targets; i++) {
964 		tgt = t->targets + i;
965 		if (dm_target_hybrid(tgt))
966 			hybrid = 1;
967 		else if (dm_target_request_based(tgt))
968 			request_based = 1;
969 		else
970 			bio_based = 1;
971 
972 		if (bio_based && request_based) {
973 			DMERR("Inconsistent table: different target types"
974 			      " can't be mixed up");
975 			return -EINVAL;
976 		}
977 	}
978 
979 	if (hybrid && !bio_based && !request_based) {
980 		/*
981 		 * The targets can work either way.
982 		 * Determine the type from the live device.
983 		 * Default to bio-based if device is new.
984 		 */
985 		if (__table_type_request_based(live_md_type))
986 			request_based = 1;
987 		else
988 			bio_based = 1;
989 	}
990 
991 	if (bio_based) {
992 verify_bio_based:
993 		/* We must use this table as bio-based */
994 		t->type = DM_TYPE_BIO_BASED;
995 		if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
996 		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
997 			t->type = DM_TYPE_DAX_BIO_BASED;
998 		} else {
999 			/* Check if upgrading to NVMe bio-based is valid or required */
1000 			tgt = dm_table_get_immutable_target(t);
1001 			if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
1002 				t->type = DM_TYPE_NVME_BIO_BASED;
1003 				goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
1004 			} else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
1005 				t->type = DM_TYPE_NVME_BIO_BASED;
1006 			}
1007 		}
1008 		return 0;
1009 	}
1010 
1011 	BUG_ON(!request_based); /* No targets in this table */
1012 
1013 	t->type = DM_TYPE_REQUEST_BASED;
1014 
1015 verify_rq_based:
1016 	/*
1017 	 * Request-based dm supports only tables that have a single target now.
1018 	 * To support multiple targets, request splitting support is needed,
1019 	 * and that needs lots of changes in the block-layer.
1020 	 * (e.g. request completion process for partial completion.)
1021 	 */
1022 	if (t->num_targets > 1) {
1023 		DMERR("%s DM doesn't support multiple targets",
1024 		      t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
1025 		return -EINVAL;
1026 	}
1027 
1028 	if (list_empty(devices)) {
1029 		int srcu_idx;
1030 		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
1031 
1032 		/* inherit live table's type */
1033 		if (live_table)
1034 			t->type = live_table->type;
1035 		dm_put_live_table(t->md, srcu_idx);
1036 		return 0;
1037 	}
1038 
1039 	tgt = dm_table_get_immutable_target(t);
1040 	if (!tgt) {
1041 		DMERR("table load rejected: immutable target is required");
1042 		return -EINVAL;
1043 	} else if (tgt->max_io_len) {
1044 		DMERR("table load rejected: immutable target that splits IO is not supported");
1045 		return -EINVAL;
1046 	}
1047 
1048 	/* Non-request-stackable devices can't be used for request-based dm */
1049 	if (!tgt->type->iterate_devices ||
1050 	    !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
1051 		DMERR("table load rejected: including non-request-stackable devices");
1052 		return -EINVAL;
1053 	}
1054 	if (v.sq_count > 0) {
1055 		DMERR("table load rejected: not all devices are blk-mq request-stackable");
1056 		return -EINVAL;
1057 	}
1058 
1059 	return 0;
1060 }
1061 
1062 enum dm_queue_mode dm_table_get_type(struct dm_table *t)
1063 {
1064 	return t->type;
1065 }
1066 
1067 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
1068 {
1069 	return t->immutable_target_type;
1070 }
1071 
1072 struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
1073 {
1074 	/* Immutable target is implicitly a singleton */
1075 	if (t->num_targets > 1 ||
1076 	    !dm_target_is_immutable(t->targets[0].type))
1077 		return NULL;
1078 
1079 	return t->targets;
1080 }
1081 
1082 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
1083 {
1084 	struct dm_target *ti;
1085 	unsigned i;
1086 
1087 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1088 		ti = dm_table_get_target(t, i);
1089 		if (dm_target_is_wildcard(ti->type))
1090 			return ti;
1091 	}
1092 
1093 	return NULL;
1094 }
1095 
1096 bool dm_table_bio_based(struct dm_table *t)
1097 {
1098 	return __table_type_bio_based(dm_table_get_type(t));
1099 }
1100 
1101 bool dm_table_request_based(struct dm_table *t)
1102 {
1103 	return __table_type_request_based(dm_table_get_type(t));
1104 }
1105 
1106 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1107 {
1108 	enum dm_queue_mode type = dm_table_get_type(t);
1109 	unsigned per_io_data_size = 0;
1110 	unsigned min_pool_size = 0;
1111 	struct dm_target *ti;
1112 	unsigned i;
1113 
1114 	if (unlikely(type == DM_TYPE_NONE)) {
1115 		DMWARN("no table type is set, can't allocate mempools");
1116 		return -EINVAL;
1117 	}
1118 
1119 	if (__table_type_bio_based(type))
1120 		for (i = 0; i < t->num_targets; i++) {
1121 			ti = t->targets + i;
1122 			per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1123 			min_pool_size = max(min_pool_size, ti->num_flush_bios);
1124 		}
1125 
1126 	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
1127 					   per_io_data_size, min_pool_size);
1128 	if (!t->mempools)
1129 		return -ENOMEM;
1130 
1131 	return 0;
1132 }
1133 
1134 void dm_table_free_md_mempools(struct dm_table *t)
1135 {
1136 	dm_free_md_mempools(t->mempools);
1137 	t->mempools = NULL;
1138 }
1139 
1140 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
1141 {
1142 	return t->mempools;
1143 }
1144 
1145 static int setup_indexes(struct dm_table *t)
1146 {
1147 	int i;
1148 	unsigned int total = 0;
1149 	sector_t *indexes;
1150 
1151 	/* allocate the space for *all* the indexes */
1152 	for (i = t->depth - 2; i >= 0; i--) {
1153 		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1154 		total += t->counts[i];
1155 	}
1156 
1157 	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
1158 	if (!indexes)
1159 		return -ENOMEM;
1160 
1161 	/* set up internal nodes, bottom-up */
1162 	for (i = t->depth - 2; i >= 0; i--) {
1163 		t->index[i] = indexes;
1164 		indexes += (KEYS_PER_NODE * t->counts[i]);
1165 		setup_btree_index(i, t);
1166 	}
1167 
1168 	return 0;
1169 }
1170 
1171 /*
1172  * Builds the btree to index the map.
1173  */
1174 static int dm_table_build_index(struct dm_table *t)
1175 {
1176 	int r = 0;
1177 	unsigned int leaf_nodes;
1178 
1179 	/* how many indexes will the btree have ? */
1180 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1181 	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1182 
1183 	/* leaf layer has already been set up */
1184 	t->counts[t->depth - 1] = leaf_nodes;
1185 	t->index[t->depth - 1] = t->highs;
1186 
1187 	if (t->depth >= 2)
1188 		r = setup_indexes(t);
1189 
1190 	return r;
1191 }
1192 
1193 static bool integrity_profile_exists(struct gendisk *disk)
1194 {
1195 	return !!blk_get_integrity(disk);
1196 }
1197 
1198 /*
1199  * Get a disk whose integrity profile reflects the table's profile.
1200  * Returns NULL if integrity support was inconsistent or unavailable.
1201  */
1202 static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
1203 {
1204 	struct list_head *devices = dm_table_get_devices(t);
1205 	struct dm_dev_internal *dd = NULL;
1206 	struct gendisk *prev_disk = NULL, *template_disk = NULL;
1207 	unsigned i;
1208 
1209 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1210 		struct dm_target *ti = dm_table_get_target(t, i);
1211 		if (!dm_target_passes_integrity(ti->type))
1212 			goto no_integrity;
1213 	}
1214 
1215 	list_for_each_entry(dd, devices, list) {
1216 		template_disk = dd->dm_dev->bdev->bd_disk;
1217 		if (!integrity_profile_exists(template_disk))
1218 			goto no_integrity;
1219 		else if (prev_disk &&
1220 			 blk_integrity_compare(prev_disk, template_disk) < 0)
1221 			goto no_integrity;
1222 		prev_disk = template_disk;
1223 	}
1224 
1225 	return template_disk;
1226 
1227 no_integrity:
1228 	if (prev_disk)
1229 		DMWARN("%s: integrity not set: %s and %s profile mismatch",
1230 		       dm_device_name(t->md),
1231 		       prev_disk->disk_name,
1232 		       template_disk->disk_name);
1233 	return NULL;
1234 }
1235 
1236 /*
1237  * Register the mapped device for blk_integrity support if the
1238  * underlying devices have an integrity profile.  But all devices may
1239  * not have matching profiles (checking all devices isn't reliable
1240  * during table load because this table may use other DM device(s) which
1241  * must be resumed before they will have an initialized integrity
1242  * profile).  Consequently, stacked DM devices force a 2 stage integrity
1243  * profile validation: First pass during table load, final pass during
1244  * resume.
1245  */
1246 static int dm_table_register_integrity(struct dm_table *t)
1247 {
1248 	struct mapped_device *md = t->md;
1249 	struct gendisk *template_disk = NULL;
1250 
1251 	/* If target handles integrity itself do not register it here. */
1252 	if (t->integrity_added)
1253 		return 0;
1254 
1255 	template_disk = dm_table_get_integrity_disk(t);
1256 	if (!template_disk)
1257 		return 0;
1258 
1259 	if (!integrity_profile_exists(dm_disk(md))) {
1260 		t->integrity_supported = true;
1261 		/*
1262 		 * Register integrity profile during table load; we can do
1263 		 * this because the final profile must match during resume.
1264 		 */
1265 		blk_integrity_register(dm_disk(md),
1266 				       blk_get_integrity(template_disk));
1267 		return 0;
1268 	}
1269 
1270 	/*
1271 	 * If DM device already has an initialized integrity
1272 	 * profile the new profile should not conflict.
1273 	 */
1274 	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1275 		DMWARN("%s: conflict with existing integrity profile: "
1276 		       "%s profile mismatch",
1277 		       dm_device_name(t->md),
1278 		       template_disk->disk_name);
1279 		return 1;
1280 	}
1281 
1282 	/* Preserve existing integrity profile */
1283 	t->integrity_supported = true;
1284 	return 0;
1285 }
1286 
1287 /*
1288  * Prepares the table for use by building the indices,
1289  * setting the type, and allocating mempools.
1290  */
1291 int dm_table_complete(struct dm_table *t)
1292 {
1293 	int r;
1294 
1295 	r = dm_table_determine_type(t);
1296 	if (r) {
1297 		DMERR("unable to determine table type");
1298 		return r;
1299 	}
1300 
1301 	r = dm_table_build_index(t);
1302 	if (r) {
1303 		DMERR("unable to build btrees");
1304 		return r;
1305 	}
1306 
1307 	r = dm_table_register_integrity(t);
1308 	if (r) {
1309 		DMERR("could not register integrity profile.");
1310 		return r;
1311 	}
1312 
1313 	r = dm_table_alloc_md_mempools(t, t->md);
1314 	if (r)
1315 		DMERR("unable to allocate mempools");
1316 
1317 	return r;
1318 }
1319 
1320 static DEFINE_MUTEX(_event_lock);
1321 void dm_table_event_callback(struct dm_table *t,
1322 			     void (*fn)(void *), void *context)
1323 {
1324 	mutex_lock(&_event_lock);
1325 	t->event_fn = fn;
1326 	t->event_context = context;
1327 	mutex_unlock(&_event_lock);
1328 }
1329 
1330 void dm_table_event(struct dm_table *t)
1331 {
1332 	/*
1333 	 * You can no longer call dm_table_event() from interrupt
1334 	 * context, use a bottom half instead.
1335 	 */
1336 	BUG_ON(in_interrupt());
1337 
1338 	mutex_lock(&_event_lock);
1339 	if (t->event_fn)
1340 		t->event_fn(t->event_context);
1341 	mutex_unlock(&_event_lock);
1342 }
1343 EXPORT_SYMBOL(dm_table_event);
1344 
1345 inline sector_t dm_table_get_size(struct dm_table *t)
1346 {
1347 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1348 }
1349 EXPORT_SYMBOL(dm_table_get_size);
1350 
1351 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1352 {
1353 	if (index >= t->num_targets)
1354 		return NULL;
1355 
1356 	return t->targets + index;
1357 }
1358 
1359 /*
1360  * Search the btree for the correct target.
1361  *
1362  * Caller should check returned pointer for NULL
1363  * to trap I/O beyond end of device.
1364  */
1365 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1366 {
1367 	unsigned int l, n = 0, k = 0;
1368 	sector_t *node;
1369 
1370 	if (unlikely(sector >= dm_table_get_size(t)))
1371 		return NULL;
1372 
1373 	for (l = 0; l < t->depth; l++) {
1374 		n = get_child(n, k);
1375 		node = get_node(t, l, n);
1376 
1377 		for (k = 0; k < KEYS_PER_NODE; k++)
1378 			if (node[k] >= sector)
1379 				break;
1380 	}
1381 
1382 	return &t->targets[(KEYS_PER_NODE * n) + k];
1383 }
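
/*
 * Illustrative caller pattern (simplified; the real one lives in dm.c):
 *
 *	ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
 *	if (!ti)
 *		return -EIO;	(I/O beyond the end of the device)
 */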
1384 
1385 static int count_device(struct dm_target *ti, struct dm_dev *dev,
1386 			sector_t start, sector_t len, void *data)
1387 {
1388 	unsigned *num_devices = data;
1389 
1390 	(*num_devices)++;
1391 
1392 	return 0;
1393 }
1394 
1395 /*
1396  * Check whether a table has no data devices attached using each
1397  * target's iterate_devices method.
1398  * Returns false if the result is unknown because a target doesn't
1399  * support iterate_devices.
1400  */
1401 bool dm_table_has_no_data_devices(struct dm_table *table)
1402 {
1403 	struct dm_target *ti;
1404 	unsigned i, num_devices;
1405 
1406 	for (i = 0; i < dm_table_get_num_targets(table); i++) {
1407 		ti = dm_table_get_target(table, i);
1408 
1409 		if (!ti->type->iterate_devices)
1410 			return false;
1411 
1412 		num_devices = 0;
1413 		ti->type->iterate_devices(ti, count_device, &num_devices);
1414 		if (num_devices)
1415 			return false;
1416 	}
1417 
1418 	return true;
1419 }
1420 
1421 static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1422 				 sector_t start, sector_t len, void *data)
1423 {
1424 	struct request_queue *q = bdev_get_queue(dev->bdev);
1425 	enum blk_zoned_model *zoned_model = data;
1426 
1427 	return q && blk_queue_zoned_model(q) == *zoned_model;
1428 }
1429 
1430 static bool dm_table_supports_zoned_model(struct dm_table *t,
1431 					  enum blk_zoned_model zoned_model)
1432 {
1433 	struct dm_target *ti;
1434 	unsigned i;
1435 
1436 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1437 		ti = dm_table_get_target(t, i);
1438 
1439 		if (zoned_model == BLK_ZONED_HM &&
1440 		    !dm_target_supports_zoned_hm(ti->type))
1441 			return false;
1442 
1443 		if (!ti->type->iterate_devices ||
1444 		    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
1445 			return false;
1446 	}
1447 
1448 	return true;
1449 }
1450 
1451 static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1452 				       sector_t start, sector_t len, void *data)
1453 {
1454 	struct request_queue *q = bdev_get_queue(dev->bdev);
1455 	unsigned int *zone_sectors = data;
1456 
1457 	return q && blk_queue_zone_sectors(q) == *zone_sectors;
1458 }
1459 
1460 static bool dm_table_matches_zone_sectors(struct dm_table *t,
1461 					  unsigned int zone_sectors)
1462 {
1463 	struct dm_target *ti;
1464 	unsigned i;
1465 
1466 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1467 		ti = dm_table_get_target(t, i);
1468 
1469 		if (!ti->type->iterate_devices ||
1470 		    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
1471 			return false;
1472 	}
1473 
1474 	return true;
1475 }
1476 
1477 static int validate_hardware_zoned_model(struct dm_table *table,
1478 					 enum blk_zoned_model zoned_model,
1479 					 unsigned int zone_sectors)
1480 {
1481 	if (zoned_model == BLK_ZONED_NONE)
1482 		return 0;
1483 
1484 	if (!dm_table_supports_zoned_model(table, zoned_model)) {
1485 		DMERR("%s: zoned model is not consistent across all devices",
1486 		      dm_device_name(table->md));
1487 		return -EINVAL;
1488 	}
1489 
1490 	/* Check zone size validity and compatibility */
1491 	if (!zone_sectors || !is_power_of_2(zone_sectors))
1492 		return -EINVAL;
1493 
1494 	if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
1495 		DMERR("%s: zone sectors is not consistent across all devices",
1496 		      dm_device_name(table->md));
1497 		return -EINVAL;
1498 	}
1499 
1500 	return 0;
1501 }
1502 
1503 /*
1504  * Establish the new table's queue_limits and validate them.
1505  */
1506 int dm_calculate_queue_limits(struct dm_table *table,
1507 			      struct queue_limits *limits)
1508 {
1509 	struct dm_target *ti;
1510 	struct queue_limits ti_limits;
1511 	unsigned i;
1512 	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
1513 	unsigned int zone_sectors = 0;
1514 
1515 	blk_set_stacking_limits(limits);
1516 
1517 	for (i = 0; i < dm_table_get_num_targets(table); i++) {
1518 		blk_set_stacking_limits(&ti_limits);
1519 
1520 		ti = dm_table_get_target(table, i);
1521 
1522 		if (!ti->type->iterate_devices)
1523 			goto combine_limits;
1524 
1525 		/*
1526 		 * Combine queue limits of all the devices this target uses.
1527 		 */
1528 		ti->type->iterate_devices(ti, dm_set_device_limits,
1529 					  &ti_limits);
1530 
1531 		if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1532 			/*
1533 			 * After stacking all limits, validate all devices
1534 			 * in table support this zoned model and zone sectors.
1535 			 */
1536 			zoned_model = ti_limits.zoned;
1537 			zone_sectors = ti_limits.chunk_sectors;
1538 		}
1539 
1540 		/* Set I/O hints portion of queue limits */
1541 		if (ti->type->io_hints)
1542 			ti->type->io_hints(ti, &ti_limits);
1543 
1544 		/*
1545 		 * Check each device area is consistent with the target's
1546 		 * overall queue limits.
1547 		 */
1548 		if (ti->type->iterate_devices(ti, device_area_is_invalid,
1549 					      &ti_limits))
1550 			return -EINVAL;
1551 
1552 combine_limits:
1553 		/*
1554 		 * Merge this target's queue limits into the overall limits
1555 		 * for the table.
1556 		 */
1557 		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1558 			DMWARN("%s: adding target device "
1559 			       "(start sect %llu len %llu) "
1560 			       "caused an alignment inconsistency",
1561 			       dm_device_name(table->md),
1562 			       (unsigned long long) ti->begin,
1563 			       (unsigned long long) ti->len);
1564 
1565 		/*
1566 		 * FIXME: this should likely be moved to blk_stack_limits(), would
1567 		 * also eliminate limits->zoned stacking hack in dm_set_device_limits()
1568 		 */
1569 		if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1570 			/*
1571 			 * By default, the stacked limits zoned model is set to
1572 			 * BLK_ZONED_NONE in blk_set_stacking_limits(). Update
1573 			 * this model using the first target model reported
1574 			 * that is not BLK_ZONED_NONE. This will be either the
1575 			 * first target device zoned model or the model reported
1576 			 * by the target .io_hints.
1577 			 */
1578 			limits->zoned = ti_limits.zoned;
1579 		}
1580 	}
1581 
1582 	/*
1583 	 * Verify that the zoned model and zone sectors, as determined before
1584 	 * any .io_hints override, are the same across all devices in the table.
1585 	 * - this is especially relevant if .io_hints is emulating a disk-managed
1586 	 *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
1587 	 * BUT...
1588 	 */
1589 	if (limits->zoned != BLK_ZONED_NONE) {
1590 		/*
1591 		 * ...IF the above limits stacking determined a zoned model
1592 		 * validate that all of the table's devices conform to it.
1593 		 */
1594 		zoned_model = limits->zoned;
1595 		zone_sectors = limits->chunk_sectors;
1596 	}
1597 	if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
1598 		return -EINVAL;
1599 
1600 	return validate_hardware_logical_block_alignment(table, limits);
1601 }
1602 
1603 /*
1604  * Verify that all devices have an integrity profile that matches the
1605  * DM device's registered integrity profile.  If the profiles don't
1606  * match then unregister the DM device's integrity profile.
1607  */
1608 static void dm_table_verify_integrity(struct dm_table *t)
1609 {
1610 	struct gendisk *template_disk = NULL;
1611 
1612 	if (t->integrity_added)
1613 		return;
1614 
1615 	if (t->integrity_supported) {
1616 		/*
1617 		 * Verify that the original integrity profile
1618 		 * matches all the devices in this table.
1619 		 */
1620 		template_disk = dm_table_get_integrity_disk(t);
1621 		if (template_disk &&
1622 		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
1623 			return;
1624 	}
1625 
1626 	if (integrity_profile_exists(dm_disk(t->md))) {
1627 		DMWARN("%s: unable to establish an integrity profile",
1628 		       dm_device_name(t->md));
1629 		blk_integrity_unregister(dm_disk(t->md));
1630 	}
1631 }
1632 
1633 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1634 static int device_intersect_crypto_modes(struct dm_target *ti,
1635 					 struct dm_dev *dev, sector_t start,
1636 					 sector_t len, void *data)
1637 {
1638 	struct keyslot_manager *parent = data;
1639 	struct keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
1640 
1641 	keyslot_manager_intersect_modes(parent, child);
1642 	return 0;
1643 }
1644 
1645 /*
1646  * Update the inline crypto modes supported by 'q->ksm' to be the intersection
1647  * of the modes supported by all targets in the table.
1648  *
1649  * For any mode to be supported at all, all targets must have explicitly
1650  * declared that they can pass through inline crypto support.  For a particular
1651  * mode to be supported, all underlying devices must also support it.
1652  *
1653  * Assume that 'q->ksm' initially declares all modes to be supported.
1654  */
1655 static void dm_calculate_supported_crypto_modes(struct dm_table *t,
1656 						struct request_queue *q)
1657 {
1658 	struct dm_target *ti;
1659 	unsigned int i;
1660 
1661 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1662 		ti = dm_table_get_target(t, i);
1663 
1664 		if (!ti->may_passthrough_inline_crypto) {
1665 			keyslot_manager_intersect_modes(q->ksm, NULL);
1666 			return;
1667 		}
1668 		if (!ti->type->iterate_devices)
1669 			continue;
1670 		ti->type->iterate_devices(ti, device_intersect_crypto_modes,
1671 					  q->ksm);
1672 	}
1673 }
1674 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1675 static inline void dm_calculate_supported_crypto_modes(struct dm_table *t,
1676 						       struct request_queue *q)
1677 {
1678 }
1679 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1680 
1681 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1682 				sector_t start, sector_t len, void *data)
1683 {
1684 	unsigned long flush = (unsigned long) data;
1685 	struct request_queue *q = bdev_get_queue(dev->bdev);
1686 
1687 	return q && (q->queue_flags & flush);
1688 }
1689 
1690 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1691 {
1692 	struct dm_target *ti;
1693 	unsigned i;
1694 
1695 	/*
1696 	 * Require at least one underlying device to support flushes.
1697 	 * t->devices includes internal dm devices such as mirror logs
1698 	 * so we need to use iterate_devices here, which targets
1699 	 * supporting flushes must provide.
1700 	 */
1701 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1702 		ti = dm_table_get_target(t, i);
1703 
1704 		if (!ti->num_flush_bios)
1705 			continue;
1706 
1707 		if (ti->flush_supported)
1708 			return true;
1709 
1710 		if (ti->type->iterate_devices &&
1711 		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1712 			return true;
1713 	}
1714 
1715 	return false;
1716 }
1717 
1718 static int device_dax_write_cache_enabled(struct dm_target *ti,
1719 					  struct dm_dev *dev, sector_t start,
1720 					  sector_t len, void *data)
1721 {
1722 	struct dax_device *dax_dev = dev->dax_dev;
1723 
1724 	if (!dax_dev)
1725 		return false;
1726 
1727 	if (dax_write_cache_enabled(dax_dev))
1728 		return true;
1729 	return false;
1730 }
1731 
1732 static int dm_table_supports_dax_write_cache(struct dm_table *t)
1733 {
1734 	struct dm_target *ti;
1735 	unsigned i;
1736 
1737 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1738 		ti = dm_table_get_target(t, i);
1739 
1740 		if (ti->type->iterate_devices &&
1741 		    ti->type->iterate_devices(ti,
1742 				device_dax_write_cache_enabled, NULL))
1743 			return true;
1744 	}
1745 
1746 	return false;
1747 }
1748 
1749 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
1750 			    sector_t start, sector_t len, void *data)
1751 {
1752 	struct request_queue *q = bdev_get_queue(dev->bdev);
1753 
1754 	return q && blk_queue_nonrot(q);
1755 }
1756 
1757 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1758 			     sector_t start, sector_t len, void *data)
1759 {
1760 	struct request_queue *q = bdev_get_queue(dev->bdev);
1761 
1762 	return q && !blk_queue_add_random(q);
1763 }
1764 
1765 static bool dm_table_all_devices_attribute(struct dm_table *t,
1766 					   iterate_devices_callout_fn func)
1767 {
1768 	struct dm_target *ti;
1769 	unsigned i;
1770 
1771 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1772 		ti = dm_table_get_target(t, i);
1773 
1774 		if (!ti->type->iterate_devices ||
1775 		    !ti->type->iterate_devices(ti, func, NULL))
1776 			return false;
1777 	}
1778 
1779 	return true;
1780 }
1781 
1782 static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
1783 					sector_t start, sector_t len, void *data)
1784 {
1785 	char b[BDEVNAME_SIZE];
1786 
1787 	/* For now, NVMe devices are the only devices of this class */
1788 	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
1789 }
1790 
1791 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
1792 {
1793 	return dm_table_all_devices_attribute(t, device_no_partial_completion);
1794 }
1795 
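/*
 * WRITE SAME and WRITE ZEROES follow the same rule: every target must
 * declare the corresponding num_*_bios and no underlying device may
 * report a zero max_write_{same,zeroes}_sectors limit.  The negative
 * callouts below return true for a device that lacks the capability.
 */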
1796 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
1797 					 sector_t start, sector_t len, void *data)
1798 {
1799 	struct request_queue *q = bdev_get_queue(dev->bdev);
1800 
1801 	return q && !q->limits.max_write_same_sectors;
1802 }
1803 
1804 static bool dm_table_supports_write_same(struct dm_table *t)
1805 {
1806 	struct dm_target *ti;
1807 	unsigned i;
1808 
1809 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1810 		ti = dm_table_get_target(t, i);
1811 
1812 		if (!ti->num_write_same_bios)
1813 			return false;
1814 
1815 		if (!ti->type->iterate_devices ||
1816 		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1817 			return false;
1818 	}
1819 
1820 	return true;
1821 }
1822 
1823 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1824 					   sector_t start, sector_t len, void *data)
1825 {
1826 	struct request_queue *q = bdev_get_queue(dev->bdev);
1827 
1828 	return q && !q->limits.max_write_zeroes_sectors;
1829 }
1830 
1831 static bool dm_table_supports_write_zeroes(struct dm_table *t)
1832 {
1833 	struct dm_target *ti;
1834 	unsigned i = 0;
1835 
1836 	while (i < dm_table_get_num_targets(t)) {
1837 		ti = dm_table_get_target(t, i++);
1838 
1839 		if (!ti->num_write_zeroes_bios)
1840 			return false;
1841 
1842 		if (!ti->type->iterate_devices ||
1843 		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1844 			return false;
1845 	}
1846 
1847 	return true;
1848 }
1849 
1850 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1851 				      sector_t start, sector_t len, void *data)
1852 {
1853 	struct request_queue *q = bdev_get_queue(dev->bdev);
1854 
1855 	return q && !blk_queue_discard(q);
1856 }
1857 
1858 static bool dm_table_supports_discards(struct dm_table *t)
1859 {
1860 	struct dm_target *ti;
1861 	unsigned i;
1862 
1863 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1864 		ti = dm_table_get_target(t, i);
1865 
1866 		if (!ti->num_discard_bios)
1867 			return false;
1868 
1869 		/*
1870 		 * Either the target provides discard support (as implied by setting
1871 		 * 'discards_supported') or it relies on _all_ data devices having
1872 		 * discard support.
1873 		 */
1874 		if (!ti->discards_supported &&
1875 		    (!ti->type->iterate_devices ||
1876 		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1877 			return false;
1878 	}
1879 
1880 	return true;
1881 }
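/*
 * Note the asymmetry with WRITE SAME/WRITE ZEROES above: a target that
 * sets 'discards_supported' may offer discards even when some of its
 * data devices cannot, whereas the other capabilities always require
 * support from every underlying device.
 */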
1882 
1883 static int device_not_secure_erase_capable(struct dm_target *ti,
1884 					   struct dm_dev *dev, sector_t start,
1885 					   sector_t len, void *data)
1886 {
1887 	struct request_queue *q = bdev_get_queue(dev->bdev);
1888 
1889 	return q && !blk_queue_secure_erase(q);
1890 }
1891 
1892 static bool dm_table_supports_secure_erase(struct dm_table *t)
1893 {
1894 	struct dm_target *ti;
1895 	unsigned int i;
1896 
1897 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1898 		ti = dm_table_get_target(t, i);
1899 
1900 		if (!ti->num_secure_erase_bios)
1901 			return false;
1902 
1903 		if (!ti->type->iterate_devices ||
1904 		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1905 			return false;
1906 	}
1907 
1908 	return true;
1909 }
1910 
1911 static int device_requires_stable_pages(struct dm_target *ti,
1912 					struct dm_dev *dev, sector_t start,
1913 					sector_t len, void *data)
1914 {
1915 	struct request_queue *q = bdev_get_queue(dev->bdev);
1916 
1917 	return q && bdi_cap_stable_pages_required(q->backing_dev_info);
1918 }
1919 
1920 /*
1921  * If any underlying device requires stable pages, a table must require
1922  * them as well.  Only targets that support iterate_devices are considered:
1923  * we don't want error, zero, etc. to require stable pages.
1924  */
1925 static bool dm_table_requires_stable_pages(struct dm_table *t)
1926 {
1927 	struct dm_target *ti;
1928 	unsigned i;
1929 
1930 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1931 		ti = dm_table_get_target(t, i);
1932 
1933 		if (ti->type->iterate_devices &&
1934 		    ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
1935 			return true;
1936 	}
1937 
1938 	return false;
1939 }
1940 
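/*
 * Apply the table's computed queue_limits and capability flags to the
 * mapped device's request_queue.  A minimal sketch of the call order in
 * dm core, assuming the usual table-load path (illustrative only):
 *
 *	struct queue_limits limits;
 *	int r;
 *
 *	r = dm_calculate_queue_limits(t, &limits);
 *	if (!r)
 *		dm_table_set_restrictions(t, md->queue, &limits);
 */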
1941 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1942 			       struct queue_limits *limits)
1943 {
1944 	bool wc = false, fua = false;
1945 	int page_size = PAGE_SIZE;
1946 
1947 	/*
1948 	 * Copy table's limits to the DM device's request_queue
1949 	 */
1950 	q->limits = *limits;
1951 
1952 	if (!dm_table_supports_discards(t)) {
1953 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
1954 		/* Must also clear discard limits... */
1955 		q->limits.max_discard_sectors = 0;
1956 		q->limits.max_hw_discard_sectors = 0;
1957 		q->limits.discard_granularity = 0;
1958 		q->limits.discard_alignment = 0;
1959 		q->limits.discard_misaligned = 0;
1960 	} else
1961 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
1962 
1963 	if (dm_table_supports_secure_erase(t))
1964 		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
1965 
1966 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
1967 		wc = true;
1968 		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
1969 			fua = true;
1970 	}
1971 	blk_queue_write_cache(q, wc, fua);
1972 
1973 	if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
1974 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
1975 		if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
1976 			set_dax_synchronous(t->md->dax_dev);
1977 	}
1978 	else
1979 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
1980 
1981 	if (dm_table_supports_dax_write_cache(t))
1982 		dax_write_cache(t->md->dax_dev, true);
1983 
1984 	/* Ensure that all underlying devices are non-rotational. */
1985 	if (dm_table_all_devices_attribute(t, device_is_nonrot))
1986 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
1987 	else
1988 		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
1989 
1990 	if (!dm_table_supports_write_same(t))
1991 		q->limits.max_write_same_sectors = 0;
1992 	if (!dm_table_supports_write_zeroes(t))
1993 		q->limits.max_write_zeroes_sectors = 0;
1994 
1995 	dm_table_verify_integrity(t);
1996 
1997 	dm_calculate_supported_crypto_modes(t, q);
1998 
1999 	/*
2000 	 * Some devices don't use blk_integrity but still want stable pages
2001 	 * because they do their own checksumming.
2002 	 */
2003 	if (dm_table_requires_stable_pages(t))
2004 		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
2005 	else
2006 		q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
2007 
2008 	/*
2009 	 * Determine whether or not this queue's I/O timings contribute
2010 	 * to the entropy pool.  Only request-based targets use this.
2011 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
2012 	 * have it set.
2013 	 */
2014 	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
2015 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2016 
2017 	/*
2018 	 * For a zoned target, the number of zones should be updated for the
2019 	 * correct value to be exposed in sysfs queue/nr_zones. For a BIO based
2020 	 * target, this is all that is needed.
2021 	 */
2022 #ifdef CONFIG_BLK_DEV_ZONED
2023 	if (blk_queue_is_zoned(q)) {
2024 		WARN_ON_ONCE(queue_is_mq(q));
2025 		q->nr_zones = blkdev_nr_zones(t->md->disk);
2026 	}
2027 #endif
2028 
2029 	/* Allow reads to exceed readahead limits */
2030 	q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
2031 }
2032 
2033 unsigned int dm_table_get_num_targets(struct dm_table *t)
2034 {
2035 	return t->num_targets;
2036 }
2037 
2038 struct list_head *dm_table_get_devices(struct dm_table *t)
2039 {
2040 	return &t->devices;
2041 }
2042 
2043 fmode_t dm_table_get_mode(struct dm_table *t)
2044 {
2045 	return t->mode;
2046 }
2047 EXPORT_SYMBOL(dm_table_get_mode);
2048 
2049 enum suspend_mode {
2050 	PRESUSPEND,
2051 	PRESUSPEND_UNDO,
2052 	POSTSUSPEND,
2053 };
2054 
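/*
 * Walk the targets in table order and invoke the suspend hook selected
 * by @mode; hooks a target does not implement are simply skipped.
 */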
2055 static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
2056 {
2057 	int i = t->num_targets;
2058 	struct dm_target *ti = t->targets;
2059 
2060 	lockdep_assert_held(&t->md->suspend_lock);
2061 
2062 	while (i--) {
2063 		switch (mode) {
2064 		case PRESUSPEND:
2065 			if (ti->type->presuspend)
2066 				ti->type->presuspend(ti);
2067 			break;
2068 		case PRESUSPEND_UNDO:
2069 			if (ti->type->presuspend_undo)
2070 				ti->type->presuspend_undo(ti);
2071 			break;
2072 		case POSTSUSPEND:
2073 			if (ti->type->postsuspend)
2074 				ti->type->postsuspend(ti);
2075 			break;
2076 		}
2077 		ti++;
2078 	}
2079 }
2080 
2081 void dm_table_presuspend_targets(struct dm_table *t)
2082 {
2083 	if (!t)
2084 		return;
2085 
2086 	suspend_targets(t, PRESUSPEND);
2087 }
2088 
2089 void dm_table_presuspend_undo_targets(struct dm_table *t)
2090 {
2091 	if (!t)
2092 		return;
2093 
2094 	suspend_targets(t, PRESUSPEND_UNDO);
2095 }
2096 
2097 void dm_table_postsuspend_targets(struct dm_table *t)
2098 {
2099 	if (!t)
2100 		return;
2101 
2102 	suspend_targets(t, POSTSUSPEND);
2103 }
2104 
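/*
 * Resume is two-pass: every target's preresume hook must succeed first
 * (the first failure aborts the resume), then each target's resume hook
 * is invoked.
 */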
2105 int dm_table_resume_targets(struct dm_table *t)
2106 {
2107 	int i, r = 0;
2108 
2109 	lockdep_assert_held(&t->md->suspend_lock);
2110 
2111 	for (i = 0; i < t->num_targets; i++) {
2112 		struct dm_target *ti = t->targets + i;
2113 
2114 		if (!ti->type->preresume)
2115 			continue;
2116 
2117 		r = ti->type->preresume(ti);
2118 		if (r) {
2119 			DMERR("%s: %s: preresume failed, error = %d",
2120 			      dm_device_name(t->md), ti->type->name, r);
2121 			return r;
2122 		}
2123 	}
2124 
2125 	for (i = 0; i < t->num_targets; i++) {
2126 		struct dm_target *ti = t->targets + i;
2127 
2128 		if (ti->type->resume)
2129 			ti->type->resume(ti);
2130 	}
2131 
2132 	return 0;
2133 }
2134 
2135 void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
2136 {
2137 	list_add(&cb->list, &t->target_callbacks);
2138 }
2139 EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
2140 
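/*
 * OR together the congestion state of every underlying device's
 * backing_dev_info with any congested_fn registered through
 * dm_table_add_target_callbacks().
 */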
2141 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
2142 {
2143 	struct dm_dev_internal *dd;
2144 	struct list_head *devices = dm_table_get_devices(t);
2145 	struct dm_target_callbacks *cb;
2146 	int r = 0;
2147 
2148 	list_for_each_entry(dd, devices, list) {
2149 		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
2150 		char b[BDEVNAME_SIZE];
2151 
2152 		if (likely(q))
2153 			r |= bdi_congested(q->backing_dev_info, bdi_bits);
2154 		else
2155 			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
2156 				     dm_device_name(t->md),
2157 				     bdevname(dd->dm_dev->bdev, b));
2158 	}
2159 
2160 	list_for_each_entry(cb, &t->target_callbacks, list)
2161 		if (cb->congested_fn)
2162 			r |= cb->congested_fn(cb, bdi_bits);
2163 
2164 	return r;
2165 }
2166 
2167 struct mapped_device *dm_table_get_md(struct dm_table *t)
2168 {
2169 	return t->md;
2170 }
2171 EXPORT_SYMBOL(dm_table_get_md);
2172 
2173 const char *dm_table_device_name(struct dm_table *t)
2174 {
2175 	return dm_device_name(t->md);
2176 }
2177 EXPORT_SYMBOL_GPL(dm_table_device_name);
2178 
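/*
 * Only meaningful for request-based tables: asynchronously kick the
 * mapped device's blk-mq hardware queues.  Bio-based tables return
 * immediately.
 */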
2179 void dm_table_run_md_queue_async(struct dm_table *t)
2180 {
2181 	struct mapped_device *md;
2182 	struct request_queue *queue;
2183 
2184 	if (!dm_table_request_based(t))
2185 		return;
2186 
2187 	md = dm_table_get_md(t);
2188 	queue = dm_get_md_queue(md);
2189 	if (queue)
2190 		blk_mq_run_hw_queues(queue, true);
2191 }
2192 EXPORT_SYMBOL(dm_table_run_md_queue_async);
2193 
2194