1 /*
2 * Copyright (C) 2001 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the LGPL.
6 */
7
8 #ifndef _LINUX_DEVICE_MAPPER_H
9 #define _LINUX_DEVICE_MAPPER_H
10
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/dm-ioctl.h>
14 #include <linux/math64.h>
15 #include <linux/ratelimit.h>
16 #include <linux/android_kabi.h>
17
18 struct dm_dev;
19 struct dm_target;
20 struct dm_table;
21 struct dm_report_zones_args;
22 struct mapped_device;
23 struct bio_vec;
24
25 /*
26 * Type of table, mapped_device's mempool and request_queue
27 */
28 enum dm_queue_mode {
29 DM_TYPE_NONE = 0,
30 DM_TYPE_BIO_BASED = 1,
31 DM_TYPE_REQUEST_BASED = 2,
32 DM_TYPE_DAX_BIO_BASED = 3,
33 };
34
35 typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;
36
37 union map_info {
38 void *ptr;
39 };
40
41 /*
42 * In the constructor the target parameter will already have the
43 * table, type, begin and len fields filled in.
44 */
45 typedef int (*dm_ctr_fn) (struct dm_target *target,
46 unsigned int argc, char **argv);
47
48 /*
49 * The destructor doesn't need to free the dm_target, just
50 * anything hidden ti->private.
51 */
52 typedef void (*dm_dtr_fn) (struct dm_target *ti);
53
54 /*
55 * The map function must return:
56 * < 0: error
57 * = 0: The target will handle the io by resubmitting it later
58 * = 1: simple remap complete
59 * = 2: The target wants to push back the io
60 */
61 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
62 typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
63 struct request *rq,
64 union map_info *map_context,
65 struct request **clone);
66 typedef void (*dm_release_clone_request_fn) (struct request *clone,
67 union map_info *map_context);
68
69 /*
70 * Returns:
71 * < 0 : error (currently ignored)
72 * 0 : ended successfully
73 * 1 : for some reason the io has still not completed (eg,
74 * multipath target might want to requeue a failed io).
75 * 2 : The target wants to push back the io
76 */
77 typedef int (*dm_endio_fn) (struct dm_target *ti,
78 struct bio *bio, blk_status_t *error);
79 typedef int (*dm_request_endio_fn) (struct dm_target *ti,
80 struct request *clone, blk_status_t error,
81 union map_info *map_context);
82
83 typedef void (*dm_presuspend_fn) (struct dm_target *ti);
84 typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
85 typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
86 typedef int (*dm_preresume_fn) (struct dm_target *ti);
87 typedef void (*dm_resume_fn) (struct dm_target *ti);
88
89 typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
90 unsigned status_flags, char *result, unsigned maxlen);
91
92 typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
93 char *result, unsigned maxlen);
94
95 typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
96
97 #ifdef CONFIG_BLK_DEV_ZONED
98 typedef int (*dm_report_zones_fn) (struct dm_target *ti,
99 struct dm_report_zones_args *args,
100 unsigned int nr_zones);
101 #else
102 /*
103 * Define dm_report_zones_fn so that targets can assign to NULL if
104 * CONFIG_BLK_DEV_ZONED disabled. Otherwise each target needs to do
105 * awkward #ifdefs in their target_type, etc.
106 */
107 typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
108 #endif
109
110 /*
111 * These iteration functions are typically used to check (and combine)
112 * properties of underlying devices.
113 * E.g. Does at least one underlying device support flush?
114 * Does any underlying device not support WRITE_SAME?
115 *
116 * The callout function is called once for each contiguous section of
117 * an underlying device. State can be maintained in *data.
118 * Return non-zero to stop iterating through any further devices.
119 */
120 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
121 struct dm_dev *dev,
122 sector_t start, sector_t len,
123 void *data);
124
125 /*
126 * This function must iterate through each section of device used by the
127 * target until it encounters a non-zero return code, which it then returns.
128 * Returns zero if no callout returned non-zero.
129 */
130 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
131 iterate_devices_callout_fn fn,
132 void *data);
133
134 typedef void (*dm_io_hints_fn) (struct dm_target *ti,
135 struct queue_limits *limits);
136
137 /*
138 * Returns:
139 * 0: The target can handle the next I/O immediately.
140 * 1: The target can't handle the next I/O immediately.
141 */
142 typedef int (*dm_busy_fn) (struct dm_target *ti);
143
144 /*
145 * Returns:
146 * < 0 : error
147 * >= 0 : the number of bytes accessible at the address
148 */
149 typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
150 long nr_pages, void **kaddr, pfn_t *pfn);
151 typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
152 void *addr, size_t bytes, struct iov_iter *i);
153 typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
154 size_t nr_pages);
155
156 void dm_error(const char *message);
157
158 struct dm_dev {
159 struct block_device *bdev;
160 struct dax_device *dax_dev;
161 fmode_t mode;
162 char name[16];
163 };
164
165 dev_t dm_get_dev_t(const char *path);
166
167 /*
168 * Constructors should call these functions to ensure destination devices
169 * are opened/closed correctly.
170 */
171 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
172 struct dm_dev **result);
173 void dm_put_device(struct dm_target *ti, struct dm_dev *d);
174
175 /*
176 * Information about a target type
177 */
178
179 struct target_type {
180 uint64_t features;
181 const char *name;
182 struct module *module;
183 unsigned version[3];
184 dm_ctr_fn ctr;
185 dm_dtr_fn dtr;
186 dm_map_fn map;
187 dm_clone_and_map_request_fn clone_and_map_rq;
188 dm_release_clone_request_fn release_clone_rq;
189 dm_endio_fn end_io;
190 dm_request_endio_fn rq_end_io;
191 dm_presuspend_fn presuspend;
192 dm_presuspend_undo_fn presuspend_undo;
193 dm_postsuspend_fn postsuspend;
194 dm_preresume_fn preresume;
195 dm_resume_fn resume;
196 dm_status_fn status;
197 dm_message_fn message;
198 dm_prepare_ioctl_fn prepare_ioctl;
199 dm_report_zones_fn report_zones;
200 dm_busy_fn busy;
201 dm_iterate_devices_fn iterate_devices;
202 dm_io_hints_fn io_hints;
203 dm_dax_direct_access_fn direct_access;
204 dm_dax_copy_iter_fn dax_copy_from_iter;
205 dm_dax_copy_iter_fn dax_copy_to_iter;
206 dm_dax_zero_page_range_fn dax_zero_page_range;
207
208 ANDROID_KABI_RESERVE(1);
209 ANDROID_KABI_RESERVE(2);
210
211 /* For internal device-mapper use. */
212 struct list_head list;
213 };
214
215 /*
216 * Target features
217 */
218
219 /*
220 * Any table that contains an instance of this target must have only one.
221 */
222 #define DM_TARGET_SINGLETON 0x00000001
223 #define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)
224
225 /*
226 * Indicates that a target does not support read-only devices.
227 */
228 #define DM_TARGET_ALWAYS_WRITEABLE 0x00000002
229 #define dm_target_always_writeable(type) \
230 ((type)->features & DM_TARGET_ALWAYS_WRITEABLE)
231
232 /*
233 * Any device that contains a table with an instance of this target may never
234 * have tables containing any different target type.
235 */
236 #define DM_TARGET_IMMUTABLE 0x00000004
237 #define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE)
238
239 /*
240 * Indicates that a target may replace any target; even immutable targets.
241 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
242 */
243 #define DM_TARGET_WILDCARD 0x00000008
244 #define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)
245
246 /*
247 * A target implements own bio data integrity.
248 */
249 #define DM_TARGET_INTEGRITY 0x00000010
250 #define dm_target_has_integrity(type) ((type)->features & DM_TARGET_INTEGRITY)
251
252 /*
253 * A target passes integrity data to the lower device.
254 */
255 #define DM_TARGET_PASSES_INTEGRITY 0x00000020
256 #define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
257
258 /*
259 * Indicates support for zoned block devices:
260 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
261 * block devices but does not support combining different zoned models.
262 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
263 * devices with different zoned models.
264 */
265 #ifdef CONFIG_BLK_DEV_ZONED
266 #define DM_TARGET_ZONED_HM 0x00000040
267 #define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
268 #else
269 #define DM_TARGET_ZONED_HM 0x00000000
270 #define dm_target_supports_zoned_hm(type) (false)
271 #endif
272
273 /*
274 * A target handles REQ_NOWAIT
275 */
276 #define DM_TARGET_NOWAIT 0x00000080
277 #define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)
278
279 /*
280 * A target supports passing through inline crypto support.
281 */
282 #define DM_TARGET_PASSES_CRYPTO 0x00000100
283 #define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
284
285 #ifdef CONFIG_BLK_DEV_ZONED
286 #define DM_TARGET_MIXED_ZONED_MODEL 0x00000200
287 #define dm_target_supports_mixed_zoned_model(type) \
288 ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
289 #else
290 #define DM_TARGET_MIXED_ZONED_MODEL 0x00000000
291 #define dm_target_supports_mixed_zoned_model(type) (false)
292 #endif
293
294 struct dm_target {
295 struct dm_table *table;
296 struct target_type *type;
297
298 /* target limits */
299 sector_t begin;
300 sector_t len;
301
302 /* If non-zero, maximum size of I/O submitted to a target. */
303 uint32_t max_io_len;
304
305 /*
306 * A number of zero-length barrier bios that will be submitted
307 * to the target for the purpose of flushing cache.
308 *
309 * The bio number can be accessed with dm_bio_get_target_bio_nr.
310 * It is a responsibility of the target driver to remap these bios
311 * to the real underlying devices.
312 */
313 unsigned num_flush_bios;
314
315 /*
316 * The number of discard bios that will be submitted to the target.
317 * The bio number can be accessed with dm_bio_get_target_bio_nr.
318 */
319 unsigned num_discard_bios;
320
321 /*
322 * The number of secure erase bios that will be submitted to the target.
323 * The bio number can be accessed with dm_bio_get_target_bio_nr.
324 */
325 unsigned num_secure_erase_bios;
326
327 /*
328 * The number of WRITE SAME bios that will be submitted to the target.
329 * The bio number can be accessed with dm_bio_get_target_bio_nr.
330 */
331 unsigned num_write_same_bios;
332
333 /*
334 * The number of WRITE ZEROES bios that will be submitted to the target.
335 * The bio number can be accessed with dm_bio_get_target_bio_nr.
336 */
337 unsigned num_write_zeroes_bios;
338
339 /*
340 * The minimum number of extra bytes allocated in each io for the
341 * target to use.
342 */
343 unsigned per_io_data_size;
344
345 /* target specific data */
346 void *private;
347
348 /* Used to provide an error string from the ctr */
349 char *error;
350
351 /*
352 * Set if this target needs to receive flushes regardless of
353 * whether or not its underlying devices have support.
354 */
355 bool flush_supported:1;
356
357 /*
358 * Set if this target needs to receive discards regardless of
359 * whether or not its underlying devices have support.
360 */
361 bool discards_supported:1;
362
363 /*
364 * Set if we need to limit the number of in-flight bios when swapping.
365 */
366 bool limit_swap_bios:1;
367
368 /*
369 * Set if this target implements a a zoned device and needs emulation of
370 * zone append operations using regular writes.
371 */
372 bool emulate_zone_append:1;
373
374 ANDROID_KABI_RESERVE(1);
375 ANDROID_KABI_RESERVE(2);
376 };
377
378 void *dm_per_bio_data(struct bio *bio, size_t data_size);
379 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
380 unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
381
382 u64 dm_start_time_ns_from_clone(struct bio *bio);
383
384 int dm_register_target(struct target_type *t);
385 void dm_unregister_target(struct target_type *t);
386
387 /*
388 * Target argument parsing.
389 */
390 struct dm_arg_set {
391 unsigned argc;
392 char **argv;
393 };
394
395 /*
396 * The minimum and maximum value of a numeric argument, together with
397 * the error message to use if the number is found to be outside that range.
398 */
399 struct dm_arg {
400 unsigned min;
401 unsigned max;
402 char *error;
403 };
404
405 /*
406 * Validate the next argument, either returning it as *value or, if invalid,
407 * returning -EINVAL and setting *error.
408 */
409 int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
410 unsigned *value, char **error);
411
412 /*
413 * Process the next argument as the start of a group containing between
414 * arg->min and arg->max further arguments. Either return the size as
415 * *num_args or, if invalid, return -EINVAL and set *error.
416 */
417 int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
418 unsigned *num_args, char **error);
419
420 /*
421 * Return the current argument and shift to the next.
422 */
423 const char *dm_shift_arg(struct dm_arg_set *as);
424
425 /*
426 * Move through num_args arguments.
427 */
428 void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
429
430 /*-----------------------------------------------------------------
431 * Functions for creating and manipulating mapped devices.
432 * Drop the reference with dm_put when you finish with the object.
433 *---------------------------------------------------------------*/
434
435 /*
436 * DM_ANY_MINOR chooses the next available minor number.
437 */
438 #define DM_ANY_MINOR (-1)
439 int dm_create(int minor, struct mapped_device **md);
440
441 /*
442 * Reference counting for md.
443 */
444 struct mapped_device *dm_get_md(dev_t dev);
445 void dm_get(struct mapped_device *md);
446 int dm_hold(struct mapped_device *md);
447 void dm_put(struct mapped_device *md);
448
449 /*
450 * An arbitrary pointer may be stored alongside a mapped device.
451 */
452 void dm_set_mdptr(struct mapped_device *md, void *ptr);
453 void *dm_get_mdptr(struct mapped_device *md);
454
455 /*
456 * A device can still be used while suspended, but I/O is deferred.
457 */
458 int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
459 int dm_resume(struct mapped_device *md);
460
461 /*
462 * Event functions.
463 */
464 uint32_t dm_get_event_nr(struct mapped_device *md);
465 int dm_wait_event(struct mapped_device *md, int event_nr);
466 uint32_t dm_next_uevent_seq(struct mapped_device *md);
467 void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
468
469 /*
470 * Info functions.
471 */
472 const char *dm_device_name(struct mapped_device *md);
473 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
474 struct gendisk *dm_disk(struct mapped_device *md);
475 int dm_suspended(struct dm_target *ti);
476 int dm_post_suspending(struct dm_target *ti);
477 int dm_noflush_suspending(struct dm_target *ti);
478 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
479 union map_info *dm_get_rq_mapinfo(struct request *rq);
480
481 #ifdef CONFIG_BLK_DEV_ZONED
482 struct dm_report_zones_args {
483 struct dm_target *tgt;
484 sector_t next_sector;
485
486 void *orig_data;
487 report_zones_cb orig_cb;
488 unsigned int zone_idx;
489
490 /* must be filled by ->report_zones before calling dm_report_zones_cb */
491 sector_t start;
492 };
493 int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
494 struct dm_report_zones_args *args, unsigned int nr_zones);
495 #endif /* CONFIG_BLK_DEV_ZONED */
496
497 /*
498 * Device mapper functions to parse and create devices specified by the
499 * parameter "dm-mod.create="
500 */
501 int __init dm_early_create(struct dm_ioctl *dmi,
502 struct dm_target_spec **spec_array,
503 char **target_params_array);
504
505 struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
506
507 /*
508 * Geometry functions.
509 */
510 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
511 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
512
513 /*-----------------------------------------------------------------
514 * Functions for manipulating device-mapper tables.
515 *---------------------------------------------------------------*/
516
517 /*
518 * First create an empty table.
519 */
520 int dm_table_create(struct dm_table **result, fmode_t mode,
521 unsigned num_targets, struct mapped_device *md);
522
523 /*
524 * Then call this once for each target.
525 */
526 int dm_table_add_target(struct dm_table *t, const char *type,
527 sector_t start, sector_t len, char *params);
528
529 /*
530 * Target can use this to set the table's type.
531 * Can only ever be called from a target's ctr.
532 * Useful for "hybrid" target (supports both bio-based
533 * and request-based).
534 */
535 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
536
537 /*
538 * Finally call this to make the table ready for use.
539 */
540 int dm_table_complete(struct dm_table *t);
541
542 /*
543 * Destroy the table when finished.
544 */
545 void dm_table_destroy(struct dm_table *t);
546
547 /*
548 * Target may require that it is never sent I/O larger than len.
549 */
550 int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
551
552 /*
553 * Table reference counting.
554 */
555 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
556 void dm_put_live_table(struct mapped_device *md, int srcu_idx);
557 void dm_sync_table(struct mapped_device *md);
558
559 /*
560 * Queries
561 */
562 sector_t dm_table_get_size(struct dm_table *t);
563 unsigned int dm_table_get_num_targets(struct dm_table *t);
564 fmode_t dm_table_get_mode(struct dm_table *t);
565 struct mapped_device *dm_table_get_md(struct dm_table *t);
566 const char *dm_table_device_name(struct dm_table *t);
567
568 /*
569 * Trigger an event.
570 */
571 void dm_table_event(struct dm_table *t);
572
573 /*
574 * Run the queue for request-based targets.
575 */
576 void dm_table_run_md_queue_async(struct dm_table *t);
577
578 /*
579 * The device must be suspended before calling this method.
580 * Returns the previous table, which the caller must destroy.
581 */
582 struct dm_table *dm_swap_table(struct mapped_device *md,
583 struct dm_table *t);
584
585 /*
586 * Table blk_crypto_profile functions
587 */
588 void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
589
590 /*-----------------------------------------------------------------
591 * Macros.
592 *---------------------------------------------------------------*/
593 #define DM_NAME "device-mapper"
594
595 #define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
596
597 #define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
598
599 #define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
600 #define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
601 #define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
602 #define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
603 #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
604 #define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
605
606 #define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
607 #define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
608
609 #define DMEMIT(x...) sz += ((sz >= maxlen) ? \
610 0 : scnprintf(result + sz, maxlen - sz, x))
611
612 #define DMEMIT_TARGET_NAME_VERSION(y) \
613 DMEMIT("target_name=%s,target_version=%u.%u.%u", \
614 (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
615
616 /*
617 * Definitions of return values from target end_io function.
618 */
619 #define DM_ENDIO_DONE 0
620 #define DM_ENDIO_INCOMPLETE 1
621 #define DM_ENDIO_REQUEUE 2
622 #define DM_ENDIO_DELAY_REQUEUE 3
623
624 /*
625 * Definitions of return values from target map function.
626 */
627 #define DM_MAPIO_SUBMITTED 0
628 #define DM_MAPIO_REMAPPED 1
629 #define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
630 #define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE
631 #define DM_MAPIO_KILL 4
632
633 #define dm_sector_div64(x, y)( \
634 { \
635 u64 _res; \
636 (x) = div64_u64_rem(x, y, &_res); \
637 _res; \
638 } \
639 )
640
641 /*
642 * Ceiling(n / sz)
643 */
644 #define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
645
646 #define dm_sector_div_up(n, sz) ( \
647 { \
648 sector_t _r = ((n) + (sz) - 1); \
649 sector_div(_r, (sz)); \
650 _r; \
651 } \
652 )
653
654 /*
655 * ceiling(n / size) * size
656 */
657 #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
658
659 /*
660 * Sector offset taken relative to the start of the target instead of
661 * relative to the start of the device.
662 */
663 #define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
664
to_sector(unsigned long long n)665 static inline sector_t to_sector(unsigned long long n)
666 {
667 return (n >> SECTOR_SHIFT);
668 }
669
to_bytes(sector_t n)670 static inline unsigned long to_bytes(sector_t n)
671 {
672 return (n << SECTOR_SHIFT);
673 }
674
675 #endif /* _LINUX_DEVICE_MAPPER_H */
676