/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/android_kabi.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned int status_flags, char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
			      char *result, unsigned int maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can set it to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target needs to do
 * awkward #ifdefs in their target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode node, void **kaddr,
		pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 * 0    : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct file *bdev_file;
	struct dax_device *dax_dev;
	blk_mode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
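
/*
 * Example (illustrative sketch, not part of this header): a constructor
 * typically opens its backing device with dm_get_device() and the matching
 * destructor releases it with dm_put_device(). All "example_*" names and
 * the "<dev> <start>" constructor line are hypothetical.
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_ctx *ec;
 *		unsigned long long start;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec)
 *			return -ENOMEM;
 *
 *		if (sscanf(argv[1], "%llu", &start) != 1) {
 *			ti->error = "Invalid start sector";
 *			goto bad;
 *		}
 *		ec->start = (sector_t)start;
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			goto bad;
 *		}
 *
 *		ti->private = ec;
 *		return 0;
 *
 *	bad:
 *		kfree(ec);
 *		return -EINVAL;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */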

/*
 * Helper function for getting devices
 */
int dm_devt_from_path(const char *path, dev_t *dev_p);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned int version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_zero_page_range_fn dax_zero_page_range;
	dm_dax_recovery_write_fn dax_recovery_write;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/* For internal device-mapper use. */
	struct list_head list;
};
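
/*
 * Example (illustrative sketch): a minimal bio-based target type wiring the
 * hooks above together; hooks a target does not implement are left NULL.
 * The "example_*" functions are hypothetical (see the constructor sketch
 * above).
 *
 *	static struct target_type example_target = {
 *		.name     = "example",
 *		.version  = {1, 0, 0},
 *		.features = DM_TARGET_NOWAIT,
 *		.module   = THIS_MODULE,
 *		.ctr      = example_ctr,
 *		.dtr      = example_dtr,
 *		.map      = example_map,
 *	};
 *
 * The type is registered with dm_register_target(&example_target) and torn
 * down with dm_unregister_target() (both declared further down), usually
 * via the module_dm() helper at the end of this header.
 */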

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned int num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_secure_erase_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned int per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Automatically set by dm-core if this target supports
	 * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated
	 * using REQ_OP_ZONE_RESET. Target drivers must not set this manually.
	 */
	bool zone_reset_all_supported:1;

	/*
	 * Set if this target requires that discards be split on
	 * 'max_discard_sectors' boundaries.
	 */
	bool max_discard_granularity:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;

	/*
	 * Set if this target implements a zoned device and needs emulation of
	 * zone append operations using regular writes.
	 */
	bool emulate_zone_append:1;

	/*
	 * Set if the target will submit IO using dm_submit_bio_remap()
	 * after returning DM_MAPIO_SUBMITTED from its map function.
	 */
	bool accounts_remapped_io:1;

	/*
	 * Set if the target will submit the DM bio without first calling
	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
	 */
	bool needs_bio_set_dev:1;

	/*
	 * Set if the target supports flush optimization. If all the targets in
	 * a table have flush_bypasses_map set, the dm core will not send
	 * flushes to the targets via a ->map method. It will iterate over
	 * dm_table->devices and send flushes to the devices directly. This
	 * optimization reduces the number of flushes being sent when multiple
	 * targets in a table use the same underlying device.
	 *
	 * This optimization may be enabled on targets that just pass the
	 * flushes to the underlying devices without performing any other
	 * actions on the flush request. Currently, dm-linear and dm-stripe
	 * support it.
	 */
	bool flush_bypasses_map:1;

	/*
	 * Set if the target calls bio_integrity_alloc on bios received
	 * in the map method.
	 */
	bool mempool_needs_integrity:1;
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
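
/*
 * Example (illustrative sketch): a target whose constructor set
 * ti->per_io_data_size = sizeof(struct timed_io) can stash per-bio state in
 * its map method and read it back in end_io; dm_bio_from_per_bio_data() is
 * the inverse mapping. "timed_*" is hypothetical.
 *
 *	struct timed_io {
 *		unsigned long start_jiffies;
 *	};
 *
 *	static int timed_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct timed_io *io = dm_per_bio_data(bio, sizeof(*io));
 *
 *		io->start_jiffies = jiffies;
 *		... remap the bio as usual ...
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 *	static int timed_end_io(struct dm_target *ti, struct bio *bio,
 *				blk_status_t *error)
 *	{
 *		struct timed_io *io = dm_per_bio_data(bio, sizeof(*io));
 *
 *		pr_debug("io took %lu jiffies\n", jiffies - io->start_jiffies);
 *		return DM_ENDIO_DONE;
 *	}
 */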

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned int argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned int min;
	unsigned int max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
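
/*
 * Example (illustrative sketch): parsing a hypothetical constructor line of
 * the form "<dev> <#feature_args> <feature args...>" with the helpers
 * above; this sketch skips the feature args rather than interpreting them.
 * The bounds and error strings are made up for illustration.
 *
 *	static int parse_feature_args(struct dm_target *ti,
 *				      struct dm_arg_set *as)
 *	{
 *		static const struct dm_arg nr_features_arg = {
 *			0, 16, "Invalid number of feature args"
 *		};
 *		unsigned int nr_features;
 *		const char *dev_path;
 *		int r;
 *
 *		dev_path = dm_shift_arg(as);
 *		if (!dev_path) {
 *			ti->error = "Device path missing";
 *			return -EINVAL;
 *		}
 *
 *		r = dm_read_arg_group(&nr_features_arg, as, &nr_features,
 *				      &ti->error);
 *		if (r)
 *			return r;
 *
 *		dm_consume_args(as, nr_features);
 *		return 0;
 *	}
 */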

/*
 *----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *----------------------------------------------------------------
 */

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*
 *---------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------
 */

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (supporting both bio-based
 * and request-based I/O).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
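
/*
 * Example (illustrative sketch of the call order only; table construction
 * is normally driven through the ioctl interface rather than open-coded
 * like this). "md" and "dev_size" are assumed to exist; the target line is
 * made up.
 *
 *	struct dm_table *t = NULL;
 *	int r;
 *
 *	r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
 *	if (!r)
 *		r = dm_table_add_target(t, "linear", 0, dev_size,
 *					"/dev/sdb 0");
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r && t)
 *		dm_table_destroy(t);
 */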

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table blk_crypto_profile functions
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*
 *---------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------
 */
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))

#define DMEMIT_TARGET_NAME_VERSION(y) \
		DMEMIT("target_name=%s,target_version=%u.%u.%u", \
		       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
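
/*
 * Example (illustrative sketch): a status method typically reports runtime
 * state for STATUSTYPE_INFO and echoes its constructor arguments for
 * STATUSTYPE_TABLE, building the string with DMEMIT() (which expects the
 * local variable "sz" shown below). This reuses the hypothetical
 * struct example_ctx from the dm_get_device() sketch earlier.
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned int status_flags, char *result,
 *				   unsigned int maxlen)
 *	{
 *		struct example_ctx *ec = ti->private;
 *		unsigned int sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		case STATUSTYPE_IMA:
 *			result[0] = '\0';
 *			break;
 *		}
 *	}
 */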

/**
 * module_dm() - Helper macro for DM targets that don't do anything
 * special in their module_init and module_exit.
 * Each module may only use this macro once, and calling it replaces
 * module_init() and module_exit().
 *
 * @name: DM target's name
 */
#define module_dm(name) \
static int __init dm_##name##_init(void) \
{ \
	return dm_register_target(&(name##_target)); \
} \
module_init(dm_##name##_init) \
static void __exit dm_##name##_exit(void) \
{ \
	dm_unregister_target(&(name##_target)); \
} \
module_exit(dm_##name##_exit)
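
/*
 * Example (illustrative): a module whose target_type variable is named
 * "example_target" can replace its init/exit boilerplate with:
 *
 *	module_dm(example);
 */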

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3
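
/*
 * Example (illustrative sketch): an end_io method that asks dm core to
 * requeue failed I/O instead of completing it with an error, as a
 * multipath-style target might. "can_retry_bio" is hypothetical; whether
 * requeueing is appropriate is target policy.
 *
 *	static int retry_end_io(struct dm_target *ti, struct bio *bio,
 *				blk_status_t *error)
 *	{
 *		if (*error != BLK_STS_OK && can_retry_bio(ti, bio))
 *			return DM_ENDIO_REQUEUE;
 *		return DM_ENDIO_DONE;
 *	}
 */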

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
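
/*
 * Example (illustrative): dm_div_up(10, 4) == 3, and
 * dm_round_up(10, 4) == 12, i.e. 10 rounded up to the next multiple of 4.
 */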

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
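
/*
 * Example (illustrative sketch): a linear-style map method remapping each
 * bio onto the backing device opened in the constructor, reusing the
 * hypothetical struct example_ctx and its "start" offset from the
 * dm_get_device() sketch earlier.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		if (bio_sectors(bio))
 *			bio->bi_iter.bi_sector = ec->start +
 *				dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */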

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}
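
/*
 * Example (illustrative): with 512-byte sectors (SECTOR_SHIFT == 9),
 * to_sector(4096) == 8 and to_bytes(8) == 4096.
 */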

#endif	/* _LINUX_DEVICE_MAPPER_H */