/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
        void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
                          unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
                                  union map_info *map_context);
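
/*
 * Illustrative sketch (not part of this header): a bio-based map
 * function for a hypothetical pass-through target that keeps its
 * destination in a made-up "struct example_ctx" at ti->private.
 * It redirects the bio and reports a simple remap:
 *
 *      static int example_map(struct dm_target *ti, struct bio *bio)
 *      {
 *              struct example_ctx *ec = ti->private;
 *
 *              bio->bi_bdev = ec->dev->bdev;
 *              return DM_MAPIO_REMAPPED;
 *      }
 *
 * Returning DM_MAPIO_SUBMITTED instead tells dm core that the target
 * has taken ownership of the bio and will submit or complete it itself.
 */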

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (e.g.,
 *       the multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
                            struct bio *bio, int error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
                                    struct request *clone, int error,
                                    union map_info *map_context);
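
/*
 * Illustrative sketch: an end_io hook that asks dm core to retry a
 * failed bio by pushing it back, assuming a hypothetical helper
 * example_should_retry() supplied by the target:
 *
 *      static int example_end_io(struct dm_target *ti, struct bio *bio,
 *                                int error)
 *      {
 *              if (error && example_should_retry(ti, bio))
 *                      return DM_ENDIO_REQUEUE;
 *              return error;
 *      }
 *
 * Returning the error unchanged lets completion proceed normally.
 */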

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
                              unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
                            unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
                            struct bio_vec *biovec, int max_size);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
                                           struct dm_dev *dev,
                                           sector_t start, sector_t len,
                                           void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data);
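
/*
 * Illustrative sketch: using the iterators above to ask whether every
 * underlying device is non-rotational.  The callout returns non-zero
 * (stopping the walk) as soon as one rotational device is seen:
 *
 *      static int device_is_rotational(struct dm_target *ti,
 *                                      struct dm_dev *dev,
 *                                      sector_t start, sector_t len,
 *                                      void *data)
 *      {
 *              struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *              return q && !blk_queue_nonrot(q);
 *      }
 *
 * All devices are non-rotational iff no callout returned non-zero,
 * i.e. !ti->type->iterate_devices(ti, device_is_rotational, NULL).
 */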

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
                                struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

struct dm_dev {
        struct block_device *bdev;
        fmode_t mode;
        char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
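
/*
 * Illustrative sketch: a constructor opening the single device named in
 * argv[0] and stashing it in a hypothetical per-target context.  Note
 * how ti->error carries a message back on failure:
 *
 *      static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *      {
 *              struct example_ctx *ec;
 *
 *              if (argc != 1) {
 *                      ti->error = "Invalid argument count";
 *                      return -EINVAL;
 *              }
 *              ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *              if (!ec) {
 *                      ti->error = "Cannot allocate context";
 *                      return -ENOMEM;
 *              }
 *              if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *                                &ec->dev)) {
 *                      ti->error = "Device lookup failed";
 *                      kfree(ec);
 *                      return -EINVAL;
 *              }
 *              ti->private = ec;
 *              return 0;
 *      }
 *
 * The matching destructor would call dm_put_device(ti, ec->dev) and
 * free the context.
 */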

/*
 * Information about a target type
 */

struct target_type {
        uint64_t features;
        const char *name;
        struct module *module;
        unsigned version[3];
        dm_ctr_fn ctr;
        dm_dtr_fn dtr;
        dm_map_fn map;
        dm_map_request_fn map_rq;
        dm_endio_fn end_io;
        dm_request_endio_fn rq_end_io;
        dm_presuspend_fn presuspend;
        dm_postsuspend_fn postsuspend;
        dm_preresume_fn preresume;
        dm_resume_fn resume;
        dm_status_fn status;
        dm_message_fn message;
        dm_ioctl_fn ioctl;
        dm_merge_fn merge;
        dm_busy_fn busy;
        dm_iterate_devices_fn iterate_devices;
        dm_io_hints_fn io_hints;

        /* For internal device-mapper use. */
        struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON             0x00000001
#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE      0x00000002
#define dm_target_always_writeable(type) \
                ((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE             0x00000004
#define dm_target_is_immutable(type)    ((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Some targets need to be sent the same WRITE bio several times so
 * that they can send copies of it to different devices.  This function
 * examines any supplied bio and returns the number of copies of it the
 * target requires.
 */
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);

struct dm_target {
        struct dm_table *table;
        struct target_type *type;

        /* target limits */
        sector_t begin;
        sector_t len;

        /* If non-zero, maximum size of I/O submitted to a target. */
        uint32_t max_io_len;

        /*
         * The number of zero-length barrier bios that will be submitted
         * to the target for the purpose of flushing cache.
         *
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         * It is the responsibility of the target driver to remap these bios
         * to the real underlying devices.
         */
        unsigned num_flush_bios;

        /*
         * The number of discard bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
        unsigned num_discard_bios;

        /*
         * The number of WRITE SAME bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
        unsigned num_write_same_bios;

        /*
         * The minimum number of extra bytes allocated in each bio for the
         * target to use.  dm_per_bio_data returns the data location.
         */
        unsigned per_bio_data_size;

        /*
         * If defined, this function is called to find out how many
         * duplicate bios should be sent to the target when writing
         * data.
         */
        dm_num_write_bios_fn num_write_bios;

        /* target specific data */
        void *private;

        /* Used to provide an error string from the ctr */
        char *error;

        /*
         * Set if this target needs to receive flushes regardless of
         * whether or not its underlying devices have support.
         */
        bool flush_supported:1;

        /*
         * Set if this target needs to receive discards regardless of
         * whether or not its underlying devices have support.
         */
        bool discards_supported:1;

        /*
         * Set if the target requires that discard bios be split
         * on a max_io_len boundary.
         */
        bool split_discard_bios:1;

        /*
         * Set if this target does not return zeroes on discarded blocks.
         */
        bool discard_zeroes_data_unsupported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
        struct list_head list;
        int (*congested_fn) (struct dm_target_callbacks *, int);
};

/*
 * For bio-based dm.
 * One of these is allocated for each bio.
 * This structure shouldn't be touched directly by target drivers.
 * It is here so that we can inline dm_per_bio_data and
 * dm_bio_from_per_bio_data
 */
struct dm_target_io {
        struct dm_io *io;
        struct dm_target *ti;
        unsigned target_bio_nr;
        unsigned *len_ptr;
        struct bio clone;
};

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
        return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
        return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}

static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
        return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
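
/*
 * Illustrative sketch: a target that set ti->per_bio_data_size to
 * sizeof(struct example_pb) in its constructor can recover its per-bio
 * area in map/end_io like this:
 *
 *      struct example_pb {
 *              unsigned long start_jiffies;
 *      };
 *
 *      struct example_pb *pb = dm_per_bio_data(bio, sizeof(struct example_pb));
 *      pb->start_jiffies = jiffies;
 *
 * The inverse, dm_bio_from_per_bio_data(pb, sizeof(struct example_pb)),
 * recovers the bio from a saved per-bio pointer.
 */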

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
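
/*
 * Illustrative sketch: wiring a target type into module init/exit.
 * The hook names are hypothetical stand-ins for the functions a real
 * target would provide:
 *
 *      static struct target_type example_target = {
 *              .name    = "example",
 *              .version = {1, 0, 0},
 *              .module  = THIS_MODULE,
 *              .ctr     = example_ctr,
 *              .dtr     = example_dtr,
 *              .map     = example_map,
 *      };
 *
 *      static int __init dm_example_init(void)
 *      {
 *              return dm_register_target(&example_target);
 *      }
 *
 *      static void __exit dm_example_exit(void)
 *      {
 *              dm_unregister_target(&example_target);
 *      }
 */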

/*
 * Target argument parsing.
 */
struct dm_arg_set {
        unsigned argc;
        char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
        unsigned min;
        unsigned max;
        char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
                unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments.  Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
                      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
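
/*
 * Illustrative sketch: parsing a hypothetical "retries <count>" pair in
 * a constructor with the helpers above, bounds-checked via struct dm_arg:
 *
 *      static struct dm_arg _args[] = {
 *              {0, 10, "Invalid number of retries"},
 *      };
 *
 *      struct dm_arg_set as = { .argc = argc, .argv = argv };
 *      unsigned retries = 0;
 *      const char *word = dm_shift_arg(&as);
 *
 *      if (word && !strcasecmp(word, "retries") &&
 *          dm_read_arg(_args, &as, &retries, &ti->error))
 *              return -EINVAL;
 *
 * On failure dm_read_arg has already pointed ti->error at the message
 * from _args[0].
 */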

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * Export the device via the ioctl interface (uses mdptr).
 */
int dm_ioctl_export(struct mapped_device *md, const char *name,
                    const char *uuid);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);
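
/*
 * Illustrative sketch of the create/add/complete sequence, loading a
 * single hypothetical "example" target that spans the first 1024
 * sectors and takes one device-path parameter:
 *
 *      struct dm_table *t;
 *      int r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *
 *      if (!r)
 *              r = dm_table_add_target(t, "example", 0, 1024, "/dev/sdb");
 *      if (!r)
 *              r = dm_table_complete(t);
 */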

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);
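
/*
 * Illustrative sketch of the SRCU-protected access pattern: take a
 * reference to the live table, use it, then drop the reference with
 * the same srcu_idx:
 *
 *      int srcu_idx;
 *      struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *      if (map) {
 *              // ... inspect or use the table ...
 *      }
 *      dm_put_live_table(md, srcu_idx);
 */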

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
                               struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()  __ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()  0
#endif

#define DMCRIT(f, arg...) \
        printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
        printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
        do { \
                if (dm_ratelimit()) \
                        printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
                               f "\n", ## arg); \
        } while (0)

#define DMWARN(f, arg...) \
        printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
        do { \
                if (dm_ratelimit()) \
                        printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
                               f "\n", ## arg); \
        } while (0)

#define DMINFO(f, arg...) \
        printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
        do { \
                if (dm_ratelimit()) \
                        printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
                               "\n", ## arg); \
        } while (0)

#ifdef CONFIG_DM_DEBUG
# define DMDEBUG(f, arg...) \
        printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
# define DMDEBUG_LIMIT(f, arg...) \
        do { \
                if (dm_ratelimit()) \
                        printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
                               "\n", ## arg); \
        } while (0)
#else
# define DMDEBUG(f, arg...) do {} while (0)
# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
                          0 : scnprintf(result + sz, maxlen - sz, x))
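
/*
 * Illustrative sketch: DMEMIT assumes the local variables sz, result
 * and maxlen from a status function's scope, e.g.:
 *
 *      static void example_status(struct dm_target *ti,
 *                                 status_type_t type, unsigned status_flags,
 *                                 char *result, unsigned maxlen)
 *      {
 *              struct example_ctx *ec = ti->private;
 *              unsigned sz = 0;
 *
 *              switch (type) {
 *              case STATUSTYPE_INFO:
 *                      result[0] = '\0';
 *                      break;
 *              case STATUSTYPE_TABLE:
 *                      DMEMIT("%s", ec->dev->name);
 *                      break;
 *              }
 *      }
 */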

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE     1
#define DM_ENDIO_REQUEUE        2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED      0
#define DM_MAPIO_REMAPPED       1
#define DM_MAPIO_REQUEUE        DM_ENDIO_REQUEUE

#define dm_sector_div64(x, y)( \
{ \
        u64 _res; \
        (x) = div64_u64_rem(x, y, &_res); \
        _res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
        sector_t _r = ((n) + (sz) - 1); \
        sector_div(_r, (sz)); \
        _r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
        ((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
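
/*
 * Illustrative sketch: the usual remap arithmetic in a map function,
 * shifting a bio from the target-relative range onto an underlying
 * device that begins at a hypothetical ec->start sector:
 *
 *      bio->bi_iter.bi_sector = ec->start +
 *              dm_target_offset(ti, bio->bi_iter.bi_sector);
 *
 * (bi_iter.bi_sector assumes the immutable-biovec era bio layout.)
 */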

static inline sector_t to_sector(unsigned long n)
{
        return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
        return (n << SECTOR_SHIFT);
}

/*-----------------------------------------------------------------
 * Helpers for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif  /* _LINUX_DEVICE_MAPPER_H */