/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>
#include <linux/keyslot-manager.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024
#define DM_MAX_TARGETS			1048576
#define DM_MAX_TARGET_PARAMS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	struct dm_stats stats;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};

void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_keyslot_manager *ksm;
#endif
};

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif
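
/*
 * Illustrative sketch (not part of the original header): per the comment on
 * mapped_device::map above, callers do not dereference md->map directly but
 * go through the SRCU accessors declared in include/linux/device-mapper.h,
 * along the lines of:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... inspect the live table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */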