/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference; see the usage sketch below this struct.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	struct dm_stats stats;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};
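
/*
 * Usage sketch (illustrative, not compiled): dereferencing md->map is
 * expected to go through dm_get_live_table()/dm_put_live_table() from the
 * device-mapper headers, which bracket the access with an SRCU read lock
 * on md->io_barrier so that a table swap can wait for readers to drain:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... look up targets, remap bios, etc. ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */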

void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table; see the lookup sketch below this struct */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;
};
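
/*
 * Lookup sketch (illustrative, not compiled): depth/counts/index above
 * describe a flattened b-tree over the targets' end sectors ("highs").
 * dm-table.c's dm_table_find_target() walks it roughly like this
 * (KEYS_PER_NODE is private to dm-table.c):
 *
 *	unsigned int l, n = 0, k = 0;
 *
 *	for (l = 0; l < t->depth; l++) {
 *		n = child of n selected by k at the previous level;
 *		node = the slice of t->index[l] for node n;
 *		pick the smallest k with node[k] >= sector;
 *	}
 *	return &t->targets[KEYS_PER_NODE * n + k];
 */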

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
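
/*
 * Usage sketch (illustrative, not compiled): the holder pairs the embedded
 * kobject with a completion so teardown can wait for the final sysfs
 * reference to drop.  A kobject release hook in the style of dm-sysfs.c:
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 */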

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
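
/*
 * Semantics sketch (illustrative): __dm_get_module_param() reads a
 * writable module parameter and clamps it, roughly:
 *
 *	param = READ_ONCE(*module_param);
 *	if (!param)
 *		param = def;
 *	else if (param > max)
 *		param = max;
 *	return param;
 *
 * so callers see a sane value even if userspace stored something bogus
 * through sysfs.
 */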

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
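
/*
 * Usage sketch (illustrative): message/status handlers append to @result
 * with scnprintf()-style calls and use this helper to tell the ioctl
 * layer that the buffer was too small, e.g.:
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "%llu", value);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		return 1;
 */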

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif