/*
 * Copyright (C) 2005-2007 Red Hat GmbH
 *
 * A target that delays reads and/or writes and can send
 * them to different devices.
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "delay"

struct delay_c {
	struct timer_list delay_timer;
	struct mutex timer_lock;
	struct workqueue_struct *kdelayd_wq;
	struct work_struct flush_expired_bios;
	struct list_head delayed_bios;
	atomic_t may_delay;

	struct dm_dev *dev_read;
	sector_t start_read;
	unsigned read_delay;
	unsigned reads;

	struct dm_dev *dev_write;
	sector_t start_write;
	unsigned write_delay;
	unsigned writes;
};

struct dm_delay_info {
	struct delay_c *context;
	struct list_head list;
	unsigned long expires;
};

static DEFINE_MUTEX(delayed_bios_lock);

static void handle_delayed_timer(unsigned long data)
{
	struct delay_c *dc = (struct delay_c *)data;

	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}

static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
	mutex_lock(&dc->timer_lock);

	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
		mod_timer(&dc->delay_timer, expires);

	mutex_unlock(&dc->timer_lock);
}

static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
	struct dm_delay_info *delayed, *next;
	unsigned long next_expires = 0;
	int start_timer = 0;
	struct bio_list flush_bios = { };

	mutex_lock(&delayed_bios_lock);
	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
			struct bio *bio = dm_bio_from_per_bio_data(delayed,
						sizeof(struct dm_delay_info));
			list_del(&delayed->list);
			bio_list_add(&flush_bios, bio);
			if ((bio_data_dir(bio) == WRITE))
				delayed->context->writes--;
			else
				delayed->context->reads--;
			continue;
		}

		if (!start_timer) {
			start_timer = 1;
			next_expires = delayed->expires;
		} else
			next_expires = min(next_expires, delayed->expires);
	}

	mutex_unlock(&delayed_bios_lock);

	if (start_timer)
		queue_timeout(dc, next_expires);

	return bio_list_get(&flush_bios);
}

static void flush_expired_bios(struct work_struct *work)
{
	struct delay_c *dc;

	dc = container_of(work, struct delay_c, flush_expired_bios);
	flush_bios(flush_delayed_bios(dc, 0));
}

/*
 * Mapping parameters:
 *    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
 *
 * With separate write parameters, the first set is only used for reads.
 * Offsets are specified in sectors.
 * Delays are specified in milliseconds.
 */
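/*
 * Illustrative use (a sketch, not taken from this file; the device name,
 * length and delay values below are placeholders): a table line of the form
 *
 *	0 16384 delay /dev/sdXa 0 500
 *
 * maps 16384 sectors starting at sector 0 of /dev/sdXa and delays every
 * bio by 500 ms, e.g. loaded via
 *
 *	echo "0 16384 delay /dev/sdXa 0 500" | dmsetup create delayed
 */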
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct delay_c *dc;
	unsigned long long tmpll;
	char dummy;
	int ret;

	if (argc != 3 && argc != 6) {
		ti->error = "Requires exactly 3 or 6 arguments";
		return -EINVAL;
	}

	dc = kmalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	dc->reads = dc->writes = 0;

	ret = -EINVAL;
	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	dc->start_read = tmpll;

	if (sscanf(argv[2], "%u%c", &dc->read_delay, &dummy) != 1) {
		ti->error = "Invalid delay";
		goto bad;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &dc->dev_read);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	dc->dev_write = NULL;
	if (argc == 3)
		goto out;

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid write device sector";
		goto bad_dev_read;
	}
	dc->start_write = tmpll;

	if (sscanf(argv[5], "%u%c", &dc->write_delay, &dummy) != 1) {
		ti->error = "Invalid write delay";
		goto bad_dev_read;
	}

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
			    &dc->dev_write);
	if (ret) {
		ti->error = "Write device lookup failed";
		goto bad_dev_read;
	}

out:
	ret = -EINVAL;
	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
	if (!dc->kdelayd_wq) {
		DMERR("Couldn't start kdelayd");
		goto bad_queue;
	}

	setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);

	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
	INIT_LIST_HEAD(&dc->delayed_bios);
	mutex_init(&dc->timer_lock);
	atomic_set(&dc->may_delay, 1);

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_delay_info);
	ti->private = dc;
	return 0;

bad_queue:
	if (dc->dev_write)
		dm_put_device(ti, dc->dev_write);
bad_dev_read:
	dm_put_device(ti, dc->dev_read);
bad:
	kfree(dc);
	return ret;
}

static void delay_dtr(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	destroy_workqueue(dc->kdelayd_wq);

	dm_put_device(ti, dc->dev_read);

	if (dc->dev_write)
		dm_put_device(ti, dc->dev_write);

	kfree(dc);
}

static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
{
	struct dm_delay_info *delayed;
	unsigned long expires = 0;

	if (!delay || !atomic_read(&dc->may_delay))
		return DM_MAPIO_REMAPPED;

	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

	delayed->context = dc;
	delayed->expires = expires = jiffies + msecs_to_jiffies(delay);

	mutex_lock(&delayed_bios_lock);

	if (bio_data_dir(bio) == WRITE)
		dc->writes++;
	else
		dc->reads++;

	list_add_tail(&delayed->list, &dc->delayed_bios);

	mutex_unlock(&delayed_bios_lock);

	queue_timeout(dc, expires);

	return DM_MAPIO_SUBMITTED;
}

static void delay_presuspend(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 0);
	del_timer_sync(&dc->delay_timer);
	flush_bios(flush_delayed_bios(dc, 1));
}

static void delay_resume(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 1);
}

static int delay_map(struct dm_target *ti, struct bio *bio)
{
	struct delay_c *dc = ti->private;

	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
		bio->bi_bdev = dc->dev_write->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = dc->start_write +
				dm_target_offset(ti, bio->bi_iter.bi_sector);

		return delay_bio(dc, dc->write_delay, bio);
	}

	bio->bi_bdev = dc->dev_read->bdev;
	bio->bi_iter.bi_sector = dc->start_read +
		dm_target_offset(ti, bio->bi_iter.bi_sector);

	return delay_bio(dc, dc->read_delay, bio);
}

static void delay_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct delay_c *dc = ti->private;
	int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u %u", dc->reads, dc->writes);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dc->dev_read->name,
		       (unsigned long long) dc->start_read,
		       dc->read_delay);
		if (dc->dev_write)
			DMEMIT(" %s %llu %u", dc->dev_write->name,
			       (unsigned long long) dc->start_write,
			       dc->write_delay);
		break;
	}
}

static int delay_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct delay_c *dc = ti->private;
	int ret = 0;

	ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data);
	if (ret)
		goto out;

	if (dc->dev_write)
		ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data);

out:
	return ret;
}

static struct target_type delay_target = {
	.name = "delay",
	.version = {1, 2, 1},
	.module = THIS_MODULE,
	.ctr = delay_ctr,
	.dtr = delay_dtr,
	.map = delay_map,
	.presuspend = delay_presuspend,
	.resume = delay_resume,
	.status = delay_status,
	.iterate_devices = delay_iterate_devices,
};

static int __init dm_delay_init(void)
{
	int r;

	r = dm_register_target(&delay_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad_register;
	}

	return 0;

bad_register:
	return r;
}

static void __exit dm_delay_exit(void)
{
	dm_unregister_target(&delay_target);
}

/* Module hooks */
module_init(dm_delay_init);
module_exit(dm_delay_exit);

MODULE_DESCRIPTION(DM_NAME " delay target");
MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
MODULE_LICENSE("GPL");