/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/* FIXME: can we shrink this? */
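/*
 * One struct io tracks a single dm_io() call: error_bits collects one
 * bit per failed region (see dec_count()), count is the number of
 * outstanding bios plus one (see dispatch_io()), and sleeper (sync)
 * or callback/context (async) determine how completion is reported.
 */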
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
};

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
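
/*
 * Illustrative sketch (not part of this file): a typical caller creates
 * a client sized for its expected io, resizes it if that estimate
 * changes, and destroys it on teardown.  NUM_PAGES is a hypothetical
 * placeholder for the caller's own estimate.
 *
 *	struct dm_io_client *client;
 *
 *	client = dm_io_client_create(NUM_PAGES);
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_io_client_destroy(client);
 */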

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}
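
/*
 * This works because do_region() allocates one bvec more than it
 * exposes and decrements bi_max_vecs to hide it from bio_add_page();
 * endio() increments bi_max_vecs again before bio_put() so the bio
 * is freed with its true vector count.
 */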

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	io = bio->bi_private;
	region = bio_get_region(bio);

	bio->bi_max_vecs++;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
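
/*
 * A consumer loops over a dpages iterator roughly as do_region() does
 * below: get_page() yields the current page, the usable length from
 * the current offset, and that offset; next_page() advances to the
 * start of the following page.
 */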

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
	struct io *io = bio->bi_private;

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
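
/*
 * Note that the km_* pair mirrors the vm_* pair above; the only
 * difference is virt_to_page() for directly mapped (e.g. kmalloc'd)
 * memory versus vmalloc_to_page() for vmalloc'd memory.
 */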

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio: we add an extra
		 * bvec for bio_get/set_region() and decrement bi_max_vecs
		 * to hide it from bio_add_page().
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
				      num_bvecs);
		if (unlikely(num_bvecs > BIO_MAX_PAGES))
			num_bvecs = BIO_MAX_PAGES;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio->bi_max_vecs--;
		bio_set_region(bio, region);

		/*
		 * Try to add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error_bits = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;
	io.client = client;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	if (error_bits)
		*error_bits = io.error_bits;

	return io.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set the BIO_RW_SYNCIO bit
 * in io_req->bi_rw. If you fail to do one of these, the IO will be submitted
 * to the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
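
/*
 * Illustrative sketch (not part of this file): a synchronous read of
 * one region into kernel memory.  "client", "dev" and "buf" are
 * hypothetical caller-owned values; only the dm_io API used here is
 * defined by this file and <linux/dm-io.h>.
 *
 *	struct dm_io_region where = {
 *		.bdev = dev,
 *		.sector = 0,
 *		.count = 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.notify.fn = NULL,	/* NULL notify.fn selects sync_io() */
 *		.client = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &where, &error_bits);
 */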