/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
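/*
 * error_bits holds one bit per region, set when that region's IO fails;
 * count is the number of outstanding bios plus the initial reference
 * taken by the dispatcher (see dispatch_io() below).
 */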

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

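/*
 * Note: DM_IO_MAX_REGIONS is a power of two, so the mask
 * -(unsigned long)DM_IO_MAX_REGIONS is simply ~(DM_IO_MAX_REGIONS - 1):
 * it recovers the aligned 'struct io' pointer while the low bits hold
 * the region number.
 */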
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
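/*
 * Reference-counting sketch: the count starts at 1 (held by
 * dispatch_io()), do_region() takes one reference per bio it submits,
 * and endio() drops one per completed bio.  dispatch_io() drops the
 * initial reference once all bios have been issued, so whichever
 * dec_count() call reaches zero runs complete_io().
 */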
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	int error;

	if (bio->bi_error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_error;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
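/*
 * Contract, as used by do_region() below: get_page() reports the current
 * page, the number of bytes available in it starting at *offset, and that
 * offset; next_page() advances to the next page and resets the offset.
 */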

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;
	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
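/*
 * do_region() covers where->count sectors of a single region, issuing as
 * many bios as needed; each regular bio packs in pages from the dpages
 * iterator until bio_add_page() refuses, while discard and write-same
 * bios are sized by the queue's limits instead.
 */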
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int uninitialized_var(special_cmd_max_sectors);

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (rw & REQ_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (rw & REQ_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
		atomic_inc(&io->count);
		dec_count(io, region, -EOPNOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, BIO_MAX_PAGES,
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

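	/*
	 * The block layer reaches these pages through their linear
	 * mapping, so on architectures with aliasing caches the vmalloc
	 * alias must be flushed before the transfer; for reads the range
	 * is recorded so complete_io() can invalidate the alias once the
	 * data has landed.
	 */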
	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
 * If you fail to do one of these, the IO will be submitted to the disk after
 * q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
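/*
 * Illustrative sketch (not taken from a real caller): a synchronous
 * read of 8 sectors into kernel memory.  'client', 'bdev' and 'buf'
 * are assumed to have been set up by the caller; leaving notify.fn
 * NULL makes the call synchronous.
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.notify.fn    = NULL,
 *		.client       = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &where, &error_bits);
 */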
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}