1 /*
2  * Copyright (C) 2009-2011 Red Hat, Inc.
3  *
4  * Author: Mikulas Patocka <mpatocka@redhat.com>
5  *
6  * This file is released under the GPL.
7  */
8 
9 #include "dm-bufio.h"
10 
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/jiffies.h>
15 #include <linux/vmalloc.h>
16 #include <linux/shrinker.h>
17 #include <linux/module.h>
18 #include <linux/rbtree.h>
19 
20 #define DM_MSG_PREFIX "bufio"
21 
22 /*
23  * Memory management policy:
24  *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
25  *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
26  *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
27  *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
28  *	dirty buffers.
29  */
30 #define DM_BUFIO_MIN_BUFFERS		8
31 
32 #define DM_BUFIO_MEMORY_PERCENT		2
33 #define DM_BUFIO_VMALLOC_PERCENT	25
34 #define DM_BUFIO_WRITEBACK_PERCENT	75
35 
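/*
 * For illustration (not part of the driver logic): with the defaults above,
 * a machine with 8 GiB of non-highmem RAM gets a default cache of roughly
 * 2% of it, i.e. about 164 MiB, further capped at 25% of the vmalloc area
 * where that is smaller (see dm_bufio_init below).  Background writeback of
 * a client kicks in once about 75% of its buffer limit is dirty.
 */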
36 /*
37  * Check buffer ages in this interval (seconds)
38  */
39 #define DM_BUFIO_WORK_TIMER_SECS	30
40 
41 /*
42  * Free buffers when they are older than this (seconds)
43  */
44 #define DM_BUFIO_DEFAULT_AGE_SECS	300
45 
46 /*
47  * The number of bytes of cached data to keep around.
48  */
49 #define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
50 
51 /*
52  * The number of bvec entries that are embedded directly in the buffer.
53  * If the chunk size is larger, dm-io is used to do the io.
54  */
55 #define DM_BUFIO_INLINE_VECS		16
56 
57 /*
58  * Don't try to use kmem_cache_alloc for blocks larger than this.
59  * For explanation, see alloc_buffer_data below.
60  */
61 #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
62 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))
63 
64 /*
65  * dm_buffer->list_mode
66  */
67 #define LIST_CLEAN	0
68 #define LIST_DIRTY	1
69 #define LIST_SIZE	2
70 
71 /*
72  * Linking of buffers:
73  *	All buffers are linked into the buffer_tree with their node field.
74  *
75  *	Clean buffers that are not being written (B_WRITING not set)
76  *	are linked to lru[LIST_CLEAN] with their lru_list field.
77  *
78  *	Dirty and clean buffers that are being written are linked to
79  *	lru[LIST_DIRTY] with their lru_list field. When the write
80  *	finishes, the buffer cannot be relinked immediately (because we
81  *	are in an interrupt context and relinking requires process
82  *	context), so some clean-not-writing buffers can be held on
83  *	dirty_lru too.  They are later added to lru in the process
84  *	context.
85  */
86 struct dm_bufio_client {
87 	struct mutex lock;
88 
89 	struct list_head lru[LIST_SIZE];
90 	unsigned long n_buffers[LIST_SIZE];
91 
92 	struct block_device *bdev;
93 	unsigned block_size;
94 	unsigned char sectors_per_block_bits;
95 	unsigned char pages_per_block_bits;
96 	unsigned char blocks_per_page_bits;
97 	unsigned aux_size;
98 	void (*alloc_callback)(struct dm_buffer *);
99 	void (*write_callback)(struct dm_buffer *);
100 
101 	struct dm_io_client *dm_io;
102 
103 	struct list_head reserved_buffers;
104 	unsigned need_reserved_buffers;
105 
106 	unsigned minimum_buffers;
107 
108 	struct rb_root buffer_tree;
109 	wait_queue_head_t free_buffer_wait;
110 
111 	int async_write_error;
112 
113 	struct list_head client_list;
114 	struct shrinker shrinker;
115 };
116 
117 /*
118  * Buffer state bits.
119  */
120 #define B_READING	0
121 #define B_WRITING	1
122 #define B_DIRTY		2
123 
124 /*
125  * Describes how the block was allocated:
126  * kmem_cache_alloc(), __get_free_pages() or vmalloc().
127  * See the comment at alloc_buffer_data.
128  */
129 enum data_mode {
130 	DATA_MODE_SLAB = 0,
131 	DATA_MODE_GET_FREE_PAGES = 1,
132 	DATA_MODE_VMALLOC = 2,
133 	DATA_MODE_LIMIT = 3
134 };
135 
136 struct dm_buffer {
137 	struct rb_node node;
138 	struct list_head lru_list;
139 	sector_t block;
140 	void *data;
141 	enum data_mode data_mode;
142 	unsigned char list_mode;		/* LIST_* */
143 	unsigned hold_count;
144 	int read_error;
145 	int write_error;
146 	unsigned long state;
147 	unsigned long last_accessed;
148 	struct dm_bufio_client *c;
149 	struct list_head write_list;
150 	struct bio bio;
151 	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
152 };
153 
154 /*----------------------------------------------------------------*/
155 
156 static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
157 static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
158 
159 static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
160 {
161 	unsigned ret = c->blocks_per_page_bits - 1;
162 
163 	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
164 
165 	return ret;
166 }
167 
168 #define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
169 #define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])
170 
171 #define dm_bufio_in_request()	(!!current->bio_list)
172 
173 static void dm_bufio_lock(struct dm_bufio_client *c)
174 {
175 	mutex_lock_nested(&c->lock, dm_bufio_in_request());
176 }
177 
178 static int dm_bufio_trylock(struct dm_bufio_client *c)
179 {
180 	return mutex_trylock(&c->lock);
181 }
182 
183 static void dm_bufio_unlock(struct dm_bufio_client *c)
184 {
185 	mutex_unlock(&c->lock);
186 }
187 
188 /*
189  * FIXME Move to sched.h?
190  */
191 #ifdef CONFIG_PREEMPT_VOLUNTARY
192 #  define dm_bufio_cond_resched()		\
193 do {						\
194 	if (unlikely(need_resched()))		\
195 		_cond_resched();		\
196 } while (0)
197 #else
198 #  define dm_bufio_cond_resched()                do { } while (0)
199 #endif
200 
201 /*----------------------------------------------------------------*/
202 
203 /*
204  * Default cache size: available memory divided by the ratio.
205  */
206 static unsigned long dm_bufio_default_cache_size;
207 
208 /*
209  * Total cache size set by the user.
210  */
211 static unsigned long dm_bufio_cache_size;
212 
213 /*
214  * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
215  * at any time.  If it disagrees, the user has changed cache size.
216  */
217 static unsigned long dm_bufio_cache_size_latch;
218 
219 static DEFINE_SPINLOCK(param_spinlock);
220 
221 /*
222  * Buffers are freed after this timeout
223  */
224 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
225 static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
226 
227 static unsigned long dm_bufio_peak_allocated;
228 static unsigned long dm_bufio_allocated_kmem_cache;
229 static unsigned long dm_bufio_allocated_get_free_pages;
230 static unsigned long dm_bufio_allocated_vmalloc;
231 static unsigned long dm_bufio_current_allocated;
232 
233 /*----------------------------------------------------------------*/
234 
235 /*
236  * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
237  */
238 static unsigned long dm_bufio_cache_size_per_client;
239 
240 /*
241  * The current number of clients.
242  */
243 static int dm_bufio_client_count;
244 
245 /*
246  * The list of all clients.
247  */
248 static LIST_HEAD(dm_bufio_all_clients);
249 
250 /*
251  * This mutex protects dm_bufio_cache_size_latch,
252  * dm_bufio_cache_size_per_client and dm_bufio_client_count
253  */
254 static DEFINE_MUTEX(dm_bufio_clients_lock);
255 
256 /*----------------------------------------------------------------
257  * A red/black tree acts as an index for all the buffers.
258  *--------------------------------------------------------------*/
259 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
260 {
261 	struct rb_node *n = c->buffer_tree.rb_node;
262 	struct dm_buffer *b;
263 
264 	while (n) {
265 		b = container_of(n, struct dm_buffer, node);
266 
267 		if (b->block == block)
268 			return b;
269 
270 		n = (b->block < block) ? n->rb_left : n->rb_right;
271 	}
272 
273 	return NULL;
274 }
275 
276 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
277 {
278 	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
279 	struct dm_buffer *found;
280 
281 	while (*new) {
282 		found = container_of(*new, struct dm_buffer, node);
283 
284 		if (found->block == b->block) {
285 			BUG_ON(found != b);
286 			return;
287 		}
288 
289 		parent = *new;
290 		new = (found->block < b->block) ?
291 			&((*new)->rb_left) : &((*new)->rb_right);
292 	}
293 
294 	rb_link_node(&b->node, parent, new);
295 	rb_insert_color(&b->node, &c->buffer_tree);
296 }
297 
298 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
299 {
300 	rb_erase(&b->node, &c->buffer_tree);
301 }
302 
303 /*----------------------------------------------------------------*/
304 
305 static void adjust_total_allocated(enum data_mode data_mode, long diff)
306 {
307 	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
308 		&dm_bufio_allocated_kmem_cache,
309 		&dm_bufio_allocated_get_free_pages,
310 		&dm_bufio_allocated_vmalloc,
311 	};
312 
313 	spin_lock(&param_spinlock);
314 
315 	*class_ptr[data_mode] += diff;
316 
317 	dm_bufio_current_allocated += diff;
318 
319 	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
320 		dm_bufio_peak_allocated = dm_bufio_current_allocated;
321 
322 	spin_unlock(&param_spinlock);
323 }
324 
325 /*
326  * Change the number of clients and recalculate per-client limit.
327  */
328 static void __cache_size_refresh(void)
329 {
330 	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
331 	BUG_ON(dm_bufio_client_count < 0);
332 
333 	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
334 
335 	/*
336 	 * Use default if set to 0 and report the actual cache size used.
337 	 */
338 	if (!dm_bufio_cache_size_latch) {
339 		(void)cmpxchg(&dm_bufio_cache_size, 0,
340 			      dm_bufio_default_cache_size);
341 		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
342 	}
343 
344 	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
345 					 (dm_bufio_client_count ? : 1);
346 }
347 
348 /*
349  * Allocating buffer data.
350  *
351  * Small buffers are allocated with kmem_cache, to use space optimally.
352  *
353  * For large buffers, we choose between get_free_pages and vmalloc.
354  * Each has advantages and disadvantages.
355  *
356  * __get_free_pages can randomly fail if the memory is fragmented.
357  * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
358  * as low as 128M) so using it for caching is not appropriate.
359  *
360  * If the allocation may fail we use __get_free_pages. Memory fragmentation
361  * won't have a fatal effect here, but it just causes flushes of some other
362  * buffers and more I/O will be performed. Don't use __get_free_pages if it
363  * always fails (i.e. order >= MAX_ORDER).
364  *
365  * If the allocation shouldn't fail we use __vmalloc. This is only for the
366  * initial reserve allocation, so there's no risk of wasting all vmalloc
367  * space.
368  */
369 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
370 			       enum data_mode *data_mode)
371 {
372 	unsigned noio_flag;
373 	void *ptr;
374 
375 	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
376 		*data_mode = DATA_MODE_SLAB;
377 		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
378 	}
379 
380 	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
381 	    gfp_mask & __GFP_NORETRY) {
382 		*data_mode = DATA_MODE_GET_FREE_PAGES;
383 		return (void *)__get_free_pages(gfp_mask,
384 						c->pages_per_block_bits);
385 	}
386 
387 	*data_mode = DATA_MODE_VMALLOC;
388 
389 	/*
390 	 * __vmalloc allocates the data pages and auxiliary structures with
391 	 * gfp_flags that were specified, but pagetables are always allocated
392 	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
393 	 *
394 	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
395 	 * all allocations done by this process (including pagetables) are done
396 	 * as if GFP_NOIO was specified.
397 	 */
398 
399 	if (gfp_mask & __GFP_NORETRY)
400 		noio_flag = memalloc_noio_save();
401 
402 	ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
403 
404 	if (gfp_mask & __GFP_NORETRY)
405 		memalloc_noio_restore(noio_flag);
406 
407 	return ptr;
408 }
409 
410 /*
411  * Free buffer's data.
412  */
413 static void free_buffer_data(struct dm_bufio_client *c,
414 			     void *data, enum data_mode data_mode)
415 {
416 	switch (data_mode) {
417 	case DATA_MODE_SLAB:
418 		kmem_cache_free(DM_BUFIO_CACHE(c), data);
419 		break;
420 
421 	case DATA_MODE_GET_FREE_PAGES:
422 		free_pages((unsigned long)data, c->pages_per_block_bits);
423 		break;
424 
425 	case DATA_MODE_VMALLOC:
426 		vfree(data);
427 		break;
428 
429 	default:
430 		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
431 		       data_mode);
432 		BUG();
433 	}
434 }
435 
436 /*
437  * Allocate buffer and its data.
438  */
439 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
440 {
441 	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
442 				      gfp_mask);
443 
444 	if (!b)
445 		return NULL;
446 
447 	b->c = c;
448 
449 	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
450 	if (!b->data) {
451 		kfree(b);
452 		return NULL;
453 	}
454 
455 	adjust_total_allocated(b->data_mode, (long)c->block_size);
456 
457 	return b;
458 }
459 
460 /*
461  * Free buffer and its data.
462  */
463 static void free_buffer(struct dm_buffer *b)
464 {
465 	struct dm_bufio_client *c = b->c;
466 
467 	adjust_total_allocated(b->data_mode, -(long)c->block_size);
468 
469 	free_buffer_data(c, b->data, b->data_mode);
470 	kfree(b);
471 }
472 
473 /*
474  * Link buffer to the buffer tree and the clean or dirty queue.
475  */
476 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
477 {
478 	struct dm_bufio_client *c = b->c;
479 
480 	c->n_buffers[dirty]++;
481 	b->block = block;
482 	b->list_mode = dirty;
483 	list_add(&b->lru_list, &c->lru[dirty]);
484 	__insert(b->c, b);
485 	b->last_accessed = jiffies;
486 }
487 
488 /*
489  * Unlink buffer from the buffer tree and the dirty or clean queue.
490  */
491 static void __unlink_buffer(struct dm_buffer *b)
492 {
493 	struct dm_bufio_client *c = b->c;
494 
495 	BUG_ON(!c->n_buffers[b->list_mode]);
496 
497 	c->n_buffers[b->list_mode]--;
498 	__remove(b->c, b);
499 	list_del(&b->lru_list);
500 }
501 
502 /*
503  * Place the buffer to the head of dirty or clean LRU queue.
504  */
505 static void __relink_lru(struct dm_buffer *b, int dirty)
506 {
507 	struct dm_bufio_client *c = b->c;
508 
509 	BUG_ON(!c->n_buffers[b->list_mode]);
510 
511 	c->n_buffers[b->list_mode]--;
512 	c->n_buffers[dirty]++;
513 	b->list_mode = dirty;
514 	list_move(&b->lru_list, &c->lru[dirty]);
515 	b->last_accessed = jiffies;
516 }
517 
518 /*----------------------------------------------------------------
519  * Submit I/O on the buffer.
520  *
521  * Bio interface is faster but it has some problems:
522  *	the vector list is limited (increasing this limit increases
523  *	memory-consumption per buffer, so it is not viable);
524  *
525  *	the memory must be direct-mapped, not vmalloced;
526  *
527  *	the I/O driver can reject requests spuriously if it thinks that
528  *	the requests are too big for the device or if they cross a
529  *	controller-defined memory boundary.
530  *
531  * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
532  * it is not vmalloced, try using the bio interface.
533  *
534  * If the buffer is big, if it is vmalloced or if the underlying device
535  * rejects the bio because it is too large, use dm-io layer to do the I/O.
536  * The dm-io layer splits the I/O into multiple requests, avoiding the above
537  * shortcomings.
538  *--------------------------------------------------------------*/
539 
540 /*
541  * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
542  * that the request was handled directly with bio interface.
543  */
544 static void dmio_complete(unsigned long error, void *context)
545 {
546 	struct dm_buffer *b = context;
547 
548 	b->bio.bi_error = error ? -EIO : 0;
549 	b->bio.bi_end_io(&b->bio);
550 }
551 
552 static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
553 		     bio_end_io_t *end_io)
554 {
555 	int r;
556 	struct dm_io_request io_req = {
557 		.bi_rw = rw,
558 		.notify.fn = dmio_complete,
559 		.notify.context = b,
560 		.client = b->c->dm_io,
561 	};
562 	struct dm_io_region region = {
563 		.bdev = b->c->bdev,
564 		.sector = block << b->c->sectors_per_block_bits,
565 		.count = b->c->block_size >> SECTOR_SHIFT,
566 	};
567 
568 	if (b->data_mode != DATA_MODE_VMALLOC) {
569 		io_req.mem.type = DM_IO_KMEM;
570 		io_req.mem.ptr.addr = b->data;
571 	} else {
572 		io_req.mem.type = DM_IO_VMA;
573 		io_req.mem.ptr.vma = b->data;
574 	}
575 
576 	b->bio.bi_end_io = end_io;
577 
578 	r = dm_io(&io_req, 1, &region, NULL);
579 	if (r) {
580 		b->bio.bi_error = r;
581 		end_io(&b->bio);
582 	}
583 }
584 
585 static void inline_endio(struct bio *bio)
586 {
587 	bio_end_io_t *end_fn = bio->bi_private;
588 	int error = bio->bi_error;
589 
590 	/*
591 	 * Reset the bio to free any attached resources
592 	 * (e.g. bio integrity profiles).
593 	 */
594 	bio_reset(bio);
595 
596 	bio->bi_error = error;
597 	end_fn(bio);
598 }
599 
600 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
601 			   bio_end_io_t *end_io)
602 {
603 	char *ptr;
604 	int len;
605 
606 	bio_init(&b->bio);
607 	b->bio.bi_io_vec = b->bio_vec;
608 	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
609 	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
610 	b->bio.bi_bdev = b->c->bdev;
611 	b->bio.bi_end_io = inline_endio;
612 	/*
613 	 * Use of .bi_private isn't a problem here because
614 	 * the dm_buffer's inline bio is local to bufio.
615 	 */
616 	b->bio.bi_private = end_io;
617 
618 	/*
619 	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
620 	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
621 	 */
622 	ptr = b->data;
623 	len = b->c->block_size;
624 
625 	if (len >= PAGE_SIZE)
626 		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
627 	else
628 		BUG_ON((unsigned long)ptr & (len - 1));
629 
630 	do {
631 		if (!bio_add_page(&b->bio, virt_to_page(ptr),
632 				  len < PAGE_SIZE ? len : PAGE_SIZE,
633 				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
634 			BUG_ON(b->c->block_size <= PAGE_SIZE);
635 			use_dmio(b, rw, block, end_io);
636 			return;
637 		}
638 
639 		len -= PAGE_SIZE;
640 		ptr += PAGE_SIZE;
641 	} while (len > 0);
642 
643 	submit_bio(rw, &b->bio);
644 }
645 
646 static void submit_io(struct dm_buffer *b, int rw, sector_t block,
647 		      bio_end_io_t *end_io)
648 {
649 	if (rw == WRITE && b->c->write_callback)
650 		b->c->write_callback(b);
651 
652 	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
653 	    b->data_mode != DATA_MODE_VMALLOC)
654 		use_inline_bio(b, rw, block, end_io);
655 	else
656 		use_dmio(b, rw, block, end_io);
657 }
658 
659 /*----------------------------------------------------------------
660  * Writing dirty buffers
661  *--------------------------------------------------------------*/
662 
663 /*
664  * The endio routine for write.
665  *
666  * Set the error, clear B_WRITING bit and wake anyone who was waiting on
667  * it.
668  */
669 static void write_endio(struct bio *bio)
670 {
671 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
672 
673 	b->write_error = bio->bi_error;
674 	if (unlikely(bio->bi_error)) {
675 		struct dm_bufio_client *c = b->c;
676 		int error = bio->bi_error;
677 		(void)cmpxchg(&c->async_write_error, 0, error);
678 	}
679 
680 	BUG_ON(!test_bit(B_WRITING, &b->state));
681 
682 	smp_mb__before_atomic();
683 	clear_bit(B_WRITING, &b->state);
684 	smp_mb__after_atomic();
685 
686 	wake_up_bit(&b->state, B_WRITING);
687 }
688 
689 /*
690  * Initiate a write on a dirty buffer, but don't wait for it.
691  *
692  * - If the buffer is not dirty, exit.
693  * - If there is some previous write going on, wait for it to finish (we can't
694  *   have two writes on the same buffer simultaneously).
695  * - Submit our write and don't wait on it. We set B_WRITING indicating
696  *   that there is a write in progress.
697  */
698 static void __write_dirty_buffer(struct dm_buffer *b,
699 				 struct list_head *write_list)
700 {
701 	if (!test_bit(B_DIRTY, &b->state))
702 		return;
703 
704 	clear_bit(B_DIRTY, &b->state);
705 	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
706 
707 	if (!write_list)
708 		submit_io(b, WRITE, b->block, write_endio);
709 	else
710 		list_add_tail(&b->write_list, write_list);
711 }
712 
713 static void __flush_write_list(struct list_head *write_list)
714 {
715 	struct blk_plug plug;
716 	blk_start_plug(&plug);
717 	while (!list_empty(write_list)) {
718 		struct dm_buffer *b =
719 			list_entry(write_list->next, struct dm_buffer, write_list);
720 		list_del(&b->write_list);
721 		submit_io(b, WRITE, b->block, write_endio);
722 		dm_bufio_cond_resched();
723 	}
724 	blk_finish_plug(&plug);
725 }
726 
727 /*
728  * Wait until any activity on the buffer finishes.  Possibly write the
729  * buffer if it is dirty.  When this function finishes, there is no I/O
730  * running on the buffer and the buffer is not dirty.
731  */
732 static void __make_buffer_clean(struct dm_buffer *b)
733 {
734 	BUG_ON(b->hold_count);
735 
736 	if (!b->state)	/* fast case */
737 		return;
738 
739 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
740 	__write_dirty_buffer(b, NULL);
741 	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
742 }
743 
744 /*
745  * Find some buffer that is not held by anybody, clean it, unlink it and
746  * return it.
747  */
748 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
749 {
750 	struct dm_buffer *b;
751 
752 	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
753 		BUG_ON(test_bit(B_WRITING, &b->state));
754 		BUG_ON(test_bit(B_DIRTY, &b->state));
755 
756 		if (!b->hold_count) {
757 			__make_buffer_clean(b);
758 			__unlink_buffer(b);
759 			return b;
760 		}
761 		dm_bufio_cond_resched();
762 	}
763 
764 	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
765 		BUG_ON(test_bit(B_READING, &b->state));
766 
767 		if (!b->hold_count) {
768 			__make_buffer_clean(b);
769 			__unlink_buffer(b);
770 			return b;
771 		}
772 		dm_bufio_cond_resched();
773 	}
774 
775 	return NULL;
776 }
777 
778 /*
779  * Wait until some other thread frees a buffer or releases the hold count on
780  * some buffer.
781  *
782  * This function is entered with c->lock held, drops it and regains it
783  * before exiting.
784  */
785 static void __wait_for_free_buffer(struct dm_bufio_client *c)
786 {
787 	DECLARE_WAITQUEUE(wait, current);
788 
789 	add_wait_queue(&c->free_buffer_wait, &wait);
790 	set_task_state(current, TASK_UNINTERRUPTIBLE);
791 	dm_bufio_unlock(c);
792 
793 	io_schedule();
794 
795 	remove_wait_queue(&c->free_buffer_wait, &wait);
796 
797 	dm_bufio_lock(c);
798 }
799 
800 enum new_flag {
801 	NF_FRESH = 0,
802 	NF_READ = 1,
803 	NF_GET = 2,
804 	NF_PREFETCH = 3
805 };
806 
807 /*
808  * Allocate a new buffer. If the allocation is not possible, wait until
809  * some other thread frees a buffer.
810  *
811  * May drop the lock and regain it.
812  */
813 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
814 {
815 	struct dm_buffer *b;
816 	bool tried_noio_alloc = false;
817 
818 	/*
819 	 * dm-bufio is resistant to allocation failures (it just keeps
820 	 * one buffer reserved in cases all the allocations fail).
821 	 * So set flags to not try too hard:
822 	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
823 	 *		    mutex and wait ourselves.
824 	 *	__GFP_NORETRY: don't retry and rather return failure
825 	 *	__GFP_NOMEMALLOC: don't use emergency reserves
826 	 *	__GFP_NOWARN: don't print a warning in case of failure
827 	 *
828 	 * For debugging, if we set the cache size to 1, no new buffers will
829 	 * be allocated.
830 	 */
831 	while (1) {
832 		if (dm_bufio_cache_size_latch != 1) {
833 			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
834 			if (b)
835 				return b;
836 		}
837 
838 		if (nf == NF_PREFETCH)
839 			return NULL;
840 
841 		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
842 			dm_bufio_unlock(c);
843 			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
844 			dm_bufio_lock(c);
845 			if (b)
846 				return b;
847 			tried_noio_alloc = true;
848 		}
849 
850 		if (!list_empty(&c->reserved_buffers)) {
851 			b = list_entry(c->reserved_buffers.next,
852 				       struct dm_buffer, lru_list);
853 			list_del(&b->lru_list);
854 			c->need_reserved_buffers++;
855 
856 			return b;
857 		}
858 
859 		b = __get_unclaimed_buffer(c);
860 		if (b)
861 			return b;
862 
863 		__wait_for_free_buffer(c);
864 	}
865 }
866 
867 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
868 {
869 	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
870 
871 	if (!b)
872 		return NULL;
873 
874 	if (c->alloc_callback)
875 		c->alloc_callback(b);
876 
877 	return b;
878 }
879 
880 /*
881  * Free a buffer and wake other threads waiting for free buffers.
882  */
883 static void __free_buffer_wake(struct dm_buffer *b)
884 {
885 	struct dm_bufio_client *c = b->c;
886 
887 	if (!c->need_reserved_buffers)
888 		free_buffer(b);
889 	else {
890 		list_add(&b->lru_list, &c->reserved_buffers);
891 		c->need_reserved_buffers--;
892 	}
893 
894 	wake_up(&c->free_buffer_wait);
895 }
896 
897 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
898 					struct list_head *write_list)
899 {
900 	struct dm_buffer *b, *tmp;
901 
902 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
903 		BUG_ON(test_bit(B_READING, &b->state));
904 
905 		if (!test_bit(B_DIRTY, &b->state) &&
906 		    !test_bit(B_WRITING, &b->state)) {
907 			__relink_lru(b, LIST_CLEAN);
908 			continue;
909 		}
910 
911 		if (no_wait && test_bit(B_WRITING, &b->state))
912 			return;
913 
914 		__write_dirty_buffer(b, write_list);
915 		dm_bufio_cond_resched();
916 	}
917 }
918 
919 /*
920  * Get writeback threshold and buffer limit for a given client.
921  */
922 static void __get_memory_limit(struct dm_bufio_client *c,
923 			       unsigned long *threshold_buffers,
924 			       unsigned long *limit_buffers)
925 {
926 	unsigned long buffers;
927 
928 	if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
929 		if (mutex_trylock(&dm_bufio_clients_lock)) {
930 			__cache_size_refresh();
931 			mutex_unlock(&dm_bufio_clients_lock);
932 		}
933 	}
934 
935 	buffers = dm_bufio_cache_size_per_client >>
936 		  (c->sectors_per_block_bits + SECTOR_SHIFT);
937 
938 	if (buffers < c->minimum_buffers)
939 		buffers = c->minimum_buffers;
940 
941 	*limit_buffers = buffers;
942 	*threshold_buffers = mult_frac(buffers,
943 				       DM_BUFIO_WRITEBACK_PERCENT, 100);
944 }
945 
946 /*
947  * Check if we're over the watermark.
948  * If we are over threshold_buffers, start freeing buffers.
949  * If we're over "limit_buffers", block until we get under the limit.
950  */
951 static void __check_watermark(struct dm_bufio_client *c,
952 			      struct list_head *write_list)
953 {
954 	unsigned long threshold_buffers, limit_buffers;
955 
956 	__get_memory_limit(c, &threshold_buffers, &limit_buffers);
957 
958 	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
959 	       limit_buffers) {
960 
961 		struct dm_buffer *b = __get_unclaimed_buffer(c);
962 
963 		if (!b)
964 			return;
965 
966 		__free_buffer_wake(b);
967 		dm_bufio_cond_resched();
968 	}
969 
970 	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
971 		__write_dirty_buffers_async(c, 1, write_list);
972 }
973 
974 /*----------------------------------------------------------------
975  * Getting a buffer
976  *--------------------------------------------------------------*/
977 
978 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
979 				     enum new_flag nf, int *need_submit,
980 				     struct list_head *write_list)
981 {
982 	struct dm_buffer *b, *new_b = NULL;
983 
984 	*need_submit = 0;
985 
986 	b = __find(c, block);
987 	if (b)
988 		goto found_buffer;
989 
990 	if (nf == NF_GET)
991 		return NULL;
992 
993 	new_b = __alloc_buffer_wait(c, nf);
994 	if (!new_b)
995 		return NULL;
996 
997 	/*
998 	 * We've had a period where the mutex was unlocked, so need to
999 	 * recheck the buffer tree.
1000 	 */
1001 	b = __find(c, block);
1002 	if (b) {
1003 		__free_buffer_wake(new_b);
1004 		goto found_buffer;
1005 	}
1006 
1007 	__check_watermark(c, write_list);
1008 
1009 	b = new_b;
1010 	b->hold_count = 1;
1011 	b->read_error = 0;
1012 	b->write_error = 0;
1013 	__link_buffer(b, block, LIST_CLEAN);
1014 
1015 	if (nf == NF_FRESH) {
1016 		b->state = 0;
1017 		return b;
1018 	}
1019 
1020 	b->state = 1 << B_READING;
1021 	*need_submit = 1;
1022 
1023 	return b;
1024 
1025 found_buffer:
1026 	if (nf == NF_PREFETCH)
1027 		return NULL;
1028 	/*
1029 	 * Note: it is essential that we don't wait for the buffer to be
1030 	 * read if dm_bufio_get function is used. Both dm_bufio_get and
1031 	 * dm_bufio_prefetch can be used in the driver request routine.
1032 	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1033 	 * the same buffer, it would deadlock if we waited.
1034 	 */
1035 	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1036 		return NULL;
1037 
1038 	b->hold_count++;
1039 	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1040 		     test_bit(B_WRITING, &b->state));
1041 	return b;
1042 }
1043 
1044 /*
1045  * The endio routine for reading: set the error, clear the bit and wake up
1046  * anyone waiting on the buffer.
1047  */
1048 static void read_endio(struct bio *bio)
1049 {
1050 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
1051 
1052 	b->read_error = bio->bi_error;
1053 
1054 	BUG_ON(!test_bit(B_READING, &b->state));
1055 
1056 	smp_mb__before_atomic();
1057 	clear_bit(B_READING, &b->state);
1058 	smp_mb__after_atomic();
1059 
1060 	wake_up_bit(&b->state, B_READING);
1061 }
1062 
1063 /*
1064  * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1065  * functions is similar except that dm_bufio_new doesn't read the
1066  * buffer from the disk (assuming that the caller overwrites all the data
1067  * and uses dm_bufio_mark_buffer_dirty to write new data back).
1068  */
1069 static void *new_read(struct dm_bufio_client *c, sector_t block,
1070 		      enum new_flag nf, struct dm_buffer **bp)
1071 {
1072 	int need_submit;
1073 	struct dm_buffer *b;
1074 
1075 	LIST_HEAD(write_list);
1076 
1077 	dm_bufio_lock(c);
1078 	b = __bufio_new(c, block, nf, &need_submit, &write_list);
1079 	dm_bufio_unlock(c);
1080 
1081 	__flush_write_list(&write_list);
1082 
1083 	if (!b)
1084 		return b;
1085 
1086 	if (need_submit)
1087 		submit_io(b, READ, b->block, read_endio);
1088 
1089 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1090 
1091 	if (b->read_error) {
1092 		int error = b->read_error;
1093 
1094 		dm_bufio_release(b);
1095 
1096 		return ERR_PTR(error);
1097 	}
1098 
1099 	*bp = b;
1100 
1101 	return b->data;
1102 }
1103 
1104 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1105 		   struct dm_buffer **bp)
1106 {
1107 	return new_read(c, block, NF_GET, bp);
1108 }
1109 EXPORT_SYMBOL_GPL(dm_bufio_get);
1110 
1111 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1112 		    struct dm_buffer **bp)
1113 {
1114 	BUG_ON(dm_bufio_in_request());
1115 
1116 	return new_read(c, block, NF_READ, bp);
1117 }
1118 EXPORT_SYMBOL_GPL(dm_bufio_read);
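/*
 * Illustrative sketch, not part of the driver: the usual read path is to
 * read a block, use the returned data while the buffer is held, and then
 * drop the hold count.  "block" below is a hypothetical block number.
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(c, block, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	// ... use up to dm_bufio_get_block_size(c) bytes at "data" ...
 *	dm_bufio_release(bp);
 */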
1119 
1120 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1121 		   struct dm_buffer **bp)
1122 {
1123 	BUG_ON(dm_bufio_in_request());
1124 
1125 	return new_read(c, block, NF_FRESH, bp);
1126 }
1127 EXPORT_SYMBOL_GPL(dm_bufio_new);
1128 
1129 void dm_bufio_prefetch(struct dm_bufio_client *c,
1130 		       sector_t block, unsigned n_blocks)
1131 {
1132 	struct blk_plug plug;
1133 
1134 	LIST_HEAD(write_list);
1135 
1136 	BUG_ON(dm_bufio_in_request());
1137 
1138 	blk_start_plug(&plug);
1139 	dm_bufio_lock(c);
1140 
1141 	for (; n_blocks--; block++) {
1142 		int need_submit;
1143 		struct dm_buffer *b;
1144 		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1145 				&write_list);
1146 		if (unlikely(!list_empty(&write_list))) {
1147 			dm_bufio_unlock(c);
1148 			blk_finish_plug(&plug);
1149 			__flush_write_list(&write_list);
1150 			blk_start_plug(&plug);
1151 			dm_bufio_lock(c);
1152 		}
1153 		if (unlikely(b != NULL)) {
1154 			dm_bufio_unlock(c);
1155 
1156 			if (need_submit)
1157 				submit_io(b, READ, b->block, read_endio);
1158 			dm_bufio_release(b);
1159 
1160 			dm_bufio_cond_resched();
1161 
1162 			if (!n_blocks)
1163 				goto flush_plug;
1164 			dm_bufio_lock(c);
1165 		}
1166 	}
1167 
1168 	dm_bufio_unlock(c);
1169 
1170 flush_plug:
1171 	blk_finish_plug(&plug);
1172 }
1173 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
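/*
 * Illustrative sketch, not part of the driver: prefetch starts background
 * reads for blocks that will be needed shortly, e.g.
 *
 *	dm_bufio_prefetch(c, first_block, 8);
 *
 * A later dm_bufio_read() of the same blocks either finds them cached or
 * waits for the read that is already in flight.  "first_block" is
 * hypothetical.
 */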
1174 
1175 void dm_bufio_release(struct dm_buffer *b)
1176 {
1177 	struct dm_bufio_client *c = b->c;
1178 
1179 	dm_bufio_lock(c);
1180 
1181 	BUG_ON(!b->hold_count);
1182 
1183 	b->hold_count--;
1184 	if (!b->hold_count) {
1185 		wake_up(&c->free_buffer_wait);
1186 
1187 		/*
1188 		 * If there were errors on the buffer, and the buffer is not
1189 		 * to be written, free the buffer. There is no point in caching
1190 		 * an invalid buffer.
1191 		 */
1192 		if ((b->read_error || b->write_error) &&
1193 		    !test_bit(B_READING, &b->state) &&
1194 		    !test_bit(B_WRITING, &b->state) &&
1195 		    !test_bit(B_DIRTY, &b->state)) {
1196 			__unlink_buffer(b);
1197 			__free_buffer_wake(b);
1198 		}
1199 	}
1200 
1201 	dm_bufio_unlock(c);
1202 }
1203 EXPORT_SYMBOL_GPL(dm_bufio_release);
1204 
1205 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1206 {
1207 	struct dm_bufio_client *c = b->c;
1208 
1209 	dm_bufio_lock(c);
1210 
1211 	BUG_ON(test_bit(B_READING, &b->state));
1212 
1213 	if (!test_and_set_bit(B_DIRTY, &b->state))
1214 		__relink_lru(b, LIST_DIRTY);
1215 
1216 	dm_bufio_unlock(c);
1217 }
1218 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
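/*
 * Illustrative sketch, not part of the driver: modifying a cached block.
 * The caller changes the data, marks the buffer dirty, releases it and
 * later writes all dirty buffers back.  "bp" and "payload" below are
 * hypothetical.
 *
 *	void *data = dm_bufio_read(c, block, &bp);
 *	memcpy(data, payload, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 *	...
 *	dm_bufio_write_dirty_buffers(c);
 *
 * dm_bufio_write_dirty_buffers() waits for the writes and issues a flush,
 * returning any asynchronous write error.
 */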
1219 
1220 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1221 {
1222 	LIST_HEAD(write_list);
1223 
1224 	BUG_ON(dm_bufio_in_request());
1225 
1226 	dm_bufio_lock(c);
1227 	__write_dirty_buffers_async(c, 0, &write_list);
1228 	dm_bufio_unlock(c);
1229 	__flush_write_list(&write_list);
1230 }
1231 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1232 
1233 /*
1234  * For performance, it is essential that the buffers are written asynchronously
1235  * and simultaneously (so that the block layer can merge the writes) and then
1236  * waited upon.
1237  *
1238  * Finally, we flush hardware disk cache.
1239  */
1240 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1241 {
1242 	int a, f;
1243 	unsigned long buffers_processed = 0;
1244 	struct dm_buffer *b, *tmp;
1245 
1246 	LIST_HEAD(write_list);
1247 
1248 	dm_bufio_lock(c);
1249 	__write_dirty_buffers_async(c, 0, &write_list);
1250 	dm_bufio_unlock(c);
1251 	__flush_write_list(&write_list);
1252 	dm_bufio_lock(c);
1253 
1254 again:
1255 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1256 		int dropped_lock = 0;
1257 
1258 		if (buffers_processed < c->n_buffers[LIST_DIRTY])
1259 			buffers_processed++;
1260 
1261 		BUG_ON(test_bit(B_READING, &b->state));
1262 
1263 		if (test_bit(B_WRITING, &b->state)) {
1264 			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1265 				dropped_lock = 1;
1266 				b->hold_count++;
1267 				dm_bufio_unlock(c);
1268 				wait_on_bit_io(&b->state, B_WRITING,
1269 					       TASK_UNINTERRUPTIBLE);
1270 				dm_bufio_lock(c);
1271 				b->hold_count--;
1272 			} else
1273 				wait_on_bit_io(&b->state, B_WRITING,
1274 					       TASK_UNINTERRUPTIBLE);
1275 		}
1276 
1277 		if (!test_bit(B_DIRTY, &b->state) &&
1278 		    !test_bit(B_WRITING, &b->state))
1279 			__relink_lru(b, LIST_CLEAN);
1280 
1281 		dm_bufio_cond_resched();
1282 
1283 		/*
1284 		 * If we dropped the lock, the list is no longer consistent,
1285 		 * so we must restart the search.
1286 		 *
1287 		 * In the most common case, the buffer just processed is
1288 		 * relinked to the clean list, so we won't loop scanning the
1289 		 * same buffer again and again.
1290 		 *
1291 		 * This may livelock if there is another thread simultaneously
1292 		 * dirtying buffers, so we count the number of buffers walked
1293 		 * and if it exceeds the total number of buffers, it means that
1294 		 * someone is doing some writes simultaneously with us.  In
1295 		 * this case, stop, dropping the lock.
1296 		 */
1297 		if (dropped_lock)
1298 			goto again;
1299 	}
1300 	wake_up(&c->free_buffer_wait);
1301 	dm_bufio_unlock(c);
1302 
1303 	a = xchg(&c->async_write_error, 0);
1304 	f = dm_bufio_issue_flush(c);
1305 	if (a)
1306 		return a;
1307 
1308 	return f;
1309 }
1310 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1311 
1312 /*
1313  * Use dm-io to send an empty barrier and flush the device.
1314  */
1315 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1316 {
1317 	struct dm_io_request io_req = {
1318 		.bi_rw = WRITE_FLUSH,
1319 		.mem.type = DM_IO_KMEM,
1320 		.mem.ptr.addr = NULL,
1321 		.client = c->dm_io,
1322 	};
1323 	struct dm_io_region io_reg = {
1324 		.bdev = c->bdev,
1325 		.sector = 0,
1326 		.count = 0,
1327 	};
1328 
1329 	BUG_ON(dm_bufio_in_request());
1330 
1331 	return dm_io(&io_req, 1, &io_reg, NULL);
1332 }
1333 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1334 
1335 /*
1336  * We first delete any other buffer that may be at that new location.
1337  *
1338  * Then, we write the buffer to the original location if it was dirty.
1339  *
1340  * Then, if we are the only one who is holding the buffer, relink the buffer
1341  * in the buffer tree at the new location.
1342  *
1343  * If there was someone else holding the buffer, we write it to the new
1344  * location but not relink it, because that other user needs to have the buffer
1345  * at the same place.
1346  */
1347 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1348 {
1349 	struct dm_bufio_client *c = b->c;
1350 	struct dm_buffer *new;
1351 
1352 	BUG_ON(dm_bufio_in_request());
1353 
1354 	dm_bufio_lock(c);
1355 
1356 retry:
1357 	new = __find(c, new_block);
1358 	if (new) {
1359 		if (new->hold_count) {
1360 			__wait_for_free_buffer(c);
1361 			goto retry;
1362 		}
1363 
1364 		/*
1365 		 * FIXME: Is there any point waiting for a write that's going
1366 		 * to be overwritten in a bit?
1367 		 */
1368 		__make_buffer_clean(new);
1369 		__unlink_buffer(new);
1370 		__free_buffer_wake(new);
1371 	}
1372 
1373 	BUG_ON(!b->hold_count);
1374 	BUG_ON(test_bit(B_READING, &b->state));
1375 
1376 	__write_dirty_buffer(b, NULL);
1377 	if (b->hold_count == 1) {
1378 		wait_on_bit_io(&b->state, B_WRITING,
1379 			       TASK_UNINTERRUPTIBLE);
1380 		set_bit(B_DIRTY, &b->state);
1381 		__unlink_buffer(b);
1382 		__link_buffer(b, new_block, LIST_DIRTY);
1383 	} else {
1384 		sector_t old_block;
1385 		wait_on_bit_lock_io(&b->state, B_WRITING,
1386 				    TASK_UNINTERRUPTIBLE);
1387 		/*
1388 		 * Relink buffer to "new_block" so that write_callback
1389 		 * sees "new_block" as a block number.
1390 		 * After the write, link the buffer back to old_block.
1391 		 * All this must be done in bufio lock, so that block number
1392 		 * change isn't visible to other threads.
1393 		 */
1394 		old_block = b->block;
1395 		__unlink_buffer(b);
1396 		__link_buffer(b, new_block, b->list_mode);
1397 		submit_io(b, WRITE, new_block, write_endio);
1398 		wait_on_bit_io(&b->state, B_WRITING,
1399 			       TASK_UNINTERRUPTIBLE);
1400 		__unlink_buffer(b);
1401 		__link_buffer(b, old_block, b->list_mode);
1402 	}
1403 
1404 	dm_bufio_unlock(c);
1405 	dm_bufio_release(b);
1406 }
1407 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1408 
1409 /*
1410  * Free the given buffer.
1411  *
1412  * This is just a hint, if the buffer is in use or dirty, this function
1413  * does nothing.
1414  */
1415 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1416 {
1417 	struct dm_buffer *b;
1418 
1419 	dm_bufio_lock(c);
1420 
1421 	b = __find(c, block);
1422 	if (b && likely(!b->hold_count) && likely(!b->state)) {
1423 		__unlink_buffer(b);
1424 		__free_buffer_wake(b);
1425 	}
1426 
1427 	dm_bufio_unlock(c);
1428 }
1429 EXPORT_SYMBOL(dm_bufio_forget);
1430 
1431 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1432 {
1433 	c->minimum_buffers = n;
1434 }
1435 EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
1436 
1437 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1438 {
1439 	return c->block_size;
1440 }
1441 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1442 
1443 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1444 {
1445 	return i_size_read(c->bdev->bd_inode) >>
1446 			   (SECTOR_SHIFT + c->sectors_per_block_bits);
1447 }
1448 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1449 
1450 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1451 {
1452 	return b->block;
1453 }
1454 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1455 
1456 void *dm_bufio_get_block_data(struct dm_buffer *b)
1457 {
1458 	return b->data;
1459 }
1460 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1461 
1462 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1463 {
1464 	return b + 1;
1465 }
1466 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1467 
1468 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1469 {
1470 	return b->c;
1471 }
1472 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1473 
1474 static void drop_buffers(struct dm_bufio_client *c)
1475 {
1476 	struct dm_buffer *b;
1477 	int i;
1478 
1479 	BUG_ON(dm_bufio_in_request());
1480 
1481 	/*
1482 	 * An optimization so that the buffers are not written one-by-one.
1483 	 */
1484 	dm_bufio_write_dirty_buffers_async(c);
1485 
1486 	dm_bufio_lock(c);
1487 
1488 	while ((b = __get_unclaimed_buffer(c)))
1489 		__free_buffer_wake(b);
1490 
1491 	for (i = 0; i < LIST_SIZE; i++)
1492 		list_for_each_entry(b, &c->lru[i], lru_list)
1493 			DMERR("leaked buffer %llx, hold count %u, list %d",
1494 			      (unsigned long long)b->block, b->hold_count, i);
1495 
1496 	for (i = 0; i < LIST_SIZE; i++)
1497 		BUG_ON(!list_empty(&c->lru[i]));
1498 
1499 	dm_bufio_unlock(c);
1500 }
1501 
1502 /*
1503  * We may not be able to evict this buffer if IO is pending or the client
1504  * is still using it.  Caller is expected to know buffer is too old.
1505  *
1506  * And if GFP_NOFS is used, we must not do any I/O because we hold
1507  * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1508  * rerouted to a different bufio client.
1509  */
1510 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1511 {
1512 	if (!(gfp & __GFP_FS)) {
1513 		if (test_bit(B_READING, &b->state) ||
1514 		    test_bit(B_WRITING, &b->state) ||
1515 		    test_bit(B_DIRTY, &b->state))
1516 			return false;
1517 	}
1518 
1519 	if (b->hold_count)
1520 		return false;
1521 
1522 	__make_buffer_clean(b);
1523 	__unlink_buffer(b);
1524 	__free_buffer_wake(b);
1525 
1526 	return true;
1527 }
1528 
1529 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1530 {
1531         unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
1532         return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
1533 }
1534 
1535 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1536 			    gfp_t gfp_mask)
1537 {
1538 	int l;
1539 	struct dm_buffer *b, *tmp;
1540 	unsigned long freed = 0;
1541 	unsigned long count = c->n_buffers[LIST_CLEAN] +
1542 			      c->n_buffers[LIST_DIRTY];
1543 	unsigned long retain_target = get_retain_buffers(c);
1544 
1545 	for (l = 0; l < LIST_SIZE; l++) {
1546 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1547 			if (__try_evict_buffer(b, gfp_mask))
1548 				freed++;
1549 			if (!--nr_to_scan || ((count - freed) <= retain_target))
1550 				return freed;
1551 			dm_bufio_cond_resched();
1552 		}
1553 	}
1554 	return freed;
1555 }
1556 
1557 static unsigned long
1558 dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1559 {
1560 	struct dm_bufio_client *c;
1561 	unsigned long freed;
1562 
1563 	c = container_of(shrink, struct dm_bufio_client, shrinker);
1564 	if (sc->gfp_mask & __GFP_FS)
1565 		dm_bufio_lock(c);
1566 	else if (!dm_bufio_trylock(c))
1567 		return SHRINK_STOP;
1568 
1569 	freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1570 	dm_bufio_unlock(c);
1571 	return freed;
1572 }
1573 
1574 static unsigned long
1575 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1576 {
1577 	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1578 	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1579 			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
1580 	unsigned long retain_target = get_retain_buffers(c);
1581 
1582 	return (count < retain_target) ? 0 : (count - retain_target);
1583 }
1584 
1585 /*
1586  * Create the buffering interface
1587  */
1588 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1589 					       unsigned reserved_buffers, unsigned aux_size,
1590 					       void (*alloc_callback)(struct dm_buffer *),
1591 					       void (*write_callback)(struct dm_buffer *))
1592 {
1593 	int r;
1594 	struct dm_bufio_client *c;
1595 	unsigned i;
1596 
1597 	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1598 	       (block_size & (block_size - 1)));
1599 
1600 	c = kzalloc(sizeof(*c), GFP_KERNEL);
1601 	if (!c) {
1602 		r = -ENOMEM;
1603 		goto bad_client;
1604 	}
1605 	c->buffer_tree = RB_ROOT;
1606 
1607 	c->bdev = bdev;
1608 	c->block_size = block_size;
1609 	c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1610 	c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
1611 				  __ffs(block_size) - PAGE_SHIFT : 0;
1612 	c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
1613 				  PAGE_SHIFT - __ffs(block_size) : 0);
1614 
1615 	c->aux_size = aux_size;
1616 	c->alloc_callback = alloc_callback;
1617 	c->write_callback = write_callback;
1618 
1619 	for (i = 0; i < LIST_SIZE; i++) {
1620 		INIT_LIST_HEAD(&c->lru[i]);
1621 		c->n_buffers[i] = 0;
1622 	}
1623 
1624 	mutex_init(&c->lock);
1625 	INIT_LIST_HEAD(&c->reserved_buffers);
1626 	c->need_reserved_buffers = reserved_buffers;
1627 
1628 	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1629 
1630 	init_waitqueue_head(&c->free_buffer_wait);
1631 	c->async_write_error = 0;
1632 
1633 	c->dm_io = dm_io_client_create();
1634 	if (IS_ERR(c->dm_io)) {
1635 		r = PTR_ERR(c->dm_io);
1636 		goto bad_dm_io;
1637 	}
1638 
1639 	mutex_lock(&dm_bufio_clients_lock);
1640 	if (c->blocks_per_page_bits) {
1641 		if (!DM_BUFIO_CACHE_NAME(c)) {
1642 			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1643 			if (!DM_BUFIO_CACHE_NAME(c)) {
1644 				r = -ENOMEM;
1645 				mutex_unlock(&dm_bufio_clients_lock);
1646 				goto bad_cache;
1647 			}
1648 		}
1649 
1650 		if (!DM_BUFIO_CACHE(c)) {
1651 			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1652 							      c->block_size,
1653 							      c->block_size, 0, NULL);
1654 			if (!DM_BUFIO_CACHE(c)) {
1655 				r = -ENOMEM;
1656 				mutex_unlock(&dm_bufio_clients_lock);
1657 				goto bad_cache;
1658 			}
1659 		}
1660 	}
1661 	mutex_unlock(&dm_bufio_clients_lock);
1662 
1663 	while (c->need_reserved_buffers) {
1664 		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1665 
1666 		if (!b) {
1667 			r = -ENOMEM;
1668 			goto bad_buffer;
1669 		}
1670 		__free_buffer_wake(b);
1671 	}
1672 
1673 	mutex_lock(&dm_bufio_clients_lock);
1674 	dm_bufio_client_count++;
1675 	list_add(&c->client_list, &dm_bufio_all_clients);
1676 	__cache_size_refresh();
1677 	mutex_unlock(&dm_bufio_clients_lock);
1678 
1679 	c->shrinker.count_objects = dm_bufio_shrink_count;
1680 	c->shrinker.scan_objects = dm_bufio_shrink_scan;
1681 	c->shrinker.seeks = 1;
1682 	c->shrinker.batch = 0;
1683 	register_shrinker(&c->shrinker);
1684 
1685 	return c;
1686 
1687 bad_buffer:
1688 bad_cache:
1689 	while (!list_empty(&c->reserved_buffers)) {
1690 		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1691 						 struct dm_buffer, lru_list);
1692 		list_del(&b->lru_list);
1693 		free_buffer(b);
1694 	}
1695 	dm_io_client_destroy(c->dm_io);
1696 bad_dm_io:
1697 	kfree(c);
1698 bad_client:
1699 	return ERR_PTR(r);
1700 }
1701 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
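/*
 * Illustrative sketch, not part of the driver: a device-mapper target
 * typically creates one bufio client per underlying device in its
 * constructor and tears it down in its destructor.  The 4096-byte block
 * size, single reserved buffer and zero aux_size below are hypothetical.
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */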
1702 
1703 /*
1704  * Free the buffering interface.
1705  * It is required that there are no references on any buffers.
1706  */
1707 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1708 {
1709 	unsigned i;
1710 
1711 	drop_buffers(c);
1712 
1713 	unregister_shrinker(&c->shrinker);
1714 
1715 	mutex_lock(&dm_bufio_clients_lock);
1716 
1717 	list_del(&c->client_list);
1718 	dm_bufio_client_count--;
1719 	__cache_size_refresh();
1720 
1721 	mutex_unlock(&dm_bufio_clients_lock);
1722 
1723 	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1724 	BUG_ON(c->need_reserved_buffers);
1725 
1726 	while (!list_empty(&c->reserved_buffers)) {
1727 		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1728 						 struct dm_buffer, lru_list);
1729 		list_del(&b->lru_list);
1730 		free_buffer(b);
1731 	}
1732 
1733 	for (i = 0; i < LIST_SIZE; i++)
1734 		if (c->n_buffers[i])
1735 			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1736 
1737 	for (i = 0; i < LIST_SIZE; i++)
1738 		BUG_ON(c->n_buffers[i]);
1739 
1740 	dm_io_client_destroy(c->dm_io);
1741 	kfree(c);
1742 }
1743 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1744 
1745 static unsigned get_max_age_hz(void)
1746 {
1747 	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
1748 
1749 	if (max_age > UINT_MAX / HZ)
1750 		max_age = UINT_MAX / HZ;
1751 
1752 	return max_age * HZ;
1753 }
1754 
1755 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1756 {
1757 	return time_after_eq(jiffies, b->last_accessed + age_hz);
1758 }
1759 
1760 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1761 {
1762 	struct dm_buffer *b, *tmp;
1763 	unsigned long retain_target = get_retain_buffers(c);
1764 	unsigned long count;
1765 	LIST_HEAD(write_list);
1766 
1767 	dm_bufio_lock(c);
1768 
1769 	__check_watermark(c, &write_list);
1770 	if (unlikely(!list_empty(&write_list))) {
1771 		dm_bufio_unlock(c);
1772 		__flush_write_list(&write_list);
1773 		dm_bufio_lock(c);
1774 	}
1775 
1776 	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1777 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1778 		if (count <= retain_target)
1779 			break;
1780 
1781 		if (!older_than(b, age_hz))
1782 			break;
1783 
1784 		if (__try_evict_buffer(b, 0))
1785 			count--;
1786 
1787 		dm_bufio_cond_resched();
1788 	}
1789 
1790 	dm_bufio_unlock(c);
1791 }
1792 
1793 static void cleanup_old_buffers(void)
1794 {
1795 	unsigned long max_age_hz = get_max_age_hz();
1796 	struct dm_bufio_client *c;
1797 
1798 	mutex_lock(&dm_bufio_clients_lock);
1799 
1800 	__cache_size_refresh();
1801 
1802 	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
1803 		__evict_old_buffers(c, max_age_hz);
1804 
1805 	mutex_unlock(&dm_bufio_clients_lock);
1806 }
1807 
1808 static struct workqueue_struct *dm_bufio_wq;
1809 static struct delayed_work dm_bufio_work;
1810 
1811 static void work_fn(struct work_struct *w)
1812 {
1813 	cleanup_old_buffers();
1814 
1815 	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1816 			   DM_BUFIO_WORK_TIMER_SECS * HZ);
1817 }
1818 
1819 /*----------------------------------------------------------------
1820  * Module setup
1821  *--------------------------------------------------------------*/
1822 
1823 /*
1824  * This is called only once for the whole dm_bufio module.
1825  * It initializes memory limit.
1826  */
1827 static int __init dm_bufio_init(void)
1828 {
1829 	__u64 mem;
1830 
1831 	dm_bufio_allocated_kmem_cache = 0;
1832 	dm_bufio_allocated_get_free_pages = 0;
1833 	dm_bufio_allocated_vmalloc = 0;
1834 	dm_bufio_current_allocated = 0;
1835 
1836 	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1837 	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1838 
1839 	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
1840 			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
1841 
1842 	if (mem > ULONG_MAX)
1843 		mem = ULONG_MAX;
1844 
1845 #ifdef CONFIG_MMU
1846 	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
1847 		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
1848 #endif
1849 
1850 	dm_bufio_default_cache_size = mem;
1851 
1852 	mutex_lock(&dm_bufio_clients_lock);
1853 	__cache_size_refresh();
1854 	mutex_unlock(&dm_bufio_clients_lock);
1855 
1856 	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
1857 	if (!dm_bufio_wq)
1858 		return -ENOMEM;
1859 
1860 	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1861 	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1862 			   DM_BUFIO_WORK_TIMER_SECS * HZ);
1863 
1864 	return 0;
1865 }
1866 
1867 /*
1868  * This is called once when unloading the dm_bufio module.
1869  */
1870 static void __exit dm_bufio_exit(void)
1871 {
1872 	int bug = 0;
1873 	int i;
1874 
1875 	cancel_delayed_work_sync(&dm_bufio_work);
1876 	destroy_workqueue(dm_bufio_wq);
1877 
1878 	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
1879 		kmem_cache_destroy(dm_bufio_caches[i]);
1880 
1881 	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1882 		kfree(dm_bufio_cache_names[i]);
1883 
1884 	if (dm_bufio_client_count) {
1885 		DMCRIT("%s: dm_bufio_client_count leaked: %d",
1886 			__func__, dm_bufio_client_count);
1887 		bug = 1;
1888 	}
1889 
1890 	if (dm_bufio_current_allocated) {
1891 		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1892 			__func__, dm_bufio_current_allocated);
1893 		bug = 1;
1894 	}
1895 
1896 	if (dm_bufio_allocated_get_free_pages) {
1897 		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1898 		       __func__, dm_bufio_allocated_get_free_pages);
1899 		bug = 1;
1900 	}
1901 
1902 	if (dm_bufio_allocated_vmalloc) {
1903 		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1904 		       __func__, dm_bufio_allocated_vmalloc);
1905 		bug = 1;
1906 	}
1907 
1908 	if (bug)
1909 		BUG();
1910 }
1911 
1912 module_init(dm_bufio_init)
1913 module_exit(dm_bufio_exit)
1914 
1915 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1916 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1917 
1918 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1919 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1920 
1921 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
1922 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
1923 
1924 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1925 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1926 
1927 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1928 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1929 
1930 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1931 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1932 
1933 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1934 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1935 
1936 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1937 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
1938 
1939 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1940 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1941 MODULE_LICENSE("GPL");
1942