1 /*
2 * Copyright (C) 2009-2011 Red Hat, Inc.
3 *
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
5 *
6 * This file is released under the GPL.
7 */
8
9 #include "dm-bufio.h"
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/shrinker.h>
16 #include <linux/module.h>
17
18 #define DM_MSG_PREFIX "bufio"
19
20 /*
21 * Memory management policy:
22 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
23 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
24 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
25 * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
26 * dirty buffers.
27 */
28 #define DM_BUFIO_MIN_BUFFERS 8
29
30 #define DM_BUFIO_MEMORY_PERCENT 2
31 #define DM_BUFIO_VMALLOC_PERCENT 25
32 #define DM_BUFIO_WRITEBACK_PERCENT 75
33
34 /*
35 * Check buffer ages in this interval (seconds)
36 */
37 #define DM_BUFIO_WORK_TIMER_SECS 10
38
39 /*
40 * Free buffers when they are older than this (seconds)
41 */
42 #define DM_BUFIO_DEFAULT_AGE_SECS 60
43
44 /*
45 * The number of bvec entries that are embedded directly in the buffer.
46 * If the chunk size is larger, dm-io is used to do the io.
47 */
48 #define DM_BUFIO_INLINE_VECS 16
49
50 /*
51 * Buffer hash
52 */
53 #define DM_BUFIO_HASH_BITS 20
54 #define DM_BUFIO_HASH(block) \
55 ((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
56 ((1 << DM_BUFIO_HASH_BITS) - 1))
57
58 /*
59 * Don't try to use kmem_cache_alloc for blocks larger than this.
60 * For explanation, see alloc_buffer_data below.
61 */
62 #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
63 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))
64
65 /*
66 * dm_buffer->list_mode
67 */
68 #define LIST_CLEAN 0
69 #define LIST_DIRTY 1
70 #define LIST_SIZE 2
71
72 /*
73 * Linking of buffers:
74 * All buffers are linked to cache_hash with their hash_list field.
75 *
76 * Clean buffers that are not being written (B_WRITING not set)
77 * are linked to lru[LIST_CLEAN] with their lru_list field.
78 *
79 * Dirty and clean buffers that are being written are linked to
80 * lru[LIST_DIRTY] with their lru_list field. When the write
81 * finishes, the buffer cannot be relinked immediately (because we
82 * are in an interrupt context and relinking requires process
83 * context), so some clean-not-writing buffers can be held on
84 * dirty_lru too. They are later added to lru in the process
85 * context.
86 */
87 struct dm_bufio_client {
88 struct mutex lock;
89
90 struct list_head lru[LIST_SIZE];
91 unsigned long n_buffers[LIST_SIZE];
92
93 struct block_device *bdev;
94 unsigned block_size;
95 unsigned char sectors_per_block_bits;
96 unsigned char pages_per_block_bits;
97 unsigned char blocks_per_page_bits;
98 unsigned aux_size;
99 void (*alloc_callback)(struct dm_buffer *);
100 void (*write_callback)(struct dm_buffer *);
101
102 struct dm_io_client *dm_io;
103
104 struct list_head reserved_buffers;
105 unsigned need_reserved_buffers;
106
107 struct hlist_head *cache_hash;
108 wait_queue_head_t free_buffer_wait;
109
110 int async_write_error;
111
112 struct list_head client_list;
113 struct shrinker shrinker;
114 };
115
116 /*
117 * Buffer state bits.
118 */
119 #define B_READING 0
120 #define B_WRITING 1
121 #define B_DIRTY 2
122
123 /*
124 * Describes how the block was allocated:
125 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
126 * See the comment at alloc_buffer_data.
127 */
128 enum data_mode {
129 DATA_MODE_SLAB = 0,
130 DATA_MODE_GET_FREE_PAGES = 1,
131 DATA_MODE_VMALLOC = 2,
132 DATA_MODE_LIMIT = 3
133 };
134
135 struct dm_buffer {
136 struct hlist_node hash_list;
137 struct list_head lru_list;
138 sector_t block;
139 void *data;
140 enum data_mode data_mode;
141 unsigned char list_mode; /* LIST_* */
142 unsigned hold_count;
143 int read_error;
144 int write_error;
145 unsigned long state;
146 unsigned long last_accessed;
147 struct dm_bufio_client *c;
148 struct bio bio;
149 struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
150 };
151
152 /*----------------------------------------------------------------*/
153
154 static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
155 static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
156
157 static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
158 {
159 unsigned ret = c->blocks_per_page_bits - 1;
160
161 BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
162
163 return ret;
164 }
165
166 #define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
167 #define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)])
168
169 #define dm_bufio_in_request() (!!current->bio_list)
170
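/*
 * Client lock helpers. dm_bufio_lock() passes dm_bufio_in_request() as the
 * lockdep subclass, so acquiring the lock from within a device-mapper
 * request routine is tracked separately from ordinary acquisitions.
 */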
171 static void dm_bufio_lock(struct dm_bufio_client *c)
172 {
173 mutex_lock_nested(&c->lock, dm_bufio_in_request());
174 }
175
176 static int dm_bufio_trylock(struct dm_bufio_client *c)
177 {
178 return mutex_trylock(&c->lock);
179 }
180
181 static void dm_bufio_unlock(struct dm_bufio_client *c)
182 {
183 mutex_unlock(&c->lock);
184 }
185
186 /*
187 * FIXME Move to sched.h?
188 */
189 #ifdef CONFIG_PREEMPT_VOLUNTARY
190 # define dm_bufio_cond_resched() \
191 do { \
192 if (unlikely(need_resched())) \
193 _cond_resched(); \
194 } while (0)
195 #else
196 # define dm_bufio_cond_resched() do { } while (0)
197 #endif
198
199 /*----------------------------------------------------------------*/
200
201 /*
202 * Default cache size: available memory divided by the ratio.
203 */
204 static unsigned long dm_bufio_default_cache_size;
205
206 /*
207 * Total cache size set by the user.
208 */
209 static unsigned long dm_bufio_cache_size;
210
211 /*
212 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
213 * at any time. If it disagrees, the user has changed cache size.
214 */
215 static unsigned long dm_bufio_cache_size_latch;
216
217 static DEFINE_SPINLOCK(param_spinlock);
218
219 /*
220 * Buffers are freed after this timeout
221 */
222 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
223
224 static unsigned long dm_bufio_peak_allocated;
225 static unsigned long dm_bufio_allocated_kmem_cache;
226 static unsigned long dm_bufio_allocated_get_free_pages;
227 static unsigned long dm_bufio_allocated_vmalloc;
228 static unsigned long dm_bufio_current_allocated;
229
230 /*----------------------------------------------------------------*/
231
232 /*
233 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
234 */
235 static unsigned long dm_bufio_cache_size_per_client;
236
237 /*
238 * The current number of clients.
239 */
240 static int dm_bufio_client_count;
241
242 /*
243 * The list of all clients.
244 */
245 static LIST_HEAD(dm_bufio_all_clients);
246
247 /*
248 * This mutex protects dm_bufio_cache_size_latch,
249 * dm_bufio_cache_size_per_client and dm_bufio_client_count
250 */
251 static DEFINE_MUTEX(dm_bufio_clients_lock);
252
253 /*----------------------------------------------------------------*/
254
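/*
 * Account buffer-data memory per allocation class (see the module parameters
 * at the end of this file) and track the peak total allocation.
 */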
255 static void adjust_total_allocated(enum data_mode data_mode, long diff)
256 {
257 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
258 &dm_bufio_allocated_kmem_cache,
259 &dm_bufio_allocated_get_free_pages,
260 &dm_bufio_allocated_vmalloc,
261 };
262
263 spin_lock(&param_spinlock);
264
265 *class_ptr[data_mode] += diff;
266
267 dm_bufio_current_allocated += diff;
268
269 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
270 dm_bufio_peak_allocated = dm_bufio_current_allocated;
271
272 spin_unlock(&param_spinlock);
273 }
274
275 /*
276 * Change the number of clients and recalculate per-client limit.
277 */
278 static void __cache_size_refresh(void)
279 {
280 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
281 BUG_ON(dm_bufio_client_count < 0);
282
283 dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
284
285 /*
286 * Use default if set to 0 and report the actual cache size used.
287 */
288 if (!dm_bufio_cache_size_latch) {
289 (void)cmpxchg(&dm_bufio_cache_size, 0,
290 dm_bufio_default_cache_size);
291 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
292 }
293
294 dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
295 (dm_bufio_client_count ? : 1);
296 }
297
298 /*
299 * Allocating buffer data.
300 *
301 * Small buffers are allocated with kmem_cache, to use space optimally.
302 *
303 * For large buffers, we choose between get_free_pages and vmalloc.
304 * Each has advantages and disadvantages.
305 *
306 * __get_free_pages can randomly fail if the memory is fragmented.
307 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
308 * as low as 128M) so using it for caching is not appropriate.
309 *
310 * If the allocation may fail we use __get_free_pages. Memory fragmentation
311 * won't have a fatal effect here, but it just causes flushes of some other
312 * buffers and more I/O will be performed. Don't use __get_free_pages if it
313 * always fails (i.e. order >= MAX_ORDER).
314 *
315 * If the allocation shouldn't fail we use __vmalloc. This is only for the
316 * initial reserve allocation, so there's no risk of wasting all vmalloc
317 * space.
318 */
319 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
320 enum data_mode *data_mode)
321 {
322 unsigned noio_flag;
323 void *ptr;
324
325 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
326 *data_mode = DATA_MODE_SLAB;
327 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
328 }
329
330 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
331 gfp_mask & __GFP_NORETRY) {
332 *data_mode = DATA_MODE_GET_FREE_PAGES;
333 return (void *)__get_free_pages(gfp_mask,
334 c->pages_per_block_bits);
335 }
336
337 *data_mode = DATA_MODE_VMALLOC;
338
339 /*
340 * __vmalloc allocates the data pages and auxiliary structures with
341 * gfp_flags that were specified, but pagetables are always allocated
342 * with GFP_KERNEL, no matter what was specified as gfp_mask.
343 *
344 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
345 * all allocations done by this process (including pagetables) are done
346 * as if GFP_NOIO was specified.
347 */
348
349 if (gfp_mask & __GFP_NORETRY)
350 noio_flag = memalloc_noio_save();
351
352 ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
353
354 if (gfp_mask & __GFP_NORETRY)
355 memalloc_noio_restore(noio_flag);
356
357 return ptr;
358 }
359
360 /*
361 * Free buffer's data.
362 */
363 static void free_buffer_data(struct dm_bufio_client *c,
364 void *data, enum data_mode data_mode)
365 {
366 switch (data_mode) {
367 case DATA_MODE_SLAB:
368 kmem_cache_free(DM_BUFIO_CACHE(c), data);
369 break;
370
371 case DATA_MODE_GET_FREE_PAGES:
372 free_pages((unsigned long)data, c->pages_per_block_bits);
373 break;
374
375 case DATA_MODE_VMALLOC:
376 vfree(data);
377 break;
378
379 default:
380 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
381 data_mode);
382 BUG();
383 }
384 }
385
386 /*
387 * Allocate buffer and its data.
388 */
389 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
390 {
391 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
392 gfp_mask);
393
394 if (!b)
395 return NULL;
396
397 b->c = c;
398
399 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
400 if (!b->data) {
401 kfree(b);
402 return NULL;
403 }
404
405 adjust_total_allocated(b->data_mode, (long)c->block_size);
406
407 return b;
408 }
409
410 /*
411 * Free buffer and its data.
412 */
413 static void free_buffer(struct dm_buffer *b)
414 {
415 struct dm_bufio_client *c = b->c;
416
417 adjust_total_allocated(b->data_mode, -(long)c->block_size);
418
419 free_buffer_data(c, b->data, b->data_mode);
420 kfree(b);
421 }
422
423 /*
424 * Link buffer to the hash list and clean or dirty queue.
425 */
426 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
427 {
428 struct dm_bufio_client *c = b->c;
429
430 c->n_buffers[dirty]++;
431 b->block = block;
432 b->list_mode = dirty;
433 list_add(&b->lru_list, &c->lru[dirty]);
434 hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
435 b->last_accessed = jiffies;
436 }
437
438 /*
439 * Unlink buffer from the hash list and dirty or clean queue.
440 */
441 static void __unlink_buffer(struct dm_buffer *b)
442 {
443 struct dm_bufio_client *c = b->c;
444
445 BUG_ON(!c->n_buffers[b->list_mode]);
446
447 c->n_buffers[b->list_mode]--;
448 hlist_del(&b->hash_list);
449 list_del(&b->lru_list);
450 }
451
452 /*
453 * Place the buffer to the head of dirty or clean LRU queue.
454 */
455 static void __relink_lru(struct dm_buffer *b, int dirty)
456 {
457 struct dm_bufio_client *c = b->c;
458
459 BUG_ON(!c->n_buffers[b->list_mode]);
460
461 c->n_buffers[b->list_mode]--;
462 c->n_buffers[dirty]++;
463 b->list_mode = dirty;
464 list_move(&b->lru_list, &c->lru[dirty]);
465 }
466
467 /*----------------------------------------------------------------
468 * Submit I/O on the buffer.
469 *
470 * Bio interface is faster but it has some problems:
471 * the vector list is limited (increasing this limit increases
472 * memory-consumption per buffer, so it is not viable);
473 *
474 * the memory must be direct-mapped, not vmalloced;
475 *
476 * the I/O driver can reject requests spuriously if it thinks that
477 * the requests are too big for the device or if they cross a
478 * controller-defined memory boundary.
479 *
480 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
481 * it is not vmalloced, try using the bio interface.
482 *
483 * If the buffer is big, if it is vmalloced or if the underlying device
484 * rejects the bio because it is too large, use dm-io layer to do the I/O.
485 * The dm-io layer splits the I/O into multiple requests, avoiding the above
486 * shortcomings.
487 *--------------------------------------------------------------*/
488
489 /*
490 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
491 * that the request was handled directly with the bio interface.
492 */
493 static void dmio_complete(unsigned long error, void *context)
494 {
495 struct dm_buffer *b = context;
496
497 b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
498 }
499
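/*
 * Issue I/O on the buffer through the dm-io interface (see the comment block
 * above for when this path is taken instead of the inline bio).
 */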
500 static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
501 bio_end_io_t *end_io)
502 {
503 int r;
504 struct dm_io_request io_req = {
505 .bi_rw = rw,
506 .notify.fn = dmio_complete,
507 .notify.context = b,
508 .client = b->c->dm_io,
509 };
510 struct dm_io_region region = {
511 .bdev = b->c->bdev,
512 .sector = block << b->c->sectors_per_block_bits,
513 .count = b->c->block_size >> SECTOR_SHIFT,
514 };
515
516 if (b->data_mode != DATA_MODE_VMALLOC) {
517 io_req.mem.type = DM_IO_KMEM;
518 io_req.mem.ptr.addr = b->data;
519 } else {
520 io_req.mem.type = DM_IO_VMA;
521 io_req.mem.ptr.vma = b->data;
522 }
523
524 b->bio.bi_end_io = end_io;
525
526 r = dm_io(&io_req, 1, &region, NULL);
527 if (r)
528 end_io(&b->bio, r);
529 }
530
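/*
 * Issue I/O using the bio embedded in struct dm_buffer. If the buffer data
 * cannot all be added to a bio with DM_BUFIO_INLINE_VECS page vectors, fall
 * back to use_dmio().
 */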
531 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
532 bio_end_io_t *end_io)
533 {
534 char *ptr;
535 int len;
536
537 bio_init(&b->bio);
538 b->bio.bi_io_vec = b->bio_vec;
539 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
540 b->bio.bi_sector = block << b->c->sectors_per_block_bits;
541 b->bio.bi_bdev = b->c->bdev;
542 b->bio.bi_end_io = end_io;
543
544 /*
545 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
546 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
547 */
548 ptr = b->data;
549 len = b->c->block_size;
550
551 if (len >= PAGE_SIZE)
552 BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
553 else
554 BUG_ON((unsigned long)ptr & (len - 1));
555
556 do {
557 if (!bio_add_page(&b->bio, virt_to_page(ptr),
558 len < PAGE_SIZE ? len : PAGE_SIZE,
559 virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
560 BUG_ON(b->c->block_size <= PAGE_SIZE);
561 use_dmio(b, rw, block, end_io);
562 return;
563 }
564
565 len -= PAGE_SIZE;
566 ptr += PAGE_SIZE;
567 } while (len > 0);
568
569 submit_bio(rw, &b->bio);
570 }
571
572 static void submit_io(struct dm_buffer *b, int rw, sector_t block,
573 bio_end_io_t *end_io)
574 {
575 if (rw == WRITE && b->c->write_callback)
576 b->c->write_callback(b);
577
578 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
579 b->data_mode != DATA_MODE_VMALLOC)
580 use_inline_bio(b, rw, block, end_io);
581 else
582 use_dmio(b, rw, block, end_io);
583 }
584
585 /*----------------------------------------------------------------
586 * Writing dirty buffers
587 *--------------------------------------------------------------*/
588
589 /*
590 * The endio routine for write.
591 *
592 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
593 * it.
594 */
595 static void write_endio(struct bio *bio, int error)
596 {
597 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
598
599 b->write_error = error;
600 if (unlikely(error)) {
601 struct dm_bufio_client *c = b->c;
602 (void)cmpxchg(&c->async_write_error, 0, error);
603 }
604
605 BUG_ON(!test_bit(B_WRITING, &b->state));
606
607 smp_mb__before_clear_bit();
608 clear_bit(B_WRITING, &b->state);
609 smp_mb__after_clear_bit();
610
611 wake_up_bit(&b->state, B_WRITING);
612 }
613
614 /*
615 * This function is called when wait_on_bit is actually waiting.
616 */
617 static int do_io_schedule(void *word)
618 {
619 io_schedule();
620
621 return 0;
622 }
623
624 /*
625 * Initiate a write on a dirty buffer, but don't wait for it.
626 *
627 * - If the buffer is not dirty, exit.
628 * - If there is some previous write going on, wait for it to finish (we can't
629 * have two writes on the same buffer simultaneously).
630 * - Submit our write and don't wait on it. We set B_WRITING indicating
631 * that there is a write in progress.
632 */
633 static void __write_dirty_buffer(struct dm_buffer *b)
634 {
635 if (!test_bit(B_DIRTY, &b->state))
636 return;
637
638 clear_bit(B_DIRTY, &b->state);
639 wait_on_bit_lock(&b->state, B_WRITING,
640 do_io_schedule, TASK_UNINTERRUPTIBLE);
641
642 submit_io(b, WRITE, b->block, write_endio);
643 }
644
645 /*
646 * Wait until any activity on the buffer finishes. Possibly write the
647 * buffer if it is dirty. When this function finishes, there is no I/O
648 * running on the buffer and the buffer is not dirty.
649 */
650 static void __make_buffer_clean(struct dm_buffer *b)
651 {
652 BUG_ON(b->hold_count);
653
654 if (!b->state) /* fast case */
655 return;
656
657 wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
658 __write_dirty_buffer(b);
659 wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
660 }
661
662 /*
663 * Find some buffer that is not held by anybody, clean it, unlink it and
664 * return it.
665 */
666 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
667 {
668 struct dm_buffer *b;
669
670 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
671 BUG_ON(test_bit(B_WRITING, &b->state));
672 BUG_ON(test_bit(B_DIRTY, &b->state));
673
674 if (!b->hold_count) {
675 __make_buffer_clean(b);
676 __unlink_buffer(b);
677 return b;
678 }
679 dm_bufio_cond_resched();
680 }
681
682 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
683 BUG_ON(test_bit(B_READING, &b->state));
684
685 if (!b->hold_count) {
686 __make_buffer_clean(b);
687 __unlink_buffer(b);
688 return b;
689 }
690 dm_bufio_cond_resched();
691 }
692
693 return NULL;
694 }
695
696 /*
697 * Wait until some other thread frees some buffer or releases the hold count
698 * on some buffer.
699 *
700 * This function is entered with c->lock held, drops it and regains it
701 * before exiting.
702 */
703 static void __wait_for_free_buffer(struct dm_bufio_client *c)
704 {
705 DECLARE_WAITQUEUE(wait, current);
706
707 add_wait_queue(&c->free_buffer_wait, &wait);
708 set_task_state(current, TASK_UNINTERRUPTIBLE);
709 dm_bufio_unlock(c);
710
711 io_schedule();
712
713 set_task_state(current, TASK_RUNNING);
714 remove_wait_queue(&c->free_buffer_wait, &wait);
715
716 dm_bufio_lock(c);
717 }
718
719 enum new_flag {
720 NF_FRESH = 0,
721 NF_READ = 1,
722 NF_GET = 2,
723 NF_PREFETCH = 3
724 };
725
726 /*
727 * Allocate a new buffer. If the allocation is not possible, wait until
728 * some other thread frees a buffer.
729 *
730 * May drop the lock and regain it.
731 */
732 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
733 {
734 struct dm_buffer *b;
735
736 /*
737 * dm-bufio is resistant to allocation failures (it just keeps
738 * one buffer reserved in cases all the allocations fail).
739 * So set flags to not try too hard:
740 * GFP_NOIO: don't recurse into the I/O layer
741 * __GFP_NORETRY: don't retry and rather return failure
742 * __GFP_NOMEMALLOC: don't use emergency reserves
743 * __GFP_NOWARN: don't print a warning in case of failure
744 *
745 * For debugging, if we set the cache size to 1, no new buffers will
746 * be allocated.
747 */
748 while (1) {
749 if (dm_bufio_cache_size_latch != 1) {
750 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
751 if (b)
752 return b;
753 }
754
755 if (nf == NF_PREFETCH)
756 return NULL;
757
758 if (!list_empty(&c->reserved_buffers)) {
759 b = list_entry(c->reserved_buffers.next,
760 struct dm_buffer, lru_list);
761 list_del(&b->lru_list);
762 c->need_reserved_buffers++;
763
764 return b;
765 }
766
767 b = __get_unclaimed_buffer(c);
768 if (b)
769 return b;
770
771 __wait_for_free_buffer(c);
772 }
773 }
774
775 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
776 {
777 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
778
779 if (!b)
780 return NULL;
781
782 if (c->alloc_callback)
783 c->alloc_callback(b);
784
785 return b;
786 }
787
788 /*
789 * Free a buffer and wake other threads waiting for free buffers.
790 */
791 static void __free_buffer_wake(struct dm_buffer *b)
792 {
793 struct dm_bufio_client *c = b->c;
794
795 if (!c->need_reserved_buffers)
796 free_buffer(b);
797 else {
798 list_add(&b->lru_list, &c->reserved_buffers);
799 c->need_reserved_buffers--;
800 }
801
802 wake_up(&c->free_buffer_wait);
803 }
804
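/*
 * Start writeback of dirty buffers, oldest first, without waiting for
 * completion. Clean buffers parked on the dirty list (their write already
 * finished) are moved back to the clean list. With no_wait set, stop at the
 * first buffer that still has a write in flight.
 */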
805 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
806 {
807 struct dm_buffer *b, *tmp;
808
809 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
810 BUG_ON(test_bit(B_READING, &b->state));
811
812 if (!test_bit(B_DIRTY, &b->state) &&
813 !test_bit(B_WRITING, &b->state)) {
814 __relink_lru(b, LIST_CLEAN);
815 continue;
816 }
817
818 if (no_wait && test_bit(B_WRITING, &b->state))
819 return;
820
821 __write_dirty_buffer(b);
822 dm_bufio_cond_resched();
823 }
824 }
825
826 /*
827 * Get writeback threshold and buffer limit for a given client.
828 */
829 static void __get_memory_limit(struct dm_bufio_client *c,
830 unsigned long *threshold_buffers,
831 unsigned long *limit_buffers)
832 {
833 unsigned long buffers;
834
835 if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
836 mutex_lock(&dm_bufio_clients_lock);
837 __cache_size_refresh();
838 mutex_unlock(&dm_bufio_clients_lock);
839 }
840
841 buffers = dm_bufio_cache_size_per_client >>
842 (c->sectors_per_block_bits + SECTOR_SHIFT);
843
844 if (buffers < DM_BUFIO_MIN_BUFFERS)
845 buffers = DM_BUFIO_MIN_BUFFERS;
846
847 *limit_buffers = buffers;
848 *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
849 }
850
851 /*
852 * Check if we're over the watermark.
853 * If we are over "limit_buffers", free unclaimed buffers until we get under it.
854 * If we are over "threshold_buffers", start writing back dirty buffers.
855 */
856 static void __check_watermark(struct dm_bufio_client *c)
857 {
858 unsigned long threshold_buffers, limit_buffers;
859
860 __get_memory_limit(c, &threshold_buffers, &limit_buffers);
861
862 while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
863 limit_buffers) {
864
865 struct dm_buffer *b = __get_unclaimed_buffer(c);
866
867 if (!b)
868 return;
869
870 __free_buffer_wake(b);
871 dm_bufio_cond_resched();
872 }
873
874 if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
875 __write_dirty_buffers_async(c, 1);
876 }
877
878 /*
879 * Find a buffer in the hash.
880 */
881 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
882 {
883 struct dm_buffer *b;
884
885 hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
886 hash_list) {
887 dm_bufio_cond_resched();
888 if (b->block == block)
889 return b;
890 }
891
892 return NULL;
893 }
894
895 /*----------------------------------------------------------------
896 * Getting a buffer
897 *--------------------------------------------------------------*/
898
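/*
 * Look up the buffer for "block", allocating a new one if necessary
 * (depending on nf). On success the buffer is returned with a hold count
 * taken; *need_submit tells the caller whether a read must be submitted.
 * Called with the client lock held.
 */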
899 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
900 enum new_flag nf, int *need_submit)
901 {
902 struct dm_buffer *b, *new_b = NULL;
903
904 *need_submit = 0;
905
906 b = __find(c, block);
907 if (b)
908 goto found_buffer;
909
910 if (nf == NF_GET)
911 return NULL;
912
913 new_b = __alloc_buffer_wait(c, nf);
914 if (!new_b)
915 return NULL;
916
917 /*
918 * We've had a period where the mutex was unlocked, so need to
919 * recheck the hash table.
920 */
921 b = __find(c, block);
922 if (b) {
923 __free_buffer_wake(new_b);
924 goto found_buffer;
925 }
926
927 __check_watermark(c);
928
929 b = new_b;
930 b->hold_count = 1;
931 b->read_error = 0;
932 b->write_error = 0;
933 __link_buffer(b, block, LIST_CLEAN);
934
935 if (nf == NF_FRESH) {
936 b->state = 0;
937 return b;
938 }
939
940 b->state = 1 << B_READING;
941 *need_submit = 1;
942
943 return b;
944
945 found_buffer:
946 if (nf == NF_PREFETCH)
947 return NULL;
948 /*
949 * Note: it is essential that we don't wait for the buffer to be
950 * read if dm_bufio_get function is used. Both dm_bufio_get and
951 * dm_bufio_prefetch can be used in the driver request routine.
952 * If the user called both dm_bufio_prefetch and dm_bufio_get on
953 * the same buffer, it would deadlock if we waited.
954 */
955 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
956 return NULL;
957
958 b->hold_count++;
959 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
960 test_bit(B_WRITING, &b->state));
961 return b;
962 }
963
964 /*
965 * The endio routine for reading: set the error, clear the bit and wake up
966 * anyone waiting on the buffer.
967 */
968 static void read_endio(struct bio *bio, int error)
969 {
970 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
971
972 b->read_error = error;
973
974 BUG_ON(!test_bit(B_READING, &b->state));
975
976 smp_mb__before_clear_bit();
977 clear_bit(B_READING, &b->state);
978 smp_mb__after_clear_bit();
979
980 wake_up_bit(&b->state, B_READING);
981 }
982
983 /*
984 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
985 * functions is similar except that dm_bufio_new doesn't read the
986 * buffer from the disk (assuming that the caller overwrites all the data
987 * and uses dm_bufio_mark_buffer_dirty to write new data back).
988 */
989 static void *new_read(struct dm_bufio_client *c, sector_t block,
990 enum new_flag nf, struct dm_buffer **bp)
991 {
992 int need_submit;
993 struct dm_buffer *b;
994
995 dm_bufio_lock(c);
996 b = __bufio_new(c, block, nf, &need_submit);
997 dm_bufio_unlock(c);
998
999 if (!b)
1000 return b;
1001
1002 if (need_submit)
1003 submit_io(b, READ, b->block, read_endio);
1004
1005 wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
1006
1007 if (b->read_error) {
1008 int error = b->read_error;
1009
1010 dm_bufio_release(b);
1011
1012 return ERR_PTR(error);
1013 }
1014
1015 *bp = b;
1016
1017 return b->data;
1018 }
1019
1020 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1021 struct dm_buffer **bp)
1022 {
1023 return new_read(c, block, NF_GET, bp);
1024 }
1025 EXPORT_SYMBOL_GPL(dm_bufio_get);
1026
1027 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1028 struct dm_buffer **bp)
1029 {
1030 BUG_ON(dm_bufio_in_request());
1031
1032 return new_read(c, block, NF_READ, bp);
1033 }
1034 EXPORT_SYMBOL_GPL(dm_bufio_read);
1035
1036 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1037 struct dm_buffer **bp)
1038 {
1039 BUG_ON(dm_bufio_in_request());
1040
1041 return new_read(c, block, NF_FRESH, bp);
1042 }
1043 EXPORT_SYMBOL_GPL(dm_bufio_new);
1044
1045 void dm_bufio_prefetch(struct dm_bufio_client *c,
1046 sector_t block, unsigned n_blocks)
1047 {
1048 struct blk_plug plug;
1049
1050 BUG_ON(dm_bufio_in_request());
1051
1052 blk_start_plug(&plug);
1053 dm_bufio_lock(c);
1054
1055 for (; n_blocks--; block++) {
1056 int need_submit;
1057 struct dm_buffer *b;
1058 b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
1059 if (unlikely(b != NULL)) {
1060 dm_bufio_unlock(c);
1061
1062 if (need_submit)
1063 submit_io(b, READ, b->block, read_endio);
1064 dm_bufio_release(b);
1065
1066 dm_bufio_cond_resched();
1067
1068 if (!n_blocks)
1069 goto flush_plug;
1070 dm_bufio_lock(c);
1071 }
1072
1073 }
1074
1075 dm_bufio_unlock(c);
1076
1077 flush_plug:
1078 blk_finish_plug(&plug);
1079 }
1080 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1081
1082 void dm_bufio_release(struct dm_buffer *b)
1083 {
1084 struct dm_bufio_client *c = b->c;
1085
1086 dm_bufio_lock(c);
1087
1088 BUG_ON(!b->hold_count);
1089
1090 b->hold_count--;
1091 if (!b->hold_count) {
1092 wake_up(&c->free_buffer_wait);
1093
1094 /*
1095 * If there were errors on the buffer, and the buffer is not
1096 * to be written, free the buffer. There is no point in caching
1097 * an invalid buffer.
1098 */
1099 if ((b->read_error || b->write_error) &&
1100 !test_bit(B_READING, &b->state) &&
1101 !test_bit(B_WRITING, &b->state) &&
1102 !test_bit(B_DIRTY, &b->state)) {
1103 __unlink_buffer(b);
1104 __free_buffer_wake(b);
1105 }
1106 }
1107
1108 dm_bufio_unlock(c);
1109 }
1110 EXPORT_SYMBOL_GPL(dm_bufio_release);
1111
1112 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1113 {
1114 struct dm_bufio_client *c = b->c;
1115
1116 dm_bufio_lock(c);
1117
1118 BUG_ON(test_bit(B_READING, &b->state));
1119
1120 if (!test_and_set_bit(B_DIRTY, &b->state))
1121 __relink_lru(b, LIST_DIRTY);
1122
1123 dm_bufio_unlock(c);
1124 }
1125 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1126
1127 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1128 {
1129 BUG_ON(dm_bufio_in_request());
1130
1131 dm_bufio_lock(c);
1132 __write_dirty_buffers_async(c, 0);
1133 dm_bufio_unlock(c);
1134 }
1135 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1136
1137 /*
1138 * For performance, it is essential that the buffers are written asynchronously
1139 * and simultaneously (so that the block layer can merge the writes) and then
1140 * waited upon.
1141 *
1142 * Finally, we flush hardware disk cache.
1143 */
1144 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1145 {
1146 int a, f;
1147 unsigned long buffers_processed = 0;
1148 struct dm_buffer *b, *tmp;
1149
1150 dm_bufio_lock(c);
1151 __write_dirty_buffers_async(c, 0);
1152
1153 again:
1154 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1155 int dropped_lock = 0;
1156
1157 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1158 buffers_processed++;
1159
1160 BUG_ON(test_bit(B_READING, &b->state));
1161
1162 if (test_bit(B_WRITING, &b->state)) {
1163 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1164 dropped_lock = 1;
1165 b->hold_count++;
1166 dm_bufio_unlock(c);
1167 wait_on_bit(&b->state, B_WRITING,
1168 do_io_schedule,
1169 TASK_UNINTERRUPTIBLE);
1170 dm_bufio_lock(c);
1171 b->hold_count--;
1172 } else
1173 wait_on_bit(&b->state, B_WRITING,
1174 do_io_schedule,
1175 TASK_UNINTERRUPTIBLE);
1176 }
1177
1178 if (!test_bit(B_DIRTY, &b->state) &&
1179 !test_bit(B_WRITING, &b->state))
1180 __relink_lru(b, LIST_CLEAN);
1181
1182 dm_bufio_cond_resched();
1183
1184 /*
1185 * If we dropped the lock, the list is no longer consistent,
1186 * so we must restart the search.
1187 *
1188 * In the most common case, the buffer just processed is
1189 * relinked to the clean list, so we won't loop scanning the
1190 * same buffer again and again.
1191 *
1192 * This may livelock if there is another thread simultaneously
1193 * dirtying buffers, so we count the number of buffers walked
1194 * and if it exceeds the total number of buffers, it means that
1195 * someone is doing some writes simultaneously with us. In
1196 * this case, stop, dropping the lock.
1197 */
1198 if (dropped_lock)
1199 goto again;
1200 }
1201 wake_up(&c->free_buffer_wait);
1202 dm_bufio_unlock(c);
1203
1204 a = xchg(&c->async_write_error, 0);
1205 f = dm_bufio_issue_flush(c);
1206 if (a)
1207 return a;
1208
1209 return f;
1210 }
1211 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1212
1213 /*
1214 * Use dm-io to send an empty barrier and flush the device.
1215 */
1216 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1217 {
1218 struct dm_io_request io_req = {
1219 .bi_rw = WRITE_FLUSH,
1220 .mem.type = DM_IO_KMEM,
1221 .mem.ptr.addr = NULL,
1222 .client = c->dm_io,
1223 };
1224 struct dm_io_region io_reg = {
1225 .bdev = c->bdev,
1226 .sector = 0,
1227 .count = 0,
1228 };
1229
1230 BUG_ON(dm_bufio_in_request());
1231
1232 return dm_io(&io_req, 1, &io_reg, NULL);
1233 }
1234 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1235
1236 /*
1237 * We first delete any other buffer that may be at that new location.
1238 *
1239 * Then, we write the buffer to the original location if it was dirty.
1240 *
1241 * Then, if we are the only one who is holding the buffer, relink the buffer
1242 * in the hash queue for the new location.
1243 *
1244 * If there was someone else holding the buffer, we write it to the new
1245 * location but not relink it, because that other user needs to have the buffer
1246 * at the same place.
1247 */
1248 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1249 {
1250 struct dm_bufio_client *c = b->c;
1251 struct dm_buffer *new;
1252
1253 BUG_ON(dm_bufio_in_request());
1254
1255 dm_bufio_lock(c);
1256
1257 retry:
1258 new = __find(c, new_block);
1259 if (new) {
1260 if (new->hold_count) {
1261 __wait_for_free_buffer(c);
1262 goto retry;
1263 }
1264
1265 /*
1266 * FIXME: Is there any point waiting for a write that's going
1267 * to be overwritten in a bit?
1268 */
1269 __make_buffer_clean(new);
1270 __unlink_buffer(new);
1271 __free_buffer_wake(new);
1272 }
1273
1274 BUG_ON(!b->hold_count);
1275 BUG_ON(test_bit(B_READING, &b->state));
1276
1277 __write_dirty_buffer(b);
1278 if (b->hold_count == 1) {
1279 wait_on_bit(&b->state, B_WRITING,
1280 do_io_schedule, TASK_UNINTERRUPTIBLE);
1281 set_bit(B_DIRTY, &b->state);
1282 __unlink_buffer(b);
1283 __link_buffer(b, new_block, LIST_DIRTY);
1284 } else {
1285 sector_t old_block;
1286 wait_on_bit_lock(&b->state, B_WRITING,
1287 do_io_schedule, TASK_UNINTERRUPTIBLE);
1288 /*
1289 * Relink buffer to "new_block" so that write_callback
1290 * sees "new_block" as a block number.
1291 * After the write, link the buffer back to old_block.
1292 * All this must be done in bufio lock, so that block number
1293 * change isn't visible to other threads.
1294 */
1295 old_block = b->block;
1296 __unlink_buffer(b);
1297 __link_buffer(b, new_block, b->list_mode);
1298 submit_io(b, WRITE, new_block, write_endio);
1299 wait_on_bit(&b->state, B_WRITING,
1300 do_io_schedule, TASK_UNINTERRUPTIBLE);
1301 __unlink_buffer(b);
1302 __link_buffer(b, old_block, b->list_mode);
1303 }
1304
1305 dm_bufio_unlock(c);
1306 dm_bufio_release(b);
1307 }
1308 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1309
1310 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1311 {
1312 return c->block_size;
1313 }
1314 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1315
1316 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1317 {
1318 return i_size_read(c->bdev->bd_inode) >>
1319 (SECTOR_SHIFT + c->sectors_per_block_bits);
1320 }
1321 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1322
1323 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1324 {
1325 return b->block;
1326 }
1327 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1328
1329 void *dm_bufio_get_block_data(struct dm_buffer *b)
1330 {
1331 return b->data;
1332 }
1333 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1334
1335 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1336 {
1337 return b + 1;
1338 }
1339 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1340
1341 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1342 {
1343 return b->c;
1344 }
1345 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1346
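/*
 * Write back and free all buffers belonging to the client; report any buffer
 * that is still held as leaked.
 */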
1347 static void drop_buffers(struct dm_bufio_client *c)
1348 {
1349 struct dm_buffer *b;
1350 int i;
1351
1352 BUG_ON(dm_bufio_in_request());
1353
1354 /*
1355 * An optimization so that the buffers are not written one-by-one.
1356 */
1357 dm_bufio_write_dirty_buffers_async(c);
1358
1359 dm_bufio_lock(c);
1360
1361 while ((b = __get_unclaimed_buffer(c)))
1362 __free_buffer_wake(b);
1363
1364 for (i = 0; i < LIST_SIZE; i++)
1365 list_for_each_entry(b, &c->lru[i], lru_list)
1366 DMERR("leaked buffer %llx, hold count %u, list %d",
1367 (unsigned long long)b->block, b->hold_count, i);
1368
1369 for (i = 0; i < LIST_SIZE; i++)
1370 BUG_ON(!list_empty(&c->lru[i]));
1371
1372 dm_bufio_unlock(c);
1373 }
1374
1375 /*
1376 * Test if the buffer is unused and too old, and commit it.
1377 * If noio is set, we must not do any I/O because we hold
1378 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
1379 * a different bufio client.
1380 */
1381 static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
1382 unsigned long max_jiffies)
1383 {
1384 if (jiffies - b->last_accessed < max_jiffies)
1385 return 1;
1386
1387 if (!(gfp & __GFP_IO)) {
1388 if (test_bit(B_READING, &b->state) ||
1389 test_bit(B_WRITING, &b->state) ||
1390 test_bit(B_DIRTY, &b->state))
1391 return 1;
1392 }
1393
1394 if (b->hold_count)
1395 return 1;
1396
1397 __make_buffer_clean(b);
1398 __unlink_buffer(b);
1399 __free_buffer_wake(b);
1400
1401 return 0;
1402 }
1403
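/*
 * Shrinker helper: walk the clean and then the dirty LRU list from the
 * oldest end and free unused buffers until nr_to_scan of them have been
 * reclaimed.
 */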
1404 static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1405 struct shrink_control *sc)
1406 {
1407 int l;
1408 struct dm_buffer *b, *tmp;
1409
1410 for (l = 0; l < LIST_SIZE; l++) {
1411 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
1412 if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
1413 !--nr_to_scan)
1414 return;
1415 dm_bufio_cond_resched();
1416 }
1417 }
1418
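/*
 * Shrinker callback: free up to sc->nr_to_scan old buffers and return the
 * number of buffers that remain cached (capped at INT_MAX), or, if the
 * client lock cannot be taken without blocking, -1 (0 when the caller only
 * asked for a count).
 */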
1419 static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
1420 {
1421 struct dm_bufio_client *c =
1422 container_of(shrinker, struct dm_bufio_client, shrinker);
1423 unsigned long r;
1424 unsigned long nr_to_scan = sc->nr_to_scan;
1425
1426 if (sc->gfp_mask & __GFP_IO)
1427 dm_bufio_lock(c);
1428 else if (!dm_bufio_trylock(c))
1429 return !nr_to_scan ? 0 : -1;
1430
1431 if (nr_to_scan)
1432 __scan(c, nr_to_scan, sc);
1433
1434 r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1435 if (r > INT_MAX)
1436 r = INT_MAX;
1437
1438 dm_bufio_unlock(c);
1439
1440 return r;
1441 }
1442
1443 /*
1444 * Create the buffering interface
1445 */
1446 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1447 unsigned reserved_buffers, unsigned aux_size,
1448 void (*alloc_callback)(struct dm_buffer *),
1449 void (*write_callback)(struct dm_buffer *))
1450 {
1451 int r;
1452 struct dm_bufio_client *c;
1453 unsigned i;
1454
1455 BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1456 (block_size & (block_size - 1)));
1457
1458 c = kmalloc(sizeof(*c), GFP_KERNEL);
1459 if (!c) {
1460 r = -ENOMEM;
1461 goto bad_client;
1462 }
1463 c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
1464 if (!c->cache_hash) {
1465 r = -ENOMEM;
1466 goto bad_hash;
1467 }
1468
1469 c->bdev = bdev;
1470 c->block_size = block_size;
1471 c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
1472 c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
1473 ffs(block_size) - 1 - PAGE_SHIFT : 0;
1474 c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
1475 PAGE_SHIFT - (ffs(block_size) - 1) : 0);
1476
1477 c->aux_size = aux_size;
1478 c->alloc_callback = alloc_callback;
1479 c->write_callback = write_callback;
1480
1481 for (i = 0; i < LIST_SIZE; i++) {
1482 INIT_LIST_HEAD(&c->lru[i]);
1483 c->n_buffers[i] = 0;
1484 }
1485
1486 for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1487 INIT_HLIST_HEAD(&c->cache_hash[i]);
1488
1489 mutex_init(&c->lock);
1490 INIT_LIST_HEAD(&c->reserved_buffers);
1491 c->need_reserved_buffers = reserved_buffers;
1492
1493 init_waitqueue_head(&c->free_buffer_wait);
1494 c->async_write_error = 0;
1495
1496 c->dm_io = dm_io_client_create();
1497 if (IS_ERR(c->dm_io)) {
1498 r = PTR_ERR(c->dm_io);
1499 goto bad_dm_io;
1500 }
1501
1502 mutex_lock(&dm_bufio_clients_lock);
1503 if (c->blocks_per_page_bits) {
1504 if (!DM_BUFIO_CACHE_NAME(c)) {
1505 DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1506 if (!DM_BUFIO_CACHE_NAME(c)) {
1507 r = -ENOMEM;
1508 mutex_unlock(&dm_bufio_clients_lock);
1509 goto bad_cache;
1510 }
1511 }
1512
1513 if (!DM_BUFIO_CACHE(c)) {
1514 DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1515 c->block_size,
1516 c->block_size, 0, NULL);
1517 if (!DM_BUFIO_CACHE(c)) {
1518 r = -ENOMEM;
1519 mutex_unlock(&dm_bufio_clients_lock);
1520 goto bad_cache;
1521 }
1522 }
1523 }
1524 mutex_unlock(&dm_bufio_clients_lock);
1525
1526 while (c->need_reserved_buffers) {
1527 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1528
1529 if (!b) {
1530 r = -ENOMEM;
1531 goto bad_buffer;
1532 }
1533 __free_buffer_wake(b);
1534 }
1535
1536 mutex_lock(&dm_bufio_clients_lock);
1537 dm_bufio_client_count++;
1538 list_add(&c->client_list, &dm_bufio_all_clients);
1539 __cache_size_refresh();
1540 mutex_unlock(&dm_bufio_clients_lock);
1541
1542 c->shrinker.shrink = shrink;
1543 c->shrinker.seeks = 1;
1544 c->shrinker.batch = 0;
1545 register_shrinker(&c->shrinker);
1546
1547 return c;
1548
1549 bad_buffer:
1550 bad_cache:
1551 while (!list_empty(&c->reserved_buffers)) {
1552 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1553 struct dm_buffer, lru_list);
1554 list_del(&b->lru_list);
1555 free_buffer(b);
1556 }
1557 dm_io_client_destroy(c->dm_io);
1558 bad_dm_io:
1559 vfree(c->cache_hash);
1560 bad_hash:
1561 kfree(c);
1562 bad_client:
1563 return ERR_PTR(r);
1564 }
1565 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
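/*
 * Typical use of the client interface, as a minimal sketch (the block size,
 * reserved-buffer count, "block" value and error handling below are
 * illustrative, not taken from an in-tree user):
 *
 *	struct dm_bufio_client *c;
 *	struct dm_buffer *bp;
 *	void *data;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	data = dm_bufio_read(c, block, &bp);
 *	if (!IS_ERR(data)) {
 *		... examine or modify data ...
 *		dm_bufio_mark_buffer_dirty(bp);
 *		dm_bufio_release(bp);
 *		dm_bufio_write_dirty_buffers(c);
 *	}
 *
 *	dm_bufio_client_destroy(c);
 */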
1566
1567 /*
1568 * Free the buffering interface.
1569 * It is required that there are no references on any buffers.
1570 */
1571 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1572 {
1573 unsigned i;
1574
1575 drop_buffers(c);
1576
1577 unregister_shrinker(&c->shrinker);
1578
1579 mutex_lock(&dm_bufio_clients_lock);
1580
1581 list_del(&c->client_list);
1582 dm_bufio_client_count--;
1583 __cache_size_refresh();
1584
1585 mutex_unlock(&dm_bufio_clients_lock);
1586
1587 for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1588 BUG_ON(!hlist_empty(&c->cache_hash[i]));
1589
1590 BUG_ON(c->need_reserved_buffers);
1591
1592 while (!list_empty(&c->reserved_buffers)) {
1593 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1594 struct dm_buffer, lru_list);
1595 list_del(&b->lru_list);
1596 free_buffer(b);
1597 }
1598
1599 for (i = 0; i < LIST_SIZE; i++)
1600 if (c->n_buffers[i])
1601 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1602
1603 for (i = 0; i < LIST_SIZE; i++)
1604 BUG_ON(c->n_buffers[i]);
1605
1606 dm_io_client_destroy(c->dm_io);
1607 vfree(c->cache_hash);
1608 kfree(c);
1609 }
1610 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1611
1612 static void cleanup_old_buffers(void)
1613 {
1614 unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age);
1615 struct dm_bufio_client *c;
1616
1617 if (max_age > ULONG_MAX / HZ)
1618 max_age = ULONG_MAX / HZ;
1619
1620 mutex_lock(&dm_bufio_clients_lock);
1621 list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
1622 if (!dm_bufio_trylock(c))
1623 continue;
1624
1625 while (!list_empty(&c->lru[LIST_CLEAN])) {
1626 struct dm_buffer *b;
1627 b = list_entry(c->lru[LIST_CLEAN].prev,
1628 struct dm_buffer, lru_list);
1629 if (__cleanup_old_buffer(b, 0, max_age * HZ))
1630 break;
1631 dm_bufio_cond_resched();
1632 }
1633
1634 dm_bufio_unlock(c);
1635 dm_bufio_cond_resched();
1636 }
1637 mutex_unlock(&dm_bufio_clients_lock);
1638 }
1639
1640 static struct workqueue_struct *dm_bufio_wq;
1641 static struct delayed_work dm_bufio_work;
1642
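/*
 * Delayed-work handler: evict buffers that have been unused for longer than
 * dm_bufio_max_age and re-arm the work item.
 */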
1643 static void work_fn(struct work_struct *w)
1644 {
1645 cleanup_old_buffers();
1646
1647 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1648 DM_BUFIO_WORK_TIMER_SECS * HZ);
1649 }
1650
1651 /*----------------------------------------------------------------
1652 * Module setup
1653 *--------------------------------------------------------------*/
1654
1655 /*
1656 * This is called only once for the whole dm_bufio module.
1657 * It initializes the memory limit.
1658 */
1659 static int __init dm_bufio_init(void)
1660 {
1661 __u64 mem;
1662
1663 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1664 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1665
1666 mem = (__u64)((totalram_pages - totalhigh_pages) *
1667 DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1668
1669 if (mem > ULONG_MAX)
1670 mem = ULONG_MAX;
1671
1672 #ifdef CONFIG_MMU
1673 /*
1674 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1675 * in fs/proc/internal.h
1676 */
1677 if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1678 mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
1679 #endif
1680
1681 dm_bufio_default_cache_size = mem;
1682
1683 mutex_lock(&dm_bufio_clients_lock);
1684 __cache_size_refresh();
1685 mutex_unlock(&dm_bufio_clients_lock);
1686
1687 dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
1688 if (!dm_bufio_wq)
1689 return -ENOMEM;
1690
1691 INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1692 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1693 DM_BUFIO_WORK_TIMER_SECS * HZ);
1694
1695 return 0;
1696 }
1697
1698 /*
1699 * This is called once when unloading the dm_bufio module.
1700 */
1701 static void __exit dm_bufio_exit(void)
1702 {
1703 int bug = 0;
1704 int i;
1705
1706 cancel_delayed_work_sync(&dm_bufio_work);
1707 destroy_workqueue(dm_bufio_wq);
1708
1709 for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
1710 struct kmem_cache *kc = dm_bufio_caches[i];
1711
1712 if (kc)
1713 kmem_cache_destroy(kc);
1714 }
1715
1716 for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1717 kfree(dm_bufio_cache_names[i]);
1718
1719 if (dm_bufio_client_count) {
1720 DMCRIT("%s: dm_bufio_client_count leaked: %d",
1721 __func__, dm_bufio_client_count);
1722 bug = 1;
1723 }
1724
1725 if (dm_bufio_current_allocated) {
1726 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1727 __func__, dm_bufio_current_allocated);
1728 bug = 1;
1729 }
1730
1731 if (dm_bufio_allocated_get_free_pages) {
1732 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1733 __func__, dm_bufio_allocated_get_free_pages);
1734 bug = 1;
1735 }
1736
1737 if (dm_bufio_allocated_vmalloc) {
1738 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1739 __func__, dm_bufio_allocated_vmalloc);
1740 bug = 1;
1741 }
1742
1743 if (bug)
1744 BUG();
1745 }
1746
1747 module_init(dm_bufio_init)
1748 module_exit(dm_bufio_exit)
1749
1750 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1751 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1752
1753 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1754 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1755
1756 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1757 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1758
1759 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1760 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1761
1762 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1763 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1764
1765 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1766 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1767
1768 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1769 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
1770
1771 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1772 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1773 MODULE_LICENSE("GPL");
1774