1 /*
2 * Copyright (C) 2009-2011 Red Hat, Inc.
3 *
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
5 *
6 * This file is released under the GPL.
7 */
8
9 #include <linux/dm-bufio.h>
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/sched/mm.h>
15 #include <linux/jiffies.h>
16 #include <linux/vmalloc.h>
17 #include <linux/shrinker.h>
18 #include <linux/module.h>
19 #include <linux/rbtree.h>
20 #include <linux/stacktrace.h>
21
22 #include <trace/hooks/mm.h>
23
24 #define DM_MSG_PREFIX "bufio"
25
26 /*
27 * Memory management policy:
28 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
29 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
30 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 * Start background writeback when the number of dirty buffers exceeds
 * DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
33 */
34 #define DM_BUFIO_MIN_BUFFERS 8
35
36 #define DM_BUFIO_MEMORY_PERCENT 2
37 #define DM_BUFIO_VMALLOC_PERCENT 25
38 #define DM_BUFIO_WRITEBACK_RATIO 3
39 #define DM_BUFIO_LOW_WATERMARK_RATIO 16
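/*
 * Note: with DM_BUFIO_WRITEBACK_RATIO == 3, background writeback of dirty
 * buffers starts once a client holds more than three dirty buffers per clean
 * buffer (see __check_watermark).  With DM_BUFIO_LOW_WATERMARK_RATIO == 16,
 * the global cleanup work frees buffers until total usage drops below
 * cache_size - cache_size/16, i.e. roughly 15/16 of the configured limit
 * (see do_global_cleanup).
 */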
40
41 /*
42 * Check buffer ages in this interval (seconds)
43 */
44 #define DM_BUFIO_WORK_TIMER_SECS 30
45
46 /*
47 * Free buffers when they are older than this (seconds)
48 */
49 #define DM_BUFIO_DEFAULT_AGE_SECS 300
50
51 /*
 * The number of bytes of cached data to keep around.
53 */
54 #define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
55
56 /*
57 * Align buffer writes to this boundary.
58 * Tests show that SSDs have the highest IOPS when using 4k writes.
59 */
60 #define DM_BUFIO_WRITE_ALIGN 4096
61
62 /*
63 * dm_buffer->list_mode
64 */
65 #define LIST_CLEAN 0
66 #define LIST_DIRTY 1
67 #define LIST_SIZE 2
68
69 /*
70 * Linking of buffers:
71 * All buffers are linked to buffer_tree with their node field.
72 *
73 * Clean buffers that are not being written (B_WRITING not set)
74 * are linked to lru[LIST_CLEAN] with their lru_list field.
75 *
76 * Dirty and clean buffers that are being written are linked to
77 * lru[LIST_DIRTY] with their lru_list field. When the write
78 * finishes, the buffer cannot be relinked immediately (because we
79 * are in an interrupt context and relinking requires process
80 * context), so some clean-not-writing buffers can be held on
81 * dirty_lru too. They are later added to lru in the process
82 * context.
83 */
84 struct dm_bufio_client {
85 struct mutex lock;
86
87 struct list_head lru[LIST_SIZE];
88 unsigned long n_buffers[LIST_SIZE];
89
90 struct block_device *bdev;
91 unsigned block_size;
92 s8 sectors_per_block_bits;
93 void (*alloc_callback)(struct dm_buffer *);
94 void (*write_callback)(struct dm_buffer *);
95
96 struct kmem_cache *slab_buffer;
97 struct kmem_cache *slab_cache;
98 struct dm_io_client *dm_io;
99
100 struct list_head reserved_buffers;
101 unsigned need_reserved_buffers;
102
103 unsigned minimum_buffers;
104
105 struct rb_root buffer_tree;
106 wait_queue_head_t free_buffer_wait;
107
108 sector_t start;
109
110 int async_write_error;
111
112 struct list_head client_list;
113
114 struct shrinker shrinker;
115 struct work_struct shrink_work;
116 atomic_long_t need_shrink;
117 };
118
119 /*
120 * Buffer state bits.
121 */
122 #define B_READING 0
123 #define B_WRITING 1
124 #define B_DIRTY 2
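/*
 * B_READING and B_WRITING are waited on with wait_on_bit_io() and signalled
 * with wake_up_bit(), which is why the endio paths below bracket clear_bit()
 * with smp_mb__before_atomic()/smp_mb__after_atomic() before waking waiters.
 * B_DIRTY marks data that still has to be written back.
 */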
125
126 /*
127 * Describes how the block was allocated:
128 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
129 * See the comment at alloc_buffer_data.
130 */
131 enum data_mode {
132 DATA_MODE_SLAB = 0,
133 DATA_MODE_GET_FREE_PAGES = 1,
134 DATA_MODE_VMALLOC = 2,
135 DATA_MODE_LIMIT = 3
136 };
137
138 struct dm_buffer {
139 struct rb_node node;
140 struct list_head lru_list;
141 struct list_head global_list;
142 sector_t block;
143 void *data;
144 unsigned char data_mode; /* DATA_MODE_* */
145 unsigned char list_mode; /* LIST_* */
146 blk_status_t read_error;
147 blk_status_t write_error;
148 unsigned accessed;
149 unsigned hold_count;
150 unsigned long state;
151 unsigned long last_accessed;
152 unsigned dirty_start;
153 unsigned dirty_end;
154 unsigned write_start;
155 unsigned write_end;
156 struct dm_bufio_client *c;
157 struct list_head write_list;
158 void (*end_io)(struct dm_buffer *, blk_status_t);
159 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
160 #define MAX_STACK 10
161 unsigned int stack_len;
162 unsigned long stack_entries[MAX_STACK];
163 #endif
164 };
165
166 /*----------------------------------------------------------------*/
167
168 #define dm_bufio_in_request() (!!current->bio_list)
169
static void dm_bufio_lock(struct dm_bufio_client *c)
171 {
172 mutex_lock_nested(&c->lock, dm_bufio_in_request());
173 }
174
static int dm_bufio_trylock(struct dm_bufio_client *c)
176 {
177 return mutex_trylock(&c->lock);
178 }
179
static void dm_bufio_unlock(struct dm_bufio_client *c)
181 {
182 mutex_unlock(&c->lock);
183 }
184
185 /*----------------------------------------------------------------*/
186
187 /*
188 * Default cache size: available memory divided by the ratio.
189 */
190 static unsigned long dm_bufio_default_cache_size;
191
192 /*
193 * Total cache size set by the user.
194 */
195 static unsigned long dm_bufio_cache_size;
196
197 /*
198 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
199 * at any time. If it disagrees, the user has changed cache size.
200 */
201 static unsigned long dm_bufio_cache_size_latch;
202
203 static DEFINE_SPINLOCK(global_spinlock);
204
205 static LIST_HEAD(global_queue);
206
207 static unsigned long global_num = 0;
208
209 /*
210 * Buffers are freed after this timeout
211 */
212 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
213 static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
214
215 static unsigned long dm_bufio_peak_allocated;
216 static unsigned long dm_bufio_allocated_kmem_cache;
217 static unsigned long dm_bufio_allocated_get_free_pages;
218 static unsigned long dm_bufio_allocated_vmalloc;
219 static unsigned long dm_bufio_current_allocated;
220
221 /*----------------------------------------------------------------*/
222
223 /*
224 * The current number of clients.
225 */
226 static int dm_bufio_client_count;
227
228 /*
229 * The list of all clients.
230 */
231 static LIST_HEAD(dm_bufio_all_clients);
232
233 /*
234 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
235 */
236 static DEFINE_MUTEX(dm_bufio_clients_lock);
237
238 static struct workqueue_struct *dm_bufio_wq;
239 static struct delayed_work dm_bufio_cleanup_old_work;
240 static struct work_struct dm_bufio_replacement_work;
241
242
243 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
245 {
246 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
247 }
248 #endif
249
250 /*----------------------------------------------------------------
251 * A red/black tree acts as an index for all the buffers.
252 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
254 {
255 struct rb_node *n = c->buffer_tree.rb_node;
256 struct dm_buffer *b;
257
258 while (n) {
259 b = container_of(n, struct dm_buffer, node);
260
261 if (b->block == block)
262 return b;
263
264 n = block < b->block ? n->rb_left : n->rb_right;
265 }
266
267 return NULL;
268 }
269
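/*
 * Like __find(), but if there is no buffer for the exact block, return the
 * buffer with the smallest block number greater than the requested one
 * (used by dm_bufio_forget_buffers to walk a range).
 */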
static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
271 {
272 struct rb_node *n = c->buffer_tree.rb_node;
273 struct dm_buffer *b;
274 struct dm_buffer *best = NULL;
275
276 while (n) {
277 b = container_of(n, struct dm_buffer, node);
278
279 if (b->block == block)
280 return b;
281
282 if (block <= b->block) {
283 n = n->rb_left;
284 best = b;
285 } else {
286 n = n->rb_right;
287 }
288 }
289
290 return best;
291 }
292
static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
294 {
295 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
296 struct dm_buffer *found;
297
298 while (*new) {
299 found = container_of(*new, struct dm_buffer, node);
300
301 if (found->block == b->block) {
302 BUG_ON(found != b);
303 return;
304 }
305
306 parent = *new;
307 new = b->block < found->block ?
308 &found->node.rb_left : &found->node.rb_right;
309 }
310
311 rb_link_node(&b->node, parent, new);
312 rb_insert_color(&b->node, &c->buffer_tree);
313 }
314
static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
316 {
317 rb_erase(&b->node, &c->buffer_tree);
318 }
319
320 /*----------------------------------------------------------------*/
321
static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
323 {
324 unsigned char data_mode;
325 long diff;
326
327 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
328 &dm_bufio_allocated_kmem_cache,
329 &dm_bufio_allocated_get_free_pages,
330 &dm_bufio_allocated_vmalloc,
331 };
332
333 data_mode = b->data_mode;
334 diff = (long)b->c->block_size;
335 if (unlink)
336 diff = -diff;
337
338 spin_lock(&global_spinlock);
339
340 *class_ptr[data_mode] += diff;
341
342 dm_bufio_current_allocated += diff;
343
344 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
345 dm_bufio_peak_allocated = dm_bufio_current_allocated;
346
347 b->accessed = 1;
348
349 if (!unlink) {
350 list_add(&b->global_list, &global_queue);
351 global_num++;
352 if (dm_bufio_current_allocated > dm_bufio_cache_size)
353 queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
354 } else {
355 list_del(&b->global_list);
356 global_num--;
357 }
358
359 spin_unlock(&global_spinlock);
360 }
361
362 /*
363 * Change the number of clients and recalculate per-client limit.
364 */
static void __cache_size_refresh(void)
366 {
367 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
368 BUG_ON(dm_bufio_client_count < 0);
369
370 dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
371
372 /*
373 * Use default if set to 0 and report the actual cache size used.
374 */
375 if (!dm_bufio_cache_size_latch) {
376 (void)cmpxchg(&dm_bufio_cache_size, 0,
377 dm_bufio_default_cache_size);
378 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
379 }
380 }
381
382 /*
383 * Allocating buffer data.
384 *
385 * Small buffers are allocated with kmem_cache, to use space optimally.
386 *
387 * For large buffers, we choose between get_free_pages and vmalloc.
388 * Each has advantages and disadvantages.
389 *
390 * __get_free_pages can randomly fail if the memory is fragmented.
391 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
392 * as low as 128M) so using it for caching is not appropriate.
393 *
394 * If the allocation may fail we use __get_free_pages. Memory fragmentation
395 * won't have a fatal effect here, but it just causes flushes of some other
396 * buffers and more I/O will be performed. Don't use __get_free_pages if it
397 * always fails (i.e. order >= MAX_ORDER).
398 *
399 * If the allocation shouldn't fail we use __vmalloc. This is only for the
400 * initial reserve allocation, so there's no risk of wasting all vmalloc
401 * space.
402 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
405 {
406 if (unlikely(c->slab_cache != NULL)) {
407 *data_mode = DATA_MODE_SLAB;
408 return kmem_cache_alloc(c->slab_cache, gfp_mask);
409 }
410
411 if (c->block_size <= KMALLOC_MAX_SIZE &&
412 gfp_mask & __GFP_NORETRY) {
413 *data_mode = DATA_MODE_GET_FREE_PAGES;
414 return (void *)__get_free_pages(gfp_mask,
415 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
416 }
417
418 *data_mode = DATA_MODE_VMALLOC;
419
420 /*
421 * __vmalloc allocates the data pages and auxiliary structures with
422 * gfp_flags that were specified, but pagetables are always allocated
423 * with GFP_KERNEL, no matter what was specified as gfp_mask.
424 *
425 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
426 * all allocations done by this process (including pagetables) are done
427 * as if GFP_NOIO was specified.
428 */
429 if (gfp_mask & __GFP_NORETRY) {
430 unsigned noio_flag = memalloc_noio_save();
431 void *ptr = __vmalloc(c->block_size, gfp_mask);
432
433 memalloc_noio_restore(noio_flag);
434 return ptr;
435 }
436
437 return __vmalloc(c->block_size, gfp_mask);
438 }
439
440 /*
441 * Free buffer's data.
442 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
445 {
446 switch (data_mode) {
447 case DATA_MODE_SLAB:
448 kmem_cache_free(c->slab_cache, data);
449 break;
450
451 case DATA_MODE_GET_FREE_PAGES:
452 free_pages((unsigned long)data,
453 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
454 break;
455
456 case DATA_MODE_VMALLOC:
457 vfree(data);
458 break;
459
460 default:
461 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
462 data_mode);
463 BUG();
464 }
465 }
466
467 /*
468 * Allocate buffer and its data.
469 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
471 {
472 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
473
474 if (!b)
475 return NULL;
476
477 b->c = c;
478
479 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
480 if (!b->data) {
481 kmem_cache_free(c->slab_buffer, b);
482 return NULL;
483 }
484
485 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
486 b->stack_len = 0;
487 #endif
488 return b;
489 }
490
491 /*
492 * Free buffer and its data.
493 */
static void free_buffer(struct dm_buffer *b)
495 {
496 struct dm_bufio_client *c = b->c;
497
498 free_buffer_data(c, b->data, b->data_mode);
499 kmem_cache_free(c->slab_buffer, b);
500 }
501
502 /*
503 * Link buffer to the buffer tree and clean or dirty queue.
504 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
506 {
507 struct dm_bufio_client *c = b->c;
508
509 c->n_buffers[dirty]++;
510 b->block = block;
511 b->list_mode = dirty;
512 list_add(&b->lru_list, &c->lru[dirty]);
513 __insert(b->c, b);
514 b->last_accessed = jiffies;
515
516 adjust_total_allocated(b, false);
517 }
518
519 /*
520 * Unlink buffer from the buffer tree and dirty or clean queue.
521 */
static void __unlink_buffer(struct dm_buffer *b)
523 {
524 struct dm_bufio_client *c = b->c;
525
526 BUG_ON(!c->n_buffers[b->list_mode]);
527
528 c->n_buffers[b->list_mode]--;
529 __remove(b->c, b);
530 list_del(&b->lru_list);
531
532 adjust_total_allocated(b, true);
533 }
534
535 /*
536 * Place the buffer to the head of dirty or clean LRU queue.
537 */
static void __relink_lru(struct dm_buffer *b, int dirty)
539 {
540 struct dm_bufio_client *c = b->c;
541
542 b->accessed = 1;
543
544 BUG_ON(!c->n_buffers[b->list_mode]);
545
546 c->n_buffers[b->list_mode]--;
547 c->n_buffers[dirty]++;
548 b->list_mode = dirty;
549 list_move(&b->lru_list, &c->lru[dirty]);
550 b->last_accessed = jiffies;
551 }
552
553 /*----------------------------------------------------------------
554 * Submit I/O on the buffer.
555 *
556 * Bio interface is faster but it has some problems:
557 * the vector list is limited (increasing this limit increases
558 * memory-consumption per buffer, so it is not viable);
559 *
560 * the memory must be direct-mapped, not vmalloced;
561 *
 * If the buffer is not vmalloced, try using the bio interface.
564 *
565 * If the buffer is big, if it is vmalloced or if the underlying device
566 * rejects the bio because it is too large, use dm-io layer to do the I/O.
567 * The dm-io layer splits the I/O into multiple requests, avoiding the above
568 * shortcomings.
569 *--------------------------------------------------------------*/
570
571 /*
572 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
573 * that the request was handled directly with bio interface.
574 */
static void dmio_complete(unsigned long error, void *context)
576 {
577 struct dm_buffer *b = context;
578
579 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
580 }
581
static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
		     unsigned n_sectors, unsigned offset,
		     unsigned short ioprio)
585 {
586 int r;
587 struct dm_io_request io_req = {
588 .bi_op = rw,
589 .bi_op_flags = 0,
590 .notify.fn = dmio_complete,
591 .notify.context = b,
592 .client = b->c->dm_io,
593 };
594 struct dm_io_region region = {
595 .bdev = b->c->bdev,
596 .sector = sector,
597 .count = n_sectors,
598 };
599
600 if (b->data_mode != DATA_MODE_VMALLOC) {
601 io_req.mem.type = DM_IO_KMEM;
602 io_req.mem.ptr.addr = (char *)b->data + offset;
603 } else {
604 io_req.mem.type = DM_IO_VMA;
605 io_req.mem.ptr.vma = (char *)b->data + offset;
606 }
607
	r = dm_io(&io_req, 1, &region, NULL, ioprio);
609 if (unlikely(r))
610 b->end_io(b, errno_to_blk_status(r));
611 }
612
static void bio_complete(struct bio *bio)
614 {
615 struct dm_buffer *b = bio->bi_private;
616 blk_status_t status = bio->bi_status;
617 bio_put(bio);
618 b->end_io(b, status);
619 }
620
static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
		    unsigned n_sectors, unsigned offset,
		    unsigned short ioprio)
624 {
625 struct bio *bio;
626 char *ptr;
627 unsigned vec_size, len;
628
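	/*
	 * For sub-page or non-power-of-two block sizes the shift below rounds
	 * down to zero; reserve two extra bio vecs so that such a buffer
	 * still fits even when it straddles page boundaries.
	 */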
629 vec_size = b->c->block_size >> PAGE_SHIFT;
630 if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
631 vec_size += 2;
632
633 bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
634 if (!bio) {
635 dmio:
636 use_dmio(b, rw, sector, n_sectors, offset, ioprio);
637 return;
638 }
639
640 bio->bi_iter.bi_sector = sector;
641 bio_set_dev(bio, b->c->bdev);
642 bio_set_op_attrs(bio, rw, 0);
643 bio->bi_end_io = bio_complete;
644 bio->bi_private = b;
645 bio->bi_ioprio = ioprio;
646
647 ptr = (char *)b->data + offset;
648 len = n_sectors << SECTOR_SHIFT;
649
650 do {
651 unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
652 if (!bio_add_page(bio, virt_to_page(ptr), this_step,
653 offset_in_page(ptr))) {
654 bio_put(bio);
655 goto dmio;
656 }
657
658 len -= this_step;
659 ptr += this_step;
660 } while (len > 0);
661
662 submit_bio(bio);
663 }
664
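/*
 * Map a block number to its first sector on the device and apply the
 * client's sector offset.  Example: with 4 KiB blocks (8 sectors per block)
 * and start == 0, block 5 maps to sector 40.
 */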
static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
666 {
667 sector_t sector;
668
669 if (likely(c->sectors_per_block_bits >= 0))
670 sector = block << c->sectors_per_block_bits;
671 else
672 sector = block * (c->block_size >> SECTOR_SHIFT);
673 sector += c->start;
674
675 return sector;
676 }
677
static void submit_io(struct dm_buffer *b, int rw, unsigned short ioprio,
		      void (*end_io)(struct dm_buffer *, blk_status_t))
680 {
681 unsigned n_sectors;
682 sector_t sector;
683 unsigned offset, end;
684
685 b->end_io = end_io;
686
687 sector = block_to_sector(b->c, b->block);
688
689 if (rw != REQ_OP_WRITE) {
690 n_sectors = b->c->block_size >> SECTOR_SHIFT;
691 offset = 0;
692 } else {
693 if (b->c->write_callback)
694 b->c->write_callback(b);
695 offset = b->write_start;
696 end = b->write_end;
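		/*
		 * Round the dirty byte range out to DM_BUFIO_WRITE_ALIGN
		 * boundaries: e.g. a dirty range of [100, 700) becomes
		 * [0, 4096) with the default 4 KiB alignment.
		 */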
697 offset &= -DM_BUFIO_WRITE_ALIGN;
698 end += DM_BUFIO_WRITE_ALIGN - 1;
699 end &= -DM_BUFIO_WRITE_ALIGN;
700 if (unlikely(end > b->c->block_size))
701 end = b->c->block_size;
702
703 sector += offset >> SECTOR_SHIFT;
704 n_sectors = (end - offset) >> SECTOR_SHIFT;
705 }
706
707 if (b->data_mode != DATA_MODE_VMALLOC)
708 use_bio(b, rw, sector, n_sectors, offset, ioprio);
709 else
710 use_dmio(b, rw, sector, n_sectors, offset, ioprio);
711 }
712
713 /*----------------------------------------------------------------
714 * Writing dirty buffers
715 *--------------------------------------------------------------*/
716
717 /*
718 * The endio routine for write.
719 *
720 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
721 * it.
722 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
724 {
725 b->write_error = status;
726 if (unlikely(status)) {
727 struct dm_bufio_client *c = b->c;
728
729 (void)cmpxchg(&c->async_write_error, 0,
730 blk_status_to_errno(status));
731 }
732
733 BUG_ON(!test_bit(B_WRITING, &b->state));
734
735 smp_mb__before_atomic();
736 clear_bit(B_WRITING, &b->state);
737 smp_mb__after_atomic();
738
739 wake_up_bit(&b->state, B_WRITING);
740 }
741
742 /*
743 * Initiate a write on a dirty buffer, but don't wait for it.
744 *
745 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we can't
747 * have two writes on the same buffer simultaneously).
748 * - Submit our write and don't wait on it. We set B_WRITING indicating
749 * that there is a write in progress.
750 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
753 {
754 if (!test_bit(B_DIRTY, &b->state))
755 return;
756
757 clear_bit(B_DIRTY, &b->state);
758 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
759
760 b->write_start = b->dirty_start;
761 b->write_end = b->dirty_end;
762
763 if (!write_list)
764 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
765 else
766 list_add_tail(&b->write_list, write_list);
767 }
768
static void __flush_write_list(struct list_head *write_list)
770 {
771 struct blk_plug plug;
772 blk_start_plug(&plug);
773 while (!list_empty(write_list)) {
774 struct dm_buffer *b =
775 list_entry(write_list->next, struct dm_buffer, write_list);
776 list_del(&b->write_list);
777 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
778 cond_resched();
779 }
780 blk_finish_plug(&plug);
781 }
782
783 /*
784 * Wait until any activity on the buffer finishes. Possibly write the
785 * buffer if it is dirty. When this function finishes, there is no I/O
786 * running on the buffer and the buffer is not dirty.
787 */
static void __make_buffer_clean(struct dm_buffer *b)
789 {
790 BUG_ON(b->hold_count);
791
792 if (!b->state) /* fast case */
793 return;
794
795 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
796 __write_dirty_buffer(b, NULL);
797 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
798 }
799
800 /*
801 * Find some buffer that is not held by anybody, clean it, unlink it and
802 * return it.
803 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
805 {
806 struct dm_buffer *b;
807
808 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
809 BUG_ON(test_bit(B_WRITING, &b->state));
810 BUG_ON(test_bit(B_DIRTY, &b->state));
811
812 if (!b->hold_count) {
813 __make_buffer_clean(b);
814 __unlink_buffer(b);
815 return b;
816 }
817 cond_resched();
818 }
819
820 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
821 BUG_ON(test_bit(B_READING, &b->state));
822
823 if (!b->hold_count) {
824 __make_buffer_clean(b);
825 __unlink_buffer(b);
826 return b;
827 }
828 cond_resched();
829 }
830
831 return NULL;
832 }
833
834 /*
835 * Wait until some other threads free some buffer or release hold count on
836 * some buffer.
837 *
838 * This function is entered with c->lock held, drops it and regains it
839 * before exiting.
840 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
842 {
843 DECLARE_WAITQUEUE(wait, current);
844
845 add_wait_queue(&c->free_buffer_wait, &wait);
846 set_current_state(TASK_UNINTERRUPTIBLE);
847 dm_bufio_unlock(c);
848
849 io_schedule();
850
851 remove_wait_queue(&c->free_buffer_wait, &wait);
852
853 dm_bufio_lock(c);
854 }
855
856 enum new_flag {
857 NF_FRESH = 0,
858 NF_READ = 1,
859 NF_GET = 2,
860 NF_PREFETCH = 3
861 };
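/*
 * NF_FRESH    - the caller will overwrite the whole block, don't read it
 * NF_READ     - read the block from disk and wait for the read to finish
 * NF_GET      - only return the buffer if it is already cached and not
 *		 currently being read
 * NF_PREFETCH - start a read but do not wait for it
 */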
862
863 /*
864 * Allocate a new buffer. If the allocation is not possible, wait until
865 * some other thread frees a buffer.
866 *
867 * May drop the lock and regain it.
868 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
870 {
871 struct dm_buffer *b;
872 bool tried_noio_alloc = false;
873
874 /*
875 * dm-bufio is resistant to allocation failures (it just keeps
876 * one buffer reserved in cases all the allocations fail).
877 * So set flags to not try too hard:
878 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
879 * mutex and wait ourselves.
880 * __GFP_NORETRY: don't retry and rather return failure
881 * __GFP_NOMEMALLOC: don't use emergency reserves
882 * __GFP_NOWARN: don't print a warning in case of failure
883 *
884 * For debugging, if we set the cache size to 1, no new buffers will
885 * be allocated.
886 */
887 while (1) {
888 if (dm_bufio_cache_size_latch != 1) {
889 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
890 if (b)
891 return b;
892 }
893
894 if (nf == NF_PREFETCH)
895 return NULL;
896
897 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
898 dm_bufio_unlock(c);
899 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
900 dm_bufio_lock(c);
901 if (b)
902 return b;
903 tried_noio_alloc = true;
904 }
905
906 if (!list_empty(&c->reserved_buffers)) {
907 b = list_entry(c->reserved_buffers.next,
908 struct dm_buffer, lru_list);
909 list_del(&b->lru_list);
910 c->need_reserved_buffers++;
911
912 return b;
913 }
914
915 b = __get_unclaimed_buffer(c);
916 if (b)
917 return b;
918
919 __wait_for_free_buffer(c);
920 }
921 }
922
static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
924 {
925 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
926
927 if (!b)
928 return NULL;
929
930 if (c->alloc_callback)
931 c->alloc_callback(b);
932
933 return b;
934 }
935
936 /*
937 * Free a buffer and wake other threads waiting for free buffers.
938 */
static void __free_buffer_wake(struct dm_buffer *b)
940 {
941 struct dm_bufio_client *c = b->c;
942
943 if (!c->need_reserved_buffers)
944 free_buffer(b);
945 else {
946 list_add(&b->lru_list, &c->reserved_buffers);
947 c->need_reserved_buffers--;
948 }
949
950 wake_up(&c->free_buffer_wait);
951 }
952
static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
955 {
956 struct dm_buffer *b, *tmp;
957
958 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
959 BUG_ON(test_bit(B_READING, &b->state));
960
961 if (!test_bit(B_DIRTY, &b->state) &&
962 !test_bit(B_WRITING, &b->state)) {
963 __relink_lru(b, LIST_CLEAN);
964 continue;
965 }
966
967 if (no_wait && test_bit(B_WRITING, &b->state))
968 return;
969
970 __write_dirty_buffer(b, write_list);
971 cond_resched();
972 }
973 }
974
975 /*
976 * Check if we're over watermark.
 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the
 * number of clean buffers, start writing them back asynchronously.
979 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
982 {
983 if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
984 __write_dirty_buffers_async(c, 1, write_list);
985 }
986
987 /*----------------------------------------------------------------
988 * Getting a buffer
989 *--------------------------------------------------------------*/
990
static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
994 {
995 struct dm_buffer *b, *new_b = NULL;
996
997 *need_submit = 0;
998
999 b = __find(c, block);
1000 if (b)
1001 goto found_buffer;
1002
1003 if (nf == NF_GET)
1004 return NULL;
1005
1006 new_b = __alloc_buffer_wait(c, nf);
1007 if (!new_b)
1008 return NULL;
1009
1010 /*
1011 * We've had a period where the mutex was unlocked, so need to
1012 * recheck the buffer tree.
1013 */
1014 b = __find(c, block);
1015 if (b) {
1016 __free_buffer_wake(new_b);
1017 goto found_buffer;
1018 }
1019
1020 __check_watermark(c, write_list);
1021
1022 b = new_b;
1023 b->hold_count = 1;
1024 b->read_error = 0;
1025 b->write_error = 0;
1026 __link_buffer(b, block, LIST_CLEAN);
1027
1028 if (nf == NF_FRESH) {
1029 b->state = 0;
1030 return b;
1031 }
1032
1033 b->state = 1 << B_READING;
1034 *need_submit = 1;
1035
1036 return b;
1037
1038 found_buffer:
1039 if (nf == NF_PREFETCH)
1040 return NULL;
1041 /*
1042 * Note: it is essential that we don't wait for the buffer to be
1043 * read if dm_bufio_get function is used. Both dm_bufio_get and
1044 * dm_bufio_prefetch can be used in the driver request routine.
1045 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1046 * the same buffer, it would deadlock if we waited.
1047 */
1048 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1049 return NULL;
1050
1051 b->hold_count++;
1052 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1053 test_bit(B_WRITING, &b->state));
1054 return b;
1055 }
1056
1057 /*
1058 * The endio routine for reading: set the error, clear the bit and wake up
1059 * anyone waiting on the buffer.
1060 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
1062 {
1063 b->read_error = status;
1064
1065 BUG_ON(!test_bit(B_READING, &b->state));
1066
1067 smp_mb__before_atomic();
1068 clear_bit(B_READING, &b->state);
1069 smp_mb__after_atomic();
1070
1071 wake_up_bit(&b->state, B_READING);
1072 }
1073
1074 /*
1075 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1076 * functions is similar except that dm_bufio_new doesn't read the
1077 * buffer from the disk (assuming that the caller overwrites all the data
1078 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1079 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp,
		      unsigned short ioprio)
1083 {
1084 int need_submit;
1085 struct dm_buffer *b;
1086
1087 LIST_HEAD(write_list);
1088
1089 dm_bufio_lock(c);
1090 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1091 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1092 if (b && b->hold_count == 1)
1093 buffer_record_stack(b);
1094 #endif
1095 dm_bufio_unlock(c);
1096
1097 __flush_write_list(&write_list);
1098
1099 if (!b)
1100 return NULL;
1101
1102 if (need_submit)
1103 submit_io(b, REQ_OP_READ, ioprio, read_endio);
1104
1105 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1106
1107 if (b->read_error) {
1108 int error = blk_status_to_errno(b->read_error);
1109
1110 dm_bufio_release(b);
1111
1112 return ERR_PTR(error);
1113 }
1114
1115 *bp = b;
1116
1117 return b->data;
1118 }
1119
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
1122 {
1123 return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
1124 }
1125 EXPORT_SYMBOL_GPL(dm_bufio_get);
1126
static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
			     struct dm_buffer **bp, unsigned short ioprio)
1129 {
1130 BUG_ON(dm_bufio_in_request());
1131
1132 return new_read(c, block, NF_READ, bp, ioprio);
1133 }
1134
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
1137 {
1138 return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT);
1139 }
1140 EXPORT_SYMBOL_GPL(dm_bufio_read);
1141
void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
				struct dm_buffer **bp, unsigned short ioprio)
1144 {
1145 return __dm_bufio_read(c, block, bp, ioprio);
1146 }
1147 EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);
1148
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
1151 {
1152 BUG_ON(dm_bufio_in_request());
1153
1154 return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
1155 }
1156 EXPORT_SYMBOL_GPL(dm_bufio_new);
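/*
 * Minimal usage sketch of the read/modify/write cycle (error handling
 * trimmed; "payload" stands in for whatever the caller wants to store):
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_read(c, block, &buf);
 *
 *	if (!IS_ERR(data)) {
 *		memcpy(data, payload, dm_bufio_get_block_size(c));
 *		dm_bufio_mark_buffer_dirty(buf);
 *		dm_bufio_release(buf);
 *		dm_bufio_write_dirty_buffers(c);
 *	}
 */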
1157
static void __dm_bufio_prefetch(struct dm_bufio_client *c,
				sector_t block, unsigned int n_blocks,
				unsigned short ioprio)
1161 {
1162 struct blk_plug plug;
1163
1164 LIST_HEAD(write_list);
1165
1166 BUG_ON(dm_bufio_in_request());
1167
1168 blk_start_plug(&plug);
1169 dm_bufio_lock(c);
1170
1171 for (; n_blocks--; block++) {
1172 int need_submit;
1173 struct dm_buffer *b;
1174 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1175 &write_list);
1176 if (unlikely(!list_empty(&write_list))) {
1177 dm_bufio_unlock(c);
1178 blk_finish_plug(&plug);
1179 __flush_write_list(&write_list);
1180 blk_start_plug(&plug);
1181 dm_bufio_lock(c);
1182 }
1183 if (unlikely(b != NULL)) {
1184 dm_bufio_unlock(c);
1185
1186 if (need_submit)
1187 submit_io(b, REQ_OP_READ, ioprio, read_endio);
1188 dm_bufio_release(b);
1189
1190 cond_resched();
1191
1192 if (!n_blocks)
1193 goto flush_plug;
1194 dm_bufio_lock(c);
1195 }
1196 }
1197
1198 dm_bufio_unlock(c);
1199
1200 flush_plug:
1201 blk_finish_plug(&plug);
1202 }
1203
void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
1205 {
1206 return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT);
1207 }
1208 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1209
void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
				   unsigned int n_blocks, unsigned short ioprio)
1212 {
1213 return __dm_bufio_prefetch(c, block, n_blocks, ioprio);
1214 }
1215 EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);
1216
void dm_bufio_release(struct dm_buffer *b)
1218 {
1219 struct dm_bufio_client *c = b->c;
1220
1221 dm_bufio_lock(c);
1222
1223 BUG_ON(!b->hold_count);
1224
1225 b->hold_count--;
1226 if (!b->hold_count) {
1227 wake_up(&c->free_buffer_wait);
1228
1229 /*
1230 * If there were errors on the buffer, and the buffer is not
1231 * to be written, free the buffer. There is no point in caching
1232 * invalid buffer.
1233 */
1234 if ((b->read_error || b->write_error) &&
1235 !test_bit(B_READING, &b->state) &&
1236 !test_bit(B_WRITING, &b->state) &&
1237 !test_bit(B_DIRTY, &b->state)) {
1238 __unlink_buffer(b);
1239 __free_buffer_wake(b);
1240 }
1241 }
1242
1243 dm_bufio_unlock(c);
1244 }
1245 EXPORT_SYMBOL_GPL(dm_bufio_release);
1246
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end)
1249 {
1250 struct dm_bufio_client *c = b->c;
1251
1252 BUG_ON(start >= end);
1253 BUG_ON(end > b->c->block_size);
1254
1255 dm_bufio_lock(c);
1256
1257 BUG_ON(test_bit(B_READING, &b->state));
1258
1259 if (!test_and_set_bit(B_DIRTY, &b->state)) {
1260 b->dirty_start = start;
1261 b->dirty_end = end;
1262 __relink_lru(b, LIST_DIRTY);
1263 } else {
1264 if (start < b->dirty_start)
1265 b->dirty_start = start;
1266 if (end > b->dirty_end)
1267 b->dirty_end = end;
1268 }
1269
1270 dm_bufio_unlock(c);
1271 }
1272 EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
1273
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1275 {
1276 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
1277 }
1278 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1279
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1281 {
1282 LIST_HEAD(write_list);
1283
1284 BUG_ON(dm_bufio_in_request());
1285
1286 dm_bufio_lock(c);
1287 __write_dirty_buffers_async(c, 0, &write_list);
1288 dm_bufio_unlock(c);
1289 __flush_write_list(&write_list);
1290 }
1291 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1292
1293 /*
1294 * For performance, it is essential that the buffers are written asynchronously
1295 * and simultaneously (so that the block layer can merge the writes) and then
1296 * waited upon.
1297 *
1298 * Finally, we flush hardware disk cache.
1299 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1301 {
1302 int a, f;
1303 unsigned long buffers_processed = 0;
1304 struct dm_buffer *b, *tmp;
1305
1306 LIST_HEAD(write_list);
1307
1308 dm_bufio_lock(c);
1309 __write_dirty_buffers_async(c, 0, &write_list);
1310 dm_bufio_unlock(c);
1311 __flush_write_list(&write_list);
1312 dm_bufio_lock(c);
1313
1314 again:
1315 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1316 int dropped_lock = 0;
1317
1318 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1319 buffers_processed++;
1320
1321 BUG_ON(test_bit(B_READING, &b->state));
1322
1323 if (test_bit(B_WRITING, &b->state)) {
1324 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1325 dropped_lock = 1;
1326 b->hold_count++;
1327 dm_bufio_unlock(c);
1328 wait_on_bit_io(&b->state, B_WRITING,
1329 TASK_UNINTERRUPTIBLE);
1330 dm_bufio_lock(c);
1331 b->hold_count--;
1332 } else
1333 wait_on_bit_io(&b->state, B_WRITING,
1334 TASK_UNINTERRUPTIBLE);
1335 }
1336
1337 if (!test_bit(B_DIRTY, &b->state) &&
1338 !test_bit(B_WRITING, &b->state))
1339 __relink_lru(b, LIST_CLEAN);
1340
1341 cond_resched();
1342
1343 /*
1344 * If we dropped the lock, the list is no longer consistent,
1345 * so we must restart the search.
1346 *
1347 * In the most common case, the buffer just processed is
1348 * relinked to the clean list, so we won't loop scanning the
1349 * same buffer again and again.
1350 *
1351 * This may livelock if there is another thread simultaneously
1352 * dirtying buffers, so we count the number of buffers walked
1353 * and if it exceeds the total number of buffers, it means that
1354 * someone is doing some writes simultaneously with us. In
1355 * this case, stop, dropping the lock.
1356 */
1357 if (dropped_lock)
1358 goto again;
1359 }
1360 wake_up(&c->free_buffer_wait);
1361 dm_bufio_unlock(c);
1362
1363 a = xchg(&c->async_write_error, 0);
1364 f = dm_bufio_issue_flush(c);
1365 if (a)
1366 return a;
1367
1368 return f;
1369 }
1370 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1371
1372 /*
1373 * Use dm-io to send an empty barrier to flush the device.
1374 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
1376 {
1377 struct dm_io_request io_req = {
1378 .bi_op = REQ_OP_WRITE,
1379 .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
1380 .mem.type = DM_IO_KMEM,
1381 .mem.ptr.addr = NULL,
1382 .client = c->dm_io,
1383 };
1384 struct dm_io_region io_reg = {
1385 .bdev = c->bdev,
1386 .sector = 0,
1387 .count = 0,
1388 };
1389
1390 BUG_ON(dm_bufio_in_request());
1391
1392 return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
1393 }
1394 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1395
1396 /*
1397 * Use dm-io to send a discard request to flush the device.
1398 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
1400 {
1401 struct dm_io_request io_req = {
1402 .bi_op = REQ_OP_DISCARD,
1403 .bi_op_flags = REQ_SYNC,
1404 .mem.type = DM_IO_KMEM,
1405 .mem.ptr.addr = NULL,
1406 .client = c->dm_io,
1407 };
1408 struct dm_io_region io_reg = {
1409 .bdev = c->bdev,
1410 .sector = block_to_sector(c, block),
1411 .count = block_to_sector(c, count),
1412 };
1413
1414 BUG_ON(dm_bufio_in_request());
1415
1416 return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
1417 }
1418 EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
1419
1420 /*
1421 * We first delete any other buffer that may be at that new location.
1422 *
1423 * Then, we write the buffer to the original location if it was dirty.
1424 *
1425 * Then, if we are the only one who is holding the buffer, relink the buffer
1426 * in the buffer tree for the new location.
1427 *
1428 * If there was someone else holding the buffer, we write it to the new
1429 * location but not relink it, because that other user needs to have the buffer
1430 * at the same place.
1431 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1433 {
1434 struct dm_bufio_client *c = b->c;
1435 struct dm_buffer *new;
1436
1437 BUG_ON(dm_bufio_in_request());
1438
1439 dm_bufio_lock(c);
1440
1441 retry:
1442 new = __find(c, new_block);
1443 if (new) {
1444 if (new->hold_count) {
1445 __wait_for_free_buffer(c);
1446 goto retry;
1447 }
1448
1449 /*
1450 * FIXME: Is there any point waiting for a write that's going
1451 * to be overwritten in a bit?
1452 */
1453 __make_buffer_clean(new);
1454 __unlink_buffer(new);
1455 __free_buffer_wake(new);
1456 }
1457
1458 BUG_ON(!b->hold_count);
1459 BUG_ON(test_bit(B_READING, &b->state));
1460
1461 __write_dirty_buffer(b, NULL);
1462 if (b->hold_count == 1) {
1463 wait_on_bit_io(&b->state, B_WRITING,
1464 TASK_UNINTERRUPTIBLE);
1465 set_bit(B_DIRTY, &b->state);
1466 b->dirty_start = 0;
1467 b->dirty_end = c->block_size;
1468 __unlink_buffer(b);
1469 __link_buffer(b, new_block, LIST_DIRTY);
1470 } else {
1471 sector_t old_block;
1472 wait_on_bit_lock_io(&b->state, B_WRITING,
1473 TASK_UNINTERRUPTIBLE);
1474 /*
1475 * Relink buffer to "new_block" so that write_callback
1476 * sees "new_block" as a block number.
1477 * After the write, link the buffer back to old_block.
1478 * All this must be done in bufio lock, so that block number
1479 * change isn't visible to other threads.
1480 */
1481 old_block = b->block;
1482 __unlink_buffer(b);
1483 __link_buffer(b, new_block, b->list_mode);
1484 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1485 wait_on_bit_io(&b->state, B_WRITING,
1486 TASK_UNINTERRUPTIBLE);
1487 __unlink_buffer(b);
1488 __link_buffer(b, old_block, b->list_mode);
1489 }
1490
1491 dm_bufio_unlock(c);
1492 dm_bufio_release(b);
1493 }
1494 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1495
static void forget_buffer_locked(struct dm_buffer *b)
1497 {
1498 if (likely(!b->hold_count) && likely(!b->state)) {
1499 __unlink_buffer(b);
1500 __free_buffer_wake(b);
1501 }
1502 }
1503
1504 /*
1505 * Free the given buffer.
1506 *
1507 * This is just a hint, if the buffer is in use or dirty, this function
1508 * does nothing.
1509 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1511 {
1512 struct dm_buffer *b;
1513
1514 dm_bufio_lock(c);
1515
1516 b = __find(c, block);
1517 if (b)
1518 forget_buffer_locked(b);
1519
1520 dm_bufio_unlock(c);
1521 }
1522 EXPORT_SYMBOL_GPL(dm_bufio_forget);
1523
void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
1525 {
1526 struct dm_buffer *b;
1527 sector_t end_block = block + n_blocks;
1528
1529 while (block < end_block) {
1530 dm_bufio_lock(c);
1531
1532 b = __find_next(c, block);
1533 if (b) {
1534 block = b->block + 1;
1535 forget_buffer_locked(b);
1536 }
1537
1538 dm_bufio_unlock(c);
1539
1540 if (!b)
1541 break;
1542 }
1543
1544 }
1545 EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
1546
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1548 {
1549 c->minimum_buffers = n;
1550 }
1551 EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
1552
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1554 {
1555 return c->block_size;
1556 }
1557 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1558
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1560 {
1561 sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
1562 if (s >= c->start)
1563 s -= c->start;
1564 else
1565 s = 0;
1566 if (likely(c->sectors_per_block_bits >= 0))
1567 s >>= c->sectors_per_block_bits;
1568 else
1569 sector_div(s, c->block_size >> SECTOR_SHIFT);
1570 return s;
1571 }
1572 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1573
struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
1575 {
1576 return c->dm_io;
1577 }
1578 EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
1579
sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1581 {
1582 return b->block;
1583 }
1584 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1585
void *dm_bufio_get_block_data(struct dm_buffer *b)
1587 {
1588 return b->data;
1589 }
1590 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1591
void *dm_bufio_get_aux_data(struct dm_buffer *b)
1593 {
1594 return b + 1;
1595 }
1596 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1597
struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1599 {
1600 return b->c;
1601 }
1602 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1603
static void drop_buffers(struct dm_bufio_client *c)
1605 {
1606 struct dm_buffer *b;
1607 int i;
1608 bool warned = false;
1609
1610 BUG_ON(dm_bufio_in_request());
1611
1612 /*
1613 * An optimization so that the buffers are not written one-by-one.
1614 */
1615 dm_bufio_write_dirty_buffers_async(c);
1616
1617 dm_bufio_lock(c);
1618
1619 while ((b = __get_unclaimed_buffer(c)))
1620 __free_buffer_wake(b);
1621
1622 for (i = 0; i < LIST_SIZE; i++)
1623 list_for_each_entry(b, &c->lru[i], lru_list) {
1624 WARN_ON(!warned);
1625 warned = true;
1626 DMERR("leaked buffer %llx, hold count %u, list %d",
1627 (unsigned long long)b->block, b->hold_count, i);
1628 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1629 stack_trace_print(b->stack_entries, b->stack_len, 1);
1630 /* mark unclaimed to avoid BUG_ON below */
1631 b->hold_count = 0;
1632 #endif
1633 }
1634
1635 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1636 while ((b = __get_unclaimed_buffer(c)))
1637 __free_buffer_wake(b);
1638 #endif
1639
1640 for (i = 0; i < LIST_SIZE; i++)
1641 BUG_ON(!list_empty(&c->lru[i]));
1642
1643 dm_bufio_unlock(c);
1644 }
1645
1646 /*
1647 * We may not be able to evict this buffer if IO pending or the client
1648 * is still using it. Caller is expected to know buffer is too old.
1649 *
1650 * And if GFP_NOFS is used, we must not do any I/O because we hold
1651 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1652 * rerouted to different bufio client.
1653 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1655 {
1656 if (!(gfp & __GFP_FS)) {
1657 if (test_bit(B_READING, &b->state) ||
1658 test_bit(B_WRITING, &b->state) ||
1659 test_bit(B_DIRTY, &b->state))
1660 return false;
1661 }
1662
1663 if (b->hold_count)
1664 return false;
1665
1666 __make_buffer_clean(b);
1667 __unlink_buffer(b);
1668 __free_buffer_wake(b);
1669
1670 return true;
1671 }
1672
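/*
 * Convert dm_bufio_retain_bytes into a number of buffers for this client,
 * e.g. the default 256 KiB with 4 KiB blocks retains 64 buffers.
 */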
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1674 {
1675 unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
1676 if (likely(c->sectors_per_block_bits >= 0))
1677 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
1678 else
1679 retain_bytes /= c->block_size;
1680 return retain_bytes;
1681 }
1682
static void __scan(struct dm_bufio_client *c)
1684 {
1685 int l;
1686 struct dm_buffer *b, *tmp;
1687 unsigned long freed = 0;
1688 unsigned long count = c->n_buffers[LIST_CLEAN] +
1689 c->n_buffers[LIST_DIRTY];
1690 unsigned long retain_target = get_retain_buffers(c);
1691
1692 for (l = 0; l < LIST_SIZE; l++) {
1693 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1694 if (count - freed <= retain_target)
1695 atomic_long_set(&c->need_shrink, 0);
1696 if (!atomic_long_read(&c->need_shrink))
1697 return;
1698 if (__try_evict_buffer(b, GFP_KERNEL)) {
1699 atomic_long_dec(&c->need_shrink);
1700 freed++;
1701 }
1702 cond_resched();
1703 }
1704 }
1705 }
1706
static void shrink_work(struct work_struct *w)
1708 {
1709 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
1710
1711 dm_bufio_lock(c);
1712 __scan(c);
1713 dm_bufio_unlock(c);
1714 }
1715
static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1717 {
1718 struct dm_bufio_client *c;
1719 bool bypass = false;
1720
1721 trace_android_vh_dm_bufio_shrink_scan_bypass(
1722 dm_bufio_current_allocated,
1723 &bypass);
1724 if (bypass)
1725 return 0;
1726
1727 c = container_of(shrink, struct dm_bufio_client, shrinker);
1728 atomic_long_add(sc->nr_to_scan, &c->need_shrink);
1729 queue_work(dm_bufio_wq, &c->shrink_work);
1730
1731 return sc->nr_to_scan;
1732 }
1733
static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1735 {
1736 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1737 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1738 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1739 unsigned long retain_target = get_retain_buffers(c);
1740 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
1741
1742 if (unlikely(count < retain_target))
1743 count = 0;
1744 else
1745 count -= retain_target;
1746
1747 if (unlikely(count < queued_for_cleanup))
1748 count = 0;
1749 else
1750 count -= queued_for_cleanup;
1751
1752 return count;
1753 }
1754
1755 /*
1756 * Create the buffering interface
1757 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
1762 {
1763 int r;
1764 struct dm_bufio_client *c;
1765 unsigned i;
1766 char slab_name[27];
1767
1768 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
1769 DMERR("%s: block size not specified or is not multiple of 512b", __func__);
1770 r = -EINVAL;
1771 goto bad_client;
1772 }
1773
1774 c = kzalloc(sizeof(*c), GFP_KERNEL);
1775 if (!c) {
1776 r = -ENOMEM;
1777 goto bad_client;
1778 }
1779 c->buffer_tree = RB_ROOT;
1780
1781 c->bdev = bdev;
1782 c->block_size = block_size;
1783 if (is_power_of_2(block_size))
1784 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1785 else
1786 c->sectors_per_block_bits = -1;
1787
1788 c->alloc_callback = alloc_callback;
1789 c->write_callback = write_callback;
1790
1791 for (i = 0; i < LIST_SIZE; i++) {
1792 INIT_LIST_HEAD(&c->lru[i]);
1793 c->n_buffers[i] = 0;
1794 }
1795
1796 mutex_init(&c->lock);
1797 INIT_LIST_HEAD(&c->reserved_buffers);
1798 c->need_reserved_buffers = reserved_buffers;
1799
1800 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
1801
1802 init_waitqueue_head(&c->free_buffer_wait);
1803 c->async_write_error = 0;
1804
1805 c->dm_io = dm_io_client_create();
1806 if (IS_ERR(c->dm_io)) {
1807 r = PTR_ERR(c->dm_io);
1808 goto bad_dm_io;
1809 }
1810
1811 if (block_size <= KMALLOC_MAX_SIZE &&
1812 (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
1813 unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
1814 snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
1815 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
1816 SLAB_RECLAIM_ACCOUNT, NULL);
1817 if (!c->slab_cache) {
1818 r = -ENOMEM;
1819 goto bad;
1820 }
1821 }
1822 if (aux_size)
1823 snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
1824 else
1825 snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
1826 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
1827 0, SLAB_RECLAIM_ACCOUNT, NULL);
1828 if (!c->slab_buffer) {
1829 r = -ENOMEM;
1830 goto bad;
1831 }
1832
1833 while (c->need_reserved_buffers) {
1834 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1835
1836 if (!b) {
1837 r = -ENOMEM;
1838 goto bad;
1839 }
1840 __free_buffer_wake(b);
1841 }
1842
1843 INIT_WORK(&c->shrink_work, shrink_work);
1844 atomic_long_set(&c->need_shrink, 0);
1845
1846 c->shrinker.count_objects = dm_bufio_shrink_count;
1847 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1848 c->shrinker.seeks = 1;
1849 c->shrinker.batch = 0;
1850 r = register_shrinker(&c->shrinker);
1851 if (r)
1852 goto bad;
1853
1854 mutex_lock(&dm_bufio_clients_lock);
1855 dm_bufio_client_count++;
1856 list_add(&c->client_list, &dm_bufio_all_clients);
1857 __cache_size_refresh();
1858 mutex_unlock(&dm_bufio_clients_lock);
1859
1860 return c;
1861
1862 bad:
1863 while (!list_empty(&c->reserved_buffers)) {
1864 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1865 struct dm_buffer, lru_list);
1866 list_del(&b->lru_list);
1867 free_buffer(b);
1868 }
1869 kmem_cache_destroy(c->slab_cache);
1870 kmem_cache_destroy(c->slab_buffer);
1871 dm_io_client_destroy(c->dm_io);
1872 bad_dm_io:
1873 mutex_destroy(&c->lock);
1874 kfree(c);
1875 bad_client:
1876 return ERR_PTR(r);
1877 }
1878 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1879
1880 /*
1881 * Free the buffering interface.
1882 * It is required that there are no references on any buffers.
1883 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
1885 {
1886 unsigned i;
1887
1888 drop_buffers(c);
1889
1890 unregister_shrinker(&c->shrinker);
1891 flush_work(&c->shrink_work);
1892
1893 mutex_lock(&dm_bufio_clients_lock);
1894
1895 list_del(&c->client_list);
1896 dm_bufio_client_count--;
1897 __cache_size_refresh();
1898
1899 mutex_unlock(&dm_bufio_clients_lock);
1900
1901 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1902 BUG_ON(c->need_reserved_buffers);
1903
1904 while (!list_empty(&c->reserved_buffers)) {
1905 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1906 struct dm_buffer, lru_list);
1907 list_del(&b->lru_list);
1908 free_buffer(b);
1909 }
1910
1911 for (i = 0; i < LIST_SIZE; i++)
1912 if (c->n_buffers[i])
1913 DMERR("leaked buffer count %u: %lu", i, c->n_buffers[i]);
1914
1915 for (i = 0; i < LIST_SIZE; i++)
1916 BUG_ON(c->n_buffers[i]);
1917
1918 kmem_cache_destroy(c->slab_cache);
1919 kmem_cache_destroy(c->slab_buffer);
1920 dm_io_client_destroy(c->dm_io);
1921 mutex_destroy(&c->lock);
1922 kfree(c);
1923 }
1924 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
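/*
 * Illustrative sketch (added, not part of the original source): a minimal
 * example of how a device-mapper target might pair dm_bufio_client_create()
 * with dm_bufio_client_destroy().  The block size, reserve count and error
 * handling below are placeholders, not recommendations.
 *
 *	struct dm_bufio_client *c;
 *	struct dm_buffer *b;
 *	void *data;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	data = dm_bufio_read(c, block, &b);
 *	if (!IS_ERR(data)) {
 *		(use data here)
 *		dm_bufio_release(b);
 *	}
 *
 *	dm_bufio_client_destroy(c);	(legal only after every buffer is released)
 */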
1925
1926 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1927 {
1928 c->start = start;
1929 }
1930 EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
1931
1932 static unsigned get_max_age_hz(void)
1933 {
1934 unsigned max_age = READ_ONCE(dm_bufio_max_age);
1935
1936 if (max_age > UINT_MAX / HZ)
1937 max_age = UINT_MAX / HZ;
1938
1939 return max_age * HZ;
1940 }
1941
1942 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1943 {
1944 return time_after_eq(jiffies, b->last_accessed + age_hz);
1945 }
1946
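/*
 * Descriptive note (added): walk the clean LRU from its oldest end and drop
 * buffers that have not been accessed for age_hz jiffies, but never shrink
 * below the retain target; dirty buffers over the watermark are queued for
 * writeback first.
 */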
1947 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1948 {
1949 struct dm_buffer *b, *tmp;
1950 unsigned long retain_target = get_retain_buffers(c);
1951 unsigned long count;
1952 LIST_HEAD(write_list);
1953
1954 dm_bufio_lock(c);
1955
1956 __check_watermark(c, &write_list);
1957 if (unlikely(!list_empty(&write_list))) {
1958 dm_bufio_unlock(c);
1959 __flush_write_list(&write_list);
1960 dm_bufio_lock(c);
1961 }
1962
1963 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1964 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1965 if (count <= retain_target)
1966 break;
1967
1968 if (!older_than(b, age_hz))
1969 break;
1970
1971 if (__try_evict_buffer(b, 0))
1972 count--;
1973
1974 cond_resched();
1975 }
1976
1977 dm_bufio_unlock(c);
1978 }
1979
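/*
 * Descriptive note (added): global eviction, run from
 * dm_bufio_replacement_work.  Walk the global LRU from its oldest end until
 * the total allocation drops below the low-watermark threshold.  Recently
 * accessed buffers get a second chance; the owning client's mutex is taken
 * (trylock under the spinlock first, then a blocking lock with the spinlock
 * dropped) before a buffer is actually evicted.
 */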
1980 static void do_global_cleanup(struct work_struct *w)
1981 {
1982 struct dm_bufio_client *locked_client = NULL;
1983 struct dm_bufio_client *current_client;
1984 struct dm_buffer *b;
1985 unsigned spinlock_hold_count;
1986 unsigned long threshold = dm_bufio_cache_size -
1987 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
1988 unsigned long loops = global_num * 2;
1989
1990 mutex_lock(&dm_bufio_clients_lock);
1991
1992 while (1) {
1993 cond_resched();
1994
1995 spin_lock(&global_spinlock);
1996 if (unlikely(dm_bufio_current_allocated <= threshold))
1997 break;
1998
1999 spinlock_hold_count = 0;
2000 get_next:
2001 if (!loops--)
2002 break;
2003 if (unlikely(list_empty(&global_queue)))
2004 break;
2005 b = list_entry(global_queue.prev, struct dm_buffer, global_list);
2006
2007 if (b->accessed) {
2008 b->accessed = 0;
2009 list_move(&b->global_list, &global_queue);
2010 if (likely(++spinlock_hold_count < 16))
2011 goto get_next;
2012 spin_unlock(&global_spinlock);
2013 continue;
2014 }
2015
2016 current_client = b->c;
2017 if (unlikely(current_client != locked_client)) {
2018 if (locked_client)
2019 dm_bufio_unlock(locked_client);
2020
2021 if (!dm_bufio_trylock(current_client)) {
2022 spin_unlock(&global_spinlock);
2023 dm_bufio_lock(current_client);
2024 locked_client = current_client;
2025 continue;
2026 }
2027
2028 locked_client = current_client;
2029 }
2030
2031 spin_unlock(&global_spinlock);
2032
2033 if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
2034 spin_lock(&global_spinlock);
2035 list_move(&b->global_list, &global_queue);
2036 spin_unlock(&global_spinlock);
2037 }
2038 }
2039
2040 spin_unlock(&global_spinlock);
2041
2042 if (locked_client)
2043 dm_bufio_unlock(locked_client);
2044
2045 mutex_unlock(&dm_bufio_clients_lock);
2046 }
2047
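/*
 * Descriptive note (added): periodic aging.  work_fn() requeues itself every
 * DM_BUFIO_WORK_TIMER_SECS and evicts buffers older than dm_bufio_max_age
 * from every client, unless the vendor hook asks to bypass the scan.
 */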
2048 static void cleanup_old_buffers(void)
2049 {
2050 unsigned long max_age_hz = get_max_age_hz();
2051 struct dm_bufio_client *c;
2052 bool bypass = false;
2053
2054 trace_android_vh_cleanup_old_buffers_bypass(
2055 dm_bufio_current_allocated,
2056 &max_age_hz,
2057 &bypass);
2058 if (bypass)
2059 return;
2060
2061 mutex_lock(&dm_bufio_clients_lock);
2062
2063 __cache_size_refresh();
2064
2065 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2066 __evict_old_buffers(c, max_age_hz);
2067
2068 mutex_unlock(&dm_bufio_clients_lock);
2069 }
2070
2071 static void work_fn(struct work_struct *w)
2072 {
2073 cleanup_old_buffers();
2074
2075 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2076 DM_BUFIO_WORK_TIMER_SECS * HZ);
2077 }
2078
2079 /*----------------------------------------------------------------
2080 * Module setup
2081 *--------------------------------------------------------------*/
2082
2083 /*
2084 * This is called only once for the whole dm_bufio module.
2085  * It initializes the global memory limit for the buffer cache.
2086 */
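/*
 * Illustrative figures (added, not from the original source): with 8 GiB of
 * lowmem, DM_BUFIO_MEMORY_PERCENT = 2 yields a default cache size of roughly
 * 164 MiB; under CONFIG_MMU the value is additionally capped at
 * DM_BUFIO_VMALLOC_PERCENT = 25% of the vmalloc arena, which mostly matters
 * on 32-bit systems.
 */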
2087 static int __init dm_bufio_init(void)
2088 {
2089 __u64 mem;
2090
2091 dm_bufio_allocated_kmem_cache = 0;
2092 dm_bufio_allocated_get_free_pages = 0;
2093 dm_bufio_allocated_vmalloc = 0;
2094 dm_bufio_current_allocated = 0;
2095
2096 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2097 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2098
2099 if (mem > ULONG_MAX)
2100 mem = ULONG_MAX;
2101
2102 #ifdef CONFIG_MMU
2103 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2104 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2105 #endif
2106
2107 dm_bufio_default_cache_size = mem;
2108
2109 mutex_lock(&dm_bufio_clients_lock);
2110 __cache_size_refresh();
2111 mutex_unlock(&dm_bufio_clients_lock);
2112
2113 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2114 if (!dm_bufio_wq)
2115 return -ENOMEM;
2116
2117 INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2118 INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2119 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2120 DM_BUFIO_WORK_TIMER_SECS * HZ);
2121
2122 return 0;
2123 }
2124
2125 /*
2126 * This is called once when unloading the dm_bufio module.
2127 */
2128 static void __exit dm_bufio_exit(void)
2129 {
2130 int bug = 0;
2131
2132 cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2133 flush_workqueue(dm_bufio_wq);
2134 destroy_workqueue(dm_bufio_wq);
2135
2136 if (dm_bufio_client_count) {
2137 DMCRIT("%s: dm_bufio_client_count leaked: %d",
2138 __func__, dm_bufio_client_count);
2139 bug = 1;
2140 }
2141
2142 if (dm_bufio_current_allocated) {
2143 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2144 __func__, dm_bufio_current_allocated);
2145 bug = 1;
2146 }
2147
2148 if (dm_bufio_allocated_get_free_pages) {
2149 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2150 __func__, dm_bufio_allocated_get_free_pages);
2151 bug = 1;
2152 }
2153
2154 if (dm_bufio_allocated_vmalloc) {
2155 DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
2156 __func__, dm_bufio_allocated_vmalloc);
2157 bug = 1;
2158 }
2159
2160 BUG_ON(bug);
2161 }
2162
2163 module_init(dm_bufio_init)
2164 module_exit(dm_bufio_exit)
2165
2166 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
2167 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2168
2169 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
2170 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2171
2172 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
2173 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2174
2175 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
2176 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2177
2178 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
2179 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2180
2181 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
2182 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2183
2184 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
2185 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2186
2187 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
2188 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
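/*
 * Illustrative only (added): module_param_named() exposes each knob under
 * /sys/module/dm_bufio/parameters/.  A hypothetical tuning session might look
 * like this (the values are placeholders, not recommendations):
 *
 *	# echo 67108864 > /sys/module/dm_bufio/parameters/max_cache_size_bytes
 *	# cat /sys/module/dm_bufio/parameters/current_allocated_bytes
 */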
2189
2190 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2191 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2192 MODULE_LICENSE("GPL");
2193