1 /*
2 * Copyright (C) 2009-2011 Red Hat, Inc.
3 *
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
5 *
6 * This file is released under the GPL.
7 */
8
9 #include <linux/dm-bufio.h>
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/sched/mm.h>
15 #include <linux/jiffies.h>
16 #include <linux/vmalloc.h>
17 #include <linux/shrinker.h>
18 #include <linux/module.h>
19 #include <linux/rbtree.h>
20 #include <linux/stacktrace.h>
21 #include <linux/jump_label.h>
22
23 #include <trace/hooks/mm.h>
24
25 #define DM_MSG_PREFIX "bufio"
26
27 /*
28 * Memory management policy:
29 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
30 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
31 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
32 * Start background writeback when the number of dirty buffers exceeds
33 * DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
34 */
35 #define DM_BUFIO_MIN_BUFFERS 8
36
37 #define DM_BUFIO_MEMORY_PERCENT 2
38 #define DM_BUFIO_VMALLOC_PERCENT 25
39 #define DM_BUFIO_WRITEBACK_RATIO 3
40 #define DM_BUFIO_LOW_WATERMARK_RATIO 16
41
42 /*
43 * Check buffer ages in this interval (seconds)
44 */
45 #define DM_BUFIO_WORK_TIMER_SECS 30
46
47 /*
48 * Free buffers when they are older than this (seconds)
49 */
50 #define DM_BUFIO_DEFAULT_AGE_SECS 300
51
52 /*
53 * The number of bytes of cached data to keep around.
54 */
55 #define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
56
57 /*
58 * Align buffer writes to this boundary.
59 * Tests show that SSDs have the highest IOPS when using 4k writes.
60 */
61 #define DM_BUFIO_WRITE_ALIGN 4096
62
63 /*
64 * dm_buffer->list_mode
65 */
66 #define LIST_CLEAN 0
67 #define LIST_DIRTY 1
68 #define LIST_SIZE 2
69
70 /*
71 * Linking of buffers:
72 * All buffers are linked to buffer_tree with their node field.
73 *
74 * Clean buffers that are not being written (B_WRITING not set)
75 * are linked to lru[LIST_CLEAN] with their lru_list field.
76 *
77 * Dirty and clean buffers that are being written are linked to
78 * lru[LIST_DIRTY] with their lru_list field. When the write
79 * finishes, the buffer cannot be relinked immediately (because we
80 * are in an interrupt context and relinking requires process
81 * context), so some clean-not-writing buffers can be held on
82 * dirty_lru too. They are later added to lru in the process
83 * context.
84 */
85 struct dm_bufio_client {
86 struct mutex lock;
87 spinlock_t spinlock;
88 bool no_sleep;
89
90 struct list_head lru[LIST_SIZE];
91 unsigned long n_buffers[LIST_SIZE];
92
93 struct block_device *bdev;
94 unsigned int block_size;
95 s8 sectors_per_block_bits;
96 void (*alloc_callback)(struct dm_buffer *);
97 void (*write_callback)(struct dm_buffer *);
98 struct kmem_cache *slab_buffer;
99 struct kmem_cache *slab_cache;
100 struct dm_io_client *dm_io;
101
102 struct list_head reserved_buffers;
103 unsigned int need_reserved_buffers;
104
105 unsigned int minimum_buffers;
106
107 struct rb_root buffer_tree;
108 wait_queue_head_t free_buffer_wait;
109
110 sector_t start;
111
112 int async_write_error;
113
114 struct list_head client_list;
115
116 struct shrinker shrinker;
117 struct work_struct shrink_work;
118 atomic_long_t need_shrink;
119 };
120
121 /*
122 * Buffer state bits.
123 */
124 #define B_READING 0
125 #define B_WRITING 1
126 #define B_DIRTY 2
127
128 /*
129 * Describes how the block was allocated:
130 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
131 * See the comment at alloc_buffer_data.
132 */
133 enum data_mode {
134 DATA_MODE_SLAB = 0,
135 DATA_MODE_GET_FREE_PAGES = 1,
136 DATA_MODE_VMALLOC = 2,
137 DATA_MODE_LIMIT = 3
138 };
139
140 struct dm_buffer {
141 struct rb_node node;
142 struct list_head lru_list;
143 struct list_head global_list;
144 sector_t block;
145 void *data;
146 unsigned char data_mode; /* DATA_MODE_* */
147 unsigned char list_mode; /* LIST_* */
148 blk_status_t read_error;
149 blk_status_t write_error;
150 unsigned int accessed;
151 unsigned int hold_count;
152 unsigned long state;
153 unsigned long last_accessed;
154 unsigned int dirty_start;
155 unsigned int dirty_end;
156 unsigned int write_start;
157 unsigned int write_end;
158 struct dm_bufio_client *c;
159 struct list_head write_list;
160 void (*end_io)(struct dm_buffer *, blk_status_t);
161 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
162 #define MAX_STACK 10
163 unsigned int stack_len;
164 unsigned long stack_entries[MAX_STACK];
165 #endif
166 };
167
168 static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
169
170 /*----------------------------------------------------------------*/
171
172 #define dm_bufio_in_request() (!!current->bio_list)
173
174 static void dm_bufio_lock(struct dm_bufio_client *c)
175 {
176 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
177 spin_lock_bh(&c->spinlock);
178 else
179 mutex_lock_nested(&c->lock, dm_bufio_in_request());
180 }
181
182 static int dm_bufio_trylock(struct dm_bufio_client *c)
183 {
184 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
185 return spin_trylock_bh(&c->spinlock);
186 else
187 return mutex_trylock(&c->lock);
188 }
189
190 static void dm_bufio_unlock(struct dm_bufio_client *c)
191 {
192 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
193 spin_unlock_bh(&c->spinlock);
194 else
195 mutex_unlock(&c->lock);
196 }
197
198 /*----------------------------------------------------------------*/
199
200 /*
201 * Default cache size: available memory divided by the ratio.
202 */
203 static unsigned long dm_bufio_default_cache_size;
204
205 /*
206 * Total cache size set by the user.
207 */
208 static unsigned long dm_bufio_cache_size;
209
210 /*
211 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
212 * at any time. If it disagrees, the user has changed cache size.
213 */
214 static unsigned long dm_bufio_cache_size_latch;
215
216 static DEFINE_SPINLOCK(global_spinlock);
217
218 static LIST_HEAD(global_queue);
219
220 static unsigned long global_num = 0;
221
222 /*
223 * Buffers are freed after this timeout
224 */
225 static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
226 static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
227
228 static unsigned long dm_bufio_peak_allocated;
229 static unsigned long dm_bufio_allocated_kmem_cache;
230 static unsigned long dm_bufio_allocated_get_free_pages;
231 static unsigned long dm_bufio_allocated_vmalloc;
232 static unsigned long dm_bufio_current_allocated;
233
234 /*----------------------------------------------------------------*/
235
236 /*
237 * The current number of clients.
238 */
239 static int dm_bufio_client_count;
240
241 /*
242 * The list of all clients.
243 */
244 static LIST_HEAD(dm_bufio_all_clients);
245
246 /*
247 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
248 */
249 static DEFINE_MUTEX(dm_bufio_clients_lock);
250
251 static struct workqueue_struct *dm_bufio_wq;
252 static struct delayed_work dm_bufio_cleanup_old_work;
253 static struct work_struct dm_bufio_replacement_work;
254
255
256 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
257 static void buffer_record_stack(struct dm_buffer *b)
258 {
259 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
260 }
261 #endif
262
263 /*----------------------------------------------------------------
264 * A red/black tree acts as an index for all the buffers.
265 *--------------------------------------------------------------*/
266 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
267 {
268 struct rb_node *n = c->buffer_tree.rb_node;
269 struct dm_buffer *b;
270
271 while (n) {
272 b = container_of(n, struct dm_buffer, node);
273
274 if (b->block == block)
275 return b;
276
277 n = block < b->block ? n->rb_left : n->rb_right;
278 }
279
280 return NULL;
281 }
282
283 static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
284 {
285 struct rb_node *n = c->buffer_tree.rb_node;
286 struct dm_buffer *b;
287 struct dm_buffer *best = NULL;
288
289 while (n) {
290 b = container_of(n, struct dm_buffer, node);
291
292 if (b->block == block)
293 return b;
294
295 if (block <= b->block) {
296 n = n->rb_left;
297 best = b;
298 } else {
299 n = n->rb_right;
300 }
301 }
302
303 return best;
304 }
305
306 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
307 {
308 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
309 struct dm_buffer *found;
310
311 while (*new) {
312 found = container_of(*new, struct dm_buffer, node);
313
314 if (found->block == b->block) {
315 BUG_ON(found != b);
316 return;
317 }
318
319 parent = *new;
320 new = b->block < found->block ?
321 &found->node.rb_left : &found->node.rb_right;
322 }
323
324 rb_link_node(&b->node, parent, new);
325 rb_insert_color(&b->node, &c->buffer_tree);
326 }
327
328 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
329 {
330 rb_erase(&b->node, &c->buffer_tree);
331 }
332
333 /*----------------------------------------------------------------*/
334
335 static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
336 {
337 unsigned char data_mode;
338 long diff;
339
340 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
341 &dm_bufio_allocated_kmem_cache,
342 &dm_bufio_allocated_get_free_pages,
343 &dm_bufio_allocated_vmalloc,
344 };
345
346 data_mode = b->data_mode;
347 diff = (long)b->c->block_size;
348 if (unlink)
349 diff = -diff;
350
351 spin_lock(&global_spinlock);
352
353 *class_ptr[data_mode] += diff;
354
355 dm_bufio_current_allocated += diff;
356
357 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
358 dm_bufio_peak_allocated = dm_bufio_current_allocated;
359
360 b->accessed = 1;
361
362 if (!unlink) {
363 list_add(&b->global_list, &global_queue);
364 global_num++;
365 if (dm_bufio_current_allocated > dm_bufio_cache_size)
366 queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
367 } else {
368 list_del(&b->global_list);
369 global_num--;
370 }
371
372 spin_unlock(&global_spinlock);
373 }
374
375 /*
376 * Change the number of clients and recalculate per-client limit.
377 */
378 static void __cache_size_refresh(void)
379 {
380 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
381 BUG_ON(dm_bufio_client_count < 0);
382
383 dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
384
385 /*
386 * Use default if set to 0 and report the actual cache size used.
387 */
388 if (!dm_bufio_cache_size_latch) {
389 (void)cmpxchg(&dm_bufio_cache_size, 0,
390 dm_bufio_default_cache_size);
391 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
392 }
393 }
394
395 /*
396 * Allocating buffer data.
397 *
398 * Small buffers are allocated with kmem_cache, to use space optimally.
399 *
400 * For large buffers, we choose between get_free_pages and vmalloc.
401 * Each has advantages and disadvantages.
402 *
403 * __get_free_pages can randomly fail if the memory is fragmented.
404 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
405 * as low as 128M) so using it for caching is not appropriate.
406 *
407 * If the allocation may fail we use __get_free_pages. Memory fragmentation
408 * won't have a fatal effect here, but it just causes flushes of some other
409 * buffers and more I/O will be performed. Don't use __get_free_pages if it
410 * always fails (i.e. order >= MAX_ORDER).
411 *
412 * If the allocation shouldn't fail we use __vmalloc. This is only for the
413 * initial reserve allocation, so there's no risk of wasting all vmalloc
414 * space.
415 */
416 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
417 unsigned char *data_mode)
418 {
419 if (unlikely(c->slab_cache != NULL)) {
420 *data_mode = DATA_MODE_SLAB;
421 return kmem_cache_alloc(c->slab_cache, gfp_mask);
422 }
423
424 if (c->block_size <= KMALLOC_MAX_SIZE &&
425 gfp_mask & __GFP_NORETRY) {
426 *data_mode = DATA_MODE_GET_FREE_PAGES;
427 return (void *)__get_free_pages(gfp_mask,
428 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
429 }
430
431 *data_mode = DATA_MODE_VMALLOC;
432
433 /*
434 * __vmalloc allocates the data pages and auxiliary structures with
435 * gfp_flags that were specified, but pagetables are always allocated
436 * with GFP_KERNEL, no matter what was specified as gfp_mask.
437 *
438 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
439 * all allocations done by this process (including pagetables) are done
440 * as if GFP_NOIO was specified.
441 */
442 if (gfp_mask & __GFP_NORETRY) {
443 unsigned int noio_flag = memalloc_noio_save();
444 void *ptr = __vmalloc(c->block_size, gfp_mask);
445
446 memalloc_noio_restore(noio_flag);
447 return ptr;
448 }
449
450 return __vmalloc(c->block_size, gfp_mask);
451 }
452
453 /*
454 * Free buffer's data.
455 */
456 static void free_buffer_data(struct dm_bufio_client *c,
457 void *data, unsigned char data_mode)
458 {
459 switch (data_mode) {
460 case DATA_MODE_SLAB:
461 kmem_cache_free(c->slab_cache, data);
462 break;
463
464 case DATA_MODE_GET_FREE_PAGES:
465 free_pages((unsigned long)data,
466 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
467 break;
468
469 case DATA_MODE_VMALLOC:
470 vfree(data);
471 break;
472
473 default:
474 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
475 data_mode);
476 BUG();
477 }
478 }
479
480 /*
481 * Allocate buffer and its data.
482 */
483 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
484 {
485 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
486
487 if (!b)
488 return NULL;
489
490 b->c = c;
491
492 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
493 if (!b->data) {
494 kmem_cache_free(c->slab_buffer, b);
495 return NULL;
496 }
497
498 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
499 b->stack_len = 0;
500 #endif
501 return b;
502 }
503
504 /*
505 * Free buffer and its data.
506 */
507 static void free_buffer(struct dm_buffer *b)
508 {
509 struct dm_bufio_client *c = b->c;
510
511 free_buffer_data(c, b->data, b->data_mode);
512 kmem_cache_free(c->slab_buffer, b);
513 }
514
515 /*
516 * Link buffer to the buffer tree and clean or dirty queue.
517 */
518 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
519 {
520 struct dm_bufio_client *c = b->c;
521
522 c->n_buffers[dirty]++;
523 b->block = block;
524 b->list_mode = dirty;
525 list_add(&b->lru_list, &c->lru[dirty]);
526 __insert(b->c, b);
527 b->last_accessed = jiffies;
528
529 adjust_total_allocated(b, false);
530 }
531
532 /*
533 * Unlink buffer from the buffer tree and dirty or clean queue.
534 */
535 static void __unlink_buffer(struct dm_buffer *b)
536 {
537 struct dm_bufio_client *c = b->c;
538
539 BUG_ON(!c->n_buffers[b->list_mode]);
540
541 c->n_buffers[b->list_mode]--;
542 __remove(b->c, b);
543 list_del(&b->lru_list);
544
545 adjust_total_allocated(b, true);
546 }
547
548 /*
549 * Place the buffer at the head of the dirty or clean LRU queue.
550 */
551 static void __relink_lru(struct dm_buffer *b, int dirty)
552 {
553 struct dm_bufio_client *c = b->c;
554
555 b->accessed = 1;
556
557 BUG_ON(!c->n_buffers[b->list_mode]);
558
559 c->n_buffers[b->list_mode]--;
560 c->n_buffers[dirty]++;
561 b->list_mode = dirty;
562 list_move(&b->lru_list, &c->lru[dirty]);
563 b->last_accessed = jiffies;
564 }
565
566 /*----------------------------------------------------------------
567 * Submit I/O on the buffer.
568 *
569 * Bio interface is faster but it has some problems:
570 * the vector list is limited (increasing this limit increases
571 * memory-consumption per buffer, so it is not viable);
572 *
573 * the memory must be direct-mapped, not vmalloced;
574 *
575 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
576 * it is not vmalloced, try using the bio interface.
577 *
578 * If the buffer is big, if it is vmalloced or if the underlying device
579 * rejects the bio because it is too large, use dm-io layer to do the I/O.
580 * The dm-io layer splits the I/O into multiple requests, avoiding the above
581 * shortcomings.
582 *--------------------------------------------------------------*/
583
584 /*
585 * dm-io completion routine. It just calls b->end_io, pretending
586 * that the request was handled directly with the bio interface.
587 */
588 static void dmio_complete(unsigned long error, void *context)
589 {
590 struct dm_buffer *b = context;
591
592 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
593 }
594
595 static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
596 unsigned int n_sectors, unsigned int offset)
597 {
598 int r;
599 struct dm_io_request io_req = {
600 .bi_opf = op,
601 .notify.fn = dmio_complete,
602 .notify.context = b,
603 .client = b->c->dm_io,
604 };
605 struct dm_io_region region = {
606 .bdev = b->c->bdev,
607 .sector = sector,
608 .count = n_sectors,
609 };
610
611 if (b->data_mode != DATA_MODE_VMALLOC) {
612 io_req.mem.type = DM_IO_KMEM;
613 io_req.mem.ptr.addr = (char *)b->data + offset;
614 } else {
615 io_req.mem.type = DM_IO_VMA;
616 io_req.mem.ptr.vma = (char *)b->data + offset;
617 }
618
619 r = dm_io(&io_req, 1, &region, NULL);
620 if (unlikely(r))
621 b->end_io(b, errno_to_blk_status(r));
622 }
623
624 static void bio_complete(struct bio *bio)
625 {
626 struct dm_buffer *b = bio->bi_private;
627 blk_status_t status = bio->bi_status;
628 bio_uninit(bio);
629 kfree(bio);
630 b->end_io(b, status);
631 }
632
633 static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
634 unsigned int n_sectors, unsigned int offset)
635 {
636 struct bio *bio;
637 char *ptr;
638 unsigned int vec_size, len;
639
640 vec_size = b->c->block_size >> PAGE_SHIFT;
641 if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
642 vec_size += 2;
643
644 bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
645 if (!bio) {
646 dmio:
647 use_dmio(b, op, sector, n_sectors, offset);
648 return;
649 }
650 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
651 bio->bi_iter.bi_sector = sector;
652 bio->bi_end_io = bio_complete;
653 bio->bi_private = b;
654
655 ptr = (char *)b->data + offset;
656 len = n_sectors << SECTOR_SHIFT;
657
658 do {
659 unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len);
660 if (!bio_add_page(bio, virt_to_page(ptr), this_step,
661 offset_in_page(ptr))) {
662 bio_put(bio);
663 goto dmio;
664 }
665
666 len -= this_step;
667 ptr += this_step;
668 } while (len > 0);
669
670 submit_bio(bio);
671 }
672
673 static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
674 {
675 sector_t sector;
676
677 if (likely(c->sectors_per_block_bits >= 0))
678 sector = block << c->sectors_per_block_bits;
679 else
680 sector = block * (c->block_size >> SECTOR_SHIFT);
681 sector += c->start;
682
683 return sector;
684 }
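/*
 * Worked example (illustrative, hypothetical values): for a client created
 * with a 4096-byte block size, sectors_per_block_bits is 12 - 9 = 3, so
 * block 10 with start == 0 maps to sector 10 << 3 = 80.  With a
 * non-power-of-two block size of 3072 bytes, sectors_per_block_bits is -1
 * and the same block maps to 10 * (3072 >> 9) = 60 sectors past c->start.
 */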
685
686 static void submit_io(struct dm_buffer *b, enum req_op op,
687 void (*end_io)(struct dm_buffer *, blk_status_t))
688 {
689 unsigned int n_sectors;
690 sector_t sector;
691 unsigned int offset, end;
692
693 b->end_io = end_io;
694
695 sector = block_to_sector(b->c, b->block);
696
697 if (op != REQ_OP_WRITE) {
698 n_sectors = b->c->block_size >> SECTOR_SHIFT;
699 offset = 0;
700 } else {
701 if (b->c->write_callback)
702 b->c->write_callback(b);
703 offset = b->write_start;
704 end = b->write_end;
705 offset &= -DM_BUFIO_WRITE_ALIGN;
706 end += DM_BUFIO_WRITE_ALIGN - 1;
707 end &= -DM_BUFIO_WRITE_ALIGN;
708 if (unlikely(end > b->c->block_size))
709 end = b->c->block_size;
710
711 sector += offset >> SECTOR_SHIFT;
712 n_sectors = (end - offset) >> SECTOR_SHIFT;
713 }
714
715 if (b->data_mode != DATA_MODE_VMALLOC)
716 use_bio(b, op, sector, n_sectors, offset);
717 else
718 use_dmio(b, op, sector, n_sectors, offset);
719 }
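/*
 * Worked example of the write alignment above (illustrative, hypothetical
 * values): with write_start == 100 and write_end == 5000 on an 8192-byte
 * buffer, offset &= -4096 yields 0 and end = (5000 + 4095) & -4096 yields
 * 8192, so the two dirty 4k chunks go out as a single aligned write of
 * 16 sectors.
 */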
720
721 /*----------------------------------------------------------------
722 * Writing dirty buffers
723 *--------------------------------------------------------------*/
724
725 /*
726 * The endio routine for write.
727 *
728 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
729 * it.
730 */
731 static void write_endio(struct dm_buffer *b, blk_status_t status)
732 {
733 b->write_error = status;
734 if (unlikely(status)) {
735 struct dm_bufio_client *c = b->c;
736
737 (void)cmpxchg(&c->async_write_error, 0,
738 blk_status_to_errno(status));
739 }
740
741 BUG_ON(!test_bit(B_WRITING, &b->state));
742
743 smp_mb__before_atomic();
744 clear_bit(B_WRITING, &b->state);
745 smp_mb__after_atomic();
746
747 wake_up_bit(&b->state, B_WRITING);
748 }
749
750 /*
751 * Initiate a write on a dirty buffer, but don't wait for it.
752 *
753 * - If the buffer is not dirty, exit.
754 * - If there is a previous write going on, wait for it to finish (we can't
755 * have two writes on the same buffer simultaneously).
756 * - Submit our write and don't wait on it. We set B_WRITING indicating
757 * that there is a write in progress.
758 */
759 static void __write_dirty_buffer(struct dm_buffer *b,
760 struct list_head *write_list)
761 {
762 if (!test_bit(B_DIRTY, &b->state))
763 return;
764
765 clear_bit(B_DIRTY, &b->state);
766 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
767
768 b->write_start = b->dirty_start;
769 b->write_end = b->dirty_end;
770
771 if (!write_list)
772 submit_io(b, REQ_OP_WRITE, write_endio);
773 else
774 list_add_tail(&b->write_list, write_list);
775 }
776
777 static void __flush_write_list(struct list_head *write_list)
778 {
779 struct blk_plug plug;
780 blk_start_plug(&plug);
781 while (!list_empty(write_list)) {
782 struct dm_buffer *b =
783 list_entry(write_list->next, struct dm_buffer, write_list);
784 list_del(&b->write_list);
785 submit_io(b, REQ_OP_WRITE, write_endio);
786 cond_resched();
787 }
788 blk_finish_plug(&plug);
789 }
790
791 /*
792 * Wait until any activity on the buffer finishes. Possibly write the
793 * buffer if it is dirty. When this function finishes, there is no I/O
794 * running on the buffer and the buffer is not dirty.
795 */
796 static void __make_buffer_clean(struct dm_buffer *b)
797 {
798 BUG_ON(b->hold_count);
799
800 /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
801 if (!smp_load_acquire(&b->state)) /* fast case */
802 return;
803
804 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
805 __write_dirty_buffer(b, NULL);
806 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
807 }
808
809 /*
810 * Find some buffer that is not held by anybody, clean it, unlink it and
811 * return it.
812 */
813 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
814 {
815 struct dm_buffer *b;
816
817 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
818 BUG_ON(test_bit(B_WRITING, &b->state));
819 BUG_ON(test_bit(B_DIRTY, &b->state));
820
821 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
822 unlikely(test_bit_acquire(B_READING, &b->state)))
823 continue;
824
825 if (!b->hold_count) {
826 __make_buffer_clean(b);
827 __unlink_buffer(b);
828 return b;
829 }
830 cond_resched();
831 }
832
833 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
834 return NULL;
835
836 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
837 BUG_ON(test_bit(B_READING, &b->state));
838
839 if (!b->hold_count) {
840 __make_buffer_clean(b);
841 __unlink_buffer(b);
842 return b;
843 }
844 cond_resched();
845 }
846
847 return NULL;
848 }
849
850 /*
851 * Wait until some other thread frees a buffer or releases its hold count
852 * on some buffer.
853 *
854 * This function is entered with c->lock held, drops it and regains it
855 * before exiting.
856 */
857 static void __wait_for_free_buffer(struct dm_bufio_client *c)
858 {
859 DECLARE_WAITQUEUE(wait, current);
860
861 add_wait_queue(&c->free_buffer_wait, &wait);
862 set_current_state(TASK_UNINTERRUPTIBLE);
863 dm_bufio_unlock(c);
864
865 io_schedule();
866
867 remove_wait_queue(&c->free_buffer_wait, &wait);
868
869 dm_bufio_lock(c);
870 }
871
872 enum new_flag {
873 NF_FRESH = 0,
874 NF_READ = 1,
875 NF_GET = 2,
876 NF_PREFETCH = 3
877 };
878
879 /*
880 * Allocate a new buffer. If the allocation is not possible, wait until
881 * some other thread frees a buffer.
882 *
883 * May drop the lock and regain it.
884 */
885 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
886 {
887 struct dm_buffer *b;
888 bool tried_noio_alloc = false;
889
890 /*
891 * dm-bufio is resistant to allocation failures (it just keeps
892 * one buffer reserved in case all the allocations fail).
893 * So set flags to not try too hard:
894 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
895 * mutex and wait ourselves.
896 * __GFP_NORETRY: don't retry and rather return failure
897 * __GFP_NOMEMALLOC: don't use emergency reserves
898 * __GFP_NOWARN: don't print a warning in case of failure
899 *
900 * For debugging, if we set the cache size to 1, no new buffers will
901 * be allocated.
902 */
903 while (1) {
904 if (dm_bufio_cache_size_latch != 1) {
905 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
906 if (b)
907 return b;
908 }
909
910 if (nf == NF_PREFETCH)
911 return NULL;
912
913 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
914 dm_bufio_unlock(c);
915 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
916 dm_bufio_lock(c);
917 if (b)
918 return b;
919 tried_noio_alloc = true;
920 }
921
922 if (!list_empty(&c->reserved_buffers)) {
923 b = list_entry(c->reserved_buffers.next,
924 struct dm_buffer, lru_list);
925 list_del(&b->lru_list);
926 c->need_reserved_buffers++;
927
928 return b;
929 }
930
931 b = __get_unclaimed_buffer(c);
932 if (b)
933 return b;
934
935 __wait_for_free_buffer(c);
936 }
937 }
938
939 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
940 {
941 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
942
943 if (!b)
944 return NULL;
945
946 if (c->alloc_callback)
947 c->alloc_callback(b);
948
949 return b;
950 }
951
952 /*
953 * Free a buffer and wake other threads waiting for free buffers.
954 */
955 static void __free_buffer_wake(struct dm_buffer *b)
956 {
957 struct dm_bufio_client *c = b->c;
958
959 if (!c->need_reserved_buffers)
960 free_buffer(b);
961 else {
962 list_add(&b->lru_list, &c->reserved_buffers);
963 c->need_reserved_buffers--;
964 }
965
966 wake_up(&c->free_buffer_wait);
967 }
968
969 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
970 struct list_head *write_list)
971 {
972 struct dm_buffer *b, *tmp;
973
974 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
975 BUG_ON(test_bit(B_READING, &b->state));
976
977 if (!test_bit(B_DIRTY, &b->state) &&
978 !test_bit(B_WRITING, &b->state)) {
979 __relink_lru(b, LIST_CLEAN);
980 continue;
981 }
982
983 if (no_wait && test_bit(B_WRITING, &b->state))
984 return;
985
986 __write_dirty_buffer(b, write_list);
987 cond_resched();
988 }
989 }
990
991 /*
992 * Check if we're over the dirty-buffer watermark.
993 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times
994 * the number of clean buffers, start writing them back asynchronously.
995 */
996 static void __check_watermark(struct dm_bufio_client *c,
997 struct list_head *write_list)
998 {
999 if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
1000 __write_dirty_buffers_async(c, 1, write_list);
1001 }
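/*
 * Illustrative example (hypothetical counts): with DM_BUFIO_WRITEBACK_RATIO
 * of 3, a client holding 100 clean and 301 dirty buffers starts background
 * writeback here (301 > 100 * 3), while 300 dirty buffers would not.
 */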
1002
1003 /*----------------------------------------------------------------
1004 * Getting a buffer
1005 *--------------------------------------------------------------*/
1006
1007 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1008 enum new_flag nf, int *need_submit,
1009 struct list_head *write_list)
1010 {
1011 struct dm_buffer *b, *new_b = NULL;
1012
1013 *need_submit = 0;
1014
1015 b = __find(c, block);
1016 if (b)
1017 goto found_buffer;
1018
1019 if (nf == NF_GET)
1020 return NULL;
1021
1022 new_b = __alloc_buffer_wait(c, nf);
1023 if (!new_b)
1024 return NULL;
1025
1026 /*
1027 * We've had a period where the mutex was unlocked, so need to
1028 * recheck the buffer tree.
1029 */
1030 b = __find(c, block);
1031 if (b) {
1032 __free_buffer_wake(new_b);
1033 goto found_buffer;
1034 }
1035
1036 __check_watermark(c, write_list);
1037
1038 b = new_b;
1039 b->hold_count = 1;
1040 b->read_error = 0;
1041 b->write_error = 0;
1042 __link_buffer(b, block, LIST_CLEAN);
1043
1044 if (nf == NF_FRESH) {
1045 b->state = 0;
1046 return b;
1047 }
1048
1049 b->state = 1 << B_READING;
1050 *need_submit = 1;
1051
1052 return b;
1053
1054 found_buffer:
1055 if (nf == NF_PREFETCH)
1056 return NULL;
1057 /*
1058 * Note: it is essential that we don't wait for the buffer to be
1059 * read if dm_bufio_get function is used. Both dm_bufio_get and
1060 * dm_bufio_prefetch can be used in the driver request routine.
1061 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1062 * the same buffer, it would deadlock if we waited.
1063 */
1064 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
1065 return NULL;
1066
1067 b->hold_count++;
1068 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1069 test_bit(B_WRITING, &b->state));
1070 return b;
1071 }
1072
1073 /*
1074 * The endio routine for reading: set the error, clear the bit and wake up
1075 * anyone waiting on the buffer.
1076 */
1077 static void read_endio(struct dm_buffer *b, blk_status_t status)
1078 {
1079 b->read_error = status;
1080
1081 BUG_ON(!test_bit(B_READING, &b->state));
1082
1083 smp_mb__before_atomic();
1084 clear_bit(B_READING, &b->state);
1085 smp_mb__after_atomic();
1086
1087 wake_up_bit(&b->state, B_READING);
1088 }
1089
1090 /*
1091 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1092 * functions is similar except that dm_bufio_new doesn't read the
1093 * buffer from the disk (assuming that the caller overwrites all the data
1094 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1095 */
1096 static void *new_read(struct dm_bufio_client *c, sector_t block,
1097 enum new_flag nf, struct dm_buffer **bp)
1098 {
1099 int need_submit;
1100 struct dm_buffer *b;
1101
1102 LIST_HEAD(write_list);
1103
1104 dm_bufio_lock(c);
1105 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1106 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1107 if (b && b->hold_count == 1)
1108 buffer_record_stack(b);
1109 #endif
1110 dm_bufio_unlock(c);
1111
1112 __flush_write_list(&write_list);
1113
1114 if (!b)
1115 return NULL;
1116
1117 if (need_submit)
1118 submit_io(b, REQ_OP_READ, read_endio);
1119
1120 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1121
1122 if (b->read_error) {
1123 int error = blk_status_to_errno(b->read_error);
1124
1125 dm_bufio_release(b);
1126
1127 return ERR_PTR(error);
1128 }
1129
1130 *bp = b;
1131
1132 return b->data;
1133 }
1134
1135 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1136 struct dm_buffer **bp)
1137 {
1138 return new_read(c, block, NF_GET, bp);
1139 }
1140 EXPORT_SYMBOL_GPL(dm_bufio_get);
1141
1142 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1143 struct dm_buffer **bp)
1144 {
1145 BUG_ON(dm_bufio_in_request());
1146
1147 return new_read(c, block, NF_READ, bp);
1148 }
1149 EXPORT_SYMBOL_GPL(dm_bufio_read);
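/*
 * A minimal usage sketch for the read path (hypothetical caller, not part
 * of this file); "client", "block" and "dest" are assumed to exist.  The
 * returned pointer must be checked with IS_ERR() and the buffer released
 * when the caller is done with it:
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_read(client, block, &buf);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memcpy(dest, data, dm_bufio_get_block_size(client));
 *	dm_bufio_release(buf);
 */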
1150
1151 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1152 struct dm_buffer **bp)
1153 {
1154 BUG_ON(dm_bufio_in_request());
1155
1156 return new_read(c, block, NF_FRESH, bp);
1157 }
1158 EXPORT_SYMBOL_GPL(dm_bufio_new);
1159
1160 void dm_bufio_prefetch(struct dm_bufio_client *c,
1161 sector_t block, unsigned int n_blocks)
1162 {
1163 struct blk_plug plug;
1164
1165 LIST_HEAD(write_list);
1166
1167 BUG_ON(dm_bufio_in_request());
1168
1169 blk_start_plug(&plug);
1170 dm_bufio_lock(c);
1171
1172 for (; n_blocks--; block++) {
1173 int need_submit;
1174 struct dm_buffer *b;
1175 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1176 &write_list);
1177 if (unlikely(!list_empty(&write_list))) {
1178 dm_bufio_unlock(c);
1179 blk_finish_plug(&plug);
1180 __flush_write_list(&write_list);
1181 blk_start_plug(&plug);
1182 dm_bufio_lock(c);
1183 }
1184 if (unlikely(b != NULL)) {
1185 dm_bufio_unlock(c);
1186
1187 if (need_submit)
1188 submit_io(b, REQ_OP_READ, read_endio);
1189 dm_bufio_release(b);
1190
1191 cond_resched();
1192
1193 if (!n_blocks)
1194 goto flush_plug;
1195 dm_bufio_lock(c);
1196 }
1197 }
1198
1199 dm_bufio_unlock(c);
1200
1201 flush_plug:
1202 blk_finish_plug(&plug);
1203 }
1204 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1205
1206 void dm_bufio_release(struct dm_buffer *b)
1207 {
1208 struct dm_bufio_client *c = b->c;
1209
1210 dm_bufio_lock(c);
1211
1212 BUG_ON(!b->hold_count);
1213
1214 b->hold_count--;
1215 if (!b->hold_count) {
1216 wake_up(&c->free_buffer_wait);
1217
1218 /*
1219 * If there were errors on the buffer, and the buffer is not
1220 * to be written, free the buffer. There is no point in caching
1221 * an invalid buffer.
1222 */
1223 if ((b->read_error || b->write_error) &&
1224 !test_bit_acquire(B_READING, &b->state) &&
1225 !test_bit(B_WRITING, &b->state) &&
1226 !test_bit(B_DIRTY, &b->state)) {
1227 __unlink_buffer(b);
1228 __free_buffer_wake(b);
1229 }
1230 }
1231
1232 dm_bufio_unlock(c);
1233 }
1234 EXPORT_SYMBOL_GPL(dm_bufio_release);
1235
1236 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
1237 unsigned int start, unsigned int end)
1238 {
1239 struct dm_bufio_client *c = b->c;
1240
1241 BUG_ON(start >= end);
1242 BUG_ON(end > b->c->block_size);
1243
1244 dm_bufio_lock(c);
1245
1246 BUG_ON(test_bit(B_READING, &b->state));
1247
1248 if (!test_and_set_bit(B_DIRTY, &b->state)) {
1249 b->dirty_start = start;
1250 b->dirty_end = end;
1251 __relink_lru(b, LIST_DIRTY);
1252 } else {
1253 if (start < b->dirty_start)
1254 b->dirty_start = start;
1255 if (end > b->dirty_end)
1256 b->dirty_end = end;
1257 }
1258
1259 dm_bufio_unlock(c);
1260 }
1261 EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
1262
1263 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1264 {
1265 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
1266 }
1267 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1268
1269 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1270 {
1271 LIST_HEAD(write_list);
1272
1273 BUG_ON(dm_bufio_in_request());
1274
1275 dm_bufio_lock(c);
1276 __write_dirty_buffers_async(c, 0, &write_list);
1277 dm_bufio_unlock(c);
1278 __flush_write_list(&write_list);
1279 }
1280 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1281
1282 /*
1283 * For performance, it is essential that the buffers are written asynchronously
1284 * and simultaneously (so that the block layer can merge the writes) and then
1285 * waited upon.
1286 *
1287 * Finally, we flush hardware disk cache.
1288 */
1289 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1290 {
1291 int a, f;
1292 unsigned long buffers_processed = 0;
1293 struct dm_buffer *b, *tmp;
1294
1295 LIST_HEAD(write_list);
1296
1297 dm_bufio_lock(c);
1298 __write_dirty_buffers_async(c, 0, &write_list);
1299 dm_bufio_unlock(c);
1300 __flush_write_list(&write_list);
1301 dm_bufio_lock(c);
1302
1303 again:
1304 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1305 int dropped_lock = 0;
1306
1307 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1308 buffers_processed++;
1309
1310 BUG_ON(test_bit(B_READING, &b->state));
1311
1312 if (test_bit(B_WRITING, &b->state)) {
1313 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1314 dropped_lock = 1;
1315 b->hold_count++;
1316 dm_bufio_unlock(c);
1317 wait_on_bit_io(&b->state, B_WRITING,
1318 TASK_UNINTERRUPTIBLE);
1319 dm_bufio_lock(c);
1320 b->hold_count--;
1321 } else
1322 wait_on_bit_io(&b->state, B_WRITING,
1323 TASK_UNINTERRUPTIBLE);
1324 }
1325
1326 if (!test_bit(B_DIRTY, &b->state) &&
1327 !test_bit(B_WRITING, &b->state))
1328 __relink_lru(b, LIST_CLEAN);
1329
1330 cond_resched();
1331
1332 /*
1333 * If we dropped the lock, the list is no longer consistent,
1334 * so we must restart the search.
1335 *
1336 * In the most common case, the buffer just processed is
1337 * relinked to the clean list, so we won't loop scanning the
1338 * same buffer again and again.
1339 *
1340 * This may livelock if there is another thread simultaneously
1341 * dirtying buffers, so we count the number of buffers walked
1342 * and if it exceeds the total number of buffers, it means that
1343 * someone is doing some writes simultaneously with us. In
1344 * this case, stop, dropping the lock.
1345 */
1346 if (dropped_lock)
1347 goto again;
1348 }
1349 wake_up(&c->free_buffer_wait);
1350 dm_bufio_unlock(c);
1351
1352 a = xchg(&c->async_write_error, 0);
1353 f = dm_bufio_issue_flush(c);
1354 if (a)
1355 return a;
1356
1357 return f;
1358 }
1359 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
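/*
 * A typical commit sequence from a caller's point of view (hypothetical
 * sketch, not part of this file): modify the data, mark the buffer dirty,
 * release it and then force all dirty buffers out, which also flushes the
 * device cache:
 *
 *	memcpy(dm_bufio_get_block_data(buf), src, len);
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 *	r = dm_bufio_write_dirty_buffers(client);
 */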
1360
1361 /*
1362 * Use dm-io to send an empty barrier to flush the device.
1363 */
1364 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1365 {
1366 struct dm_io_request io_req = {
1367 .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
1368 .mem.type = DM_IO_KMEM,
1369 .mem.ptr.addr = NULL,
1370 .client = c->dm_io,
1371 };
1372 struct dm_io_region io_reg = {
1373 .bdev = c->bdev,
1374 .sector = 0,
1375 .count = 0,
1376 };
1377
1378 BUG_ON(dm_bufio_in_request());
1379
1380 return dm_io(&io_req, 1, &io_reg, NULL);
1381 }
1382 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1383
1384 /*
1385 * Use dm-io to send a discard request to flush the device.
1386 */
1387 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
1388 {
1389 struct dm_io_request io_req = {
1390 .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
1391 .mem.type = DM_IO_KMEM,
1392 .mem.ptr.addr = NULL,
1393 .client = c->dm_io,
1394 };
1395 struct dm_io_region io_reg = {
1396 .bdev = c->bdev,
1397 .sector = block_to_sector(c, block),
1398 .count = block_to_sector(c, count),
1399 };
1400
1401 BUG_ON(dm_bufio_in_request());
1402
1403 return dm_io(&io_req, 1, &io_reg, NULL);
1404 }
1405 EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
1406
1407 /*
1408 * We first delete any other buffer that may be at that new location.
1409 *
1410 * Then, we write the buffer to the original location if it was dirty.
1411 *
1412 * Then, if we are the only one who is holding the buffer, relink the buffer
1413 * in the buffer tree for the new location.
1414 *
1415 * If there was someone else holding the buffer, we write it to the new
1416 * location but not relink it, because that other user needs to have the buffer
1417 * at the same place.
1418 */
1419 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1420 {
1421 struct dm_bufio_client *c = b->c;
1422 struct dm_buffer *new;
1423
1424 BUG_ON(dm_bufio_in_request());
1425
1426 dm_bufio_lock(c);
1427
1428 retry:
1429 new = __find(c, new_block);
1430 if (new) {
1431 if (new->hold_count) {
1432 __wait_for_free_buffer(c);
1433 goto retry;
1434 }
1435
1436 /*
1437 * FIXME: Is there any point waiting for a write that's going
1438 * to be overwritten in a bit?
1439 */
1440 __make_buffer_clean(new);
1441 __unlink_buffer(new);
1442 __free_buffer_wake(new);
1443 }
1444
1445 BUG_ON(!b->hold_count);
1446 BUG_ON(test_bit(B_READING, &b->state));
1447
1448 __write_dirty_buffer(b, NULL);
1449 if (b->hold_count == 1) {
1450 wait_on_bit_io(&b->state, B_WRITING,
1451 TASK_UNINTERRUPTIBLE);
1452 set_bit(B_DIRTY, &b->state);
1453 b->dirty_start = 0;
1454 b->dirty_end = c->block_size;
1455 __unlink_buffer(b);
1456 __link_buffer(b, new_block, LIST_DIRTY);
1457 } else {
1458 sector_t old_block;
1459 wait_on_bit_lock_io(&b->state, B_WRITING,
1460 TASK_UNINTERRUPTIBLE);
1461 /*
1462 * Relink buffer to "new_block" so that write_callback
1463 * sees "new_block" as a block number.
1464 * After the write, link the buffer back to old_block.
1465 * All this must be done in bufio lock, so that block number
1466 * change isn't visible to other threads.
1467 */
1468 old_block = b->block;
1469 __unlink_buffer(b);
1470 __link_buffer(b, new_block, b->list_mode);
1471 submit_io(b, REQ_OP_WRITE, write_endio);
1472 wait_on_bit_io(&b->state, B_WRITING,
1473 TASK_UNINTERRUPTIBLE);
1474 __unlink_buffer(b);
1475 __link_buffer(b, old_block, b->list_mode);
1476 }
1477
1478 dm_bufio_unlock(c);
1479 dm_bufio_release(b);
1480 }
1481 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1482
1483 static void forget_buffer_locked(struct dm_buffer *b)
1484 {
1485 if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) {
1486 __unlink_buffer(b);
1487 __free_buffer_wake(b);
1488 }
1489 }
1490
1491 /*
1492 * Free the given buffer.
1493 *
1494 * This is just a hint, if the buffer is in use or dirty, this function
1495 * does nothing.
1496 */
1497 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1498 {
1499 struct dm_buffer *b;
1500
1501 dm_bufio_lock(c);
1502
1503 b = __find(c, block);
1504 if (b)
1505 forget_buffer_locked(b);
1506
1507 dm_bufio_unlock(c);
1508 }
1509 EXPORT_SYMBOL_GPL(dm_bufio_forget);
1510
1511 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
1512 {
1513 struct dm_buffer *b;
1514 sector_t end_block = block + n_blocks;
1515
1516 while (block < end_block) {
1517 dm_bufio_lock(c);
1518
1519 b = __find_next(c, block);
1520 if (b) {
1521 block = b->block + 1;
1522 forget_buffer_locked(b);
1523 }
1524
1525 dm_bufio_unlock(c);
1526
1527 if (!b)
1528 break;
1529 }
1530
1531 }
1532 EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
1533
1534 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
1535 {
1536 c->minimum_buffers = n;
1537 }
1538 EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
1539
1540 unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
1541 {
1542 return c->block_size;
1543 }
1544 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1545
1546 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1547 {
1548 sector_t s = bdev_nr_sectors(c->bdev);
1549 if (s >= c->start)
1550 s -= c->start;
1551 else
1552 s = 0;
1553 if (likely(c->sectors_per_block_bits >= 0))
1554 s >>= c->sectors_per_block_bits;
1555 else
1556 sector_div(s, c->block_size >> SECTOR_SHIFT);
1557 return s;
1558 }
1559 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1560
1561 struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
1562 {
1563 return c->dm_io;
1564 }
1565 EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
1566
1567 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1568 {
1569 return b->block;
1570 }
1571 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1572
1573 void *dm_bufio_get_block_data(struct dm_buffer *b)
1574 {
1575 return b->data;
1576 }
1577 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1578
1579 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1580 {
1581 return b + 1;
1582 }
1583 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1584
1585 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1586 {
1587 return b->c;
1588 }
1589 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1590
1591 static void drop_buffers(struct dm_bufio_client *c)
1592 {
1593 struct dm_buffer *b;
1594 int i;
1595 bool warned = false;
1596
1597 BUG_ON(dm_bufio_in_request());
1598
1599 /*
1600 * An optimization so that the buffers are not written one-by-one.
1601 */
1602 dm_bufio_write_dirty_buffers_async(c);
1603
1604 dm_bufio_lock(c);
1605
1606 while ((b = __get_unclaimed_buffer(c)))
1607 __free_buffer_wake(b);
1608
1609 for (i = 0; i < LIST_SIZE; i++)
1610 list_for_each_entry(b, &c->lru[i], lru_list) {
1611 WARN_ON(!warned);
1612 warned = true;
1613 DMERR("leaked buffer %llx, hold count %u, list %d",
1614 (unsigned long long)b->block, b->hold_count, i);
1615 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1616 stack_trace_print(b->stack_entries, b->stack_len, 1);
1617 /* mark unclaimed to avoid BUG_ON below */
1618 b->hold_count = 0;
1619 #endif
1620 }
1621
1622 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1623 while ((b = __get_unclaimed_buffer(c)))
1624 __free_buffer_wake(b);
1625 #endif
1626
1627 for (i = 0; i < LIST_SIZE; i++)
1628 BUG_ON(!list_empty(&c->lru[i]));
1629
1630 dm_bufio_unlock(c);
1631 }
1632
1633 /*
1634 * We may not be able to evict this buffer if I/O is pending or the client
1635 * is still using it. The caller is expected to know the buffer is too old.
1636 *
1637 * And if GFP_NOFS is used, we must not do any I/O because we hold
1638 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1639 * rerouted to a different bufio client.
1640 */
1641 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1642 {
1643 if (!(gfp & __GFP_FS) ||
1644 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
1645 if (test_bit_acquire(B_READING, &b->state) ||
1646 test_bit(B_WRITING, &b->state) ||
1647 test_bit(B_DIRTY, &b->state))
1648 return false;
1649 }
1650
1651 if (b->hold_count)
1652 return false;
1653
1654 __make_buffer_clean(b);
1655 __unlink_buffer(b);
1656 __free_buffer_wake(b);
1657
1658 return true;
1659 }
1660
1661 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1662 {
1663 unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
1664 if (likely(c->sectors_per_block_bits >= 0))
1665 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
1666 else
1667 retain_bytes /= c->block_size;
1668 return retain_bytes;
1669 }
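/*
 * Illustrative example (hypothetical values): with the default
 * dm_bufio_retain_bytes of 256 KiB and a 4096-byte block size,
 * retain_bytes >> (3 + SECTOR_SHIFT) = 262144 >> 12 = 64 buffers are
 * exempted from shrinker eviction.
 */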
1670
1671 static void __scan(struct dm_bufio_client *c)
1672 {
1673 int l;
1674 struct dm_buffer *b, *tmp;
1675 unsigned long freed = 0;
1676 unsigned long count = c->n_buffers[LIST_CLEAN] +
1677 c->n_buffers[LIST_DIRTY];
1678 unsigned long retain_target = get_retain_buffers(c);
1679
1680 for (l = 0; l < LIST_SIZE; l++) {
1681 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1682 if (count - freed <= retain_target)
1683 atomic_long_set(&c->need_shrink, 0);
1684 if (!atomic_long_read(&c->need_shrink))
1685 return;
1686 if (__try_evict_buffer(b, GFP_KERNEL)) {
1687 atomic_long_dec(&c->need_shrink);
1688 freed++;
1689 }
1690 cond_resched();
1691 }
1692 }
1693 }
1694
1695 static void shrink_work(struct work_struct *w)
1696 {
1697 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
1698
1699 dm_bufio_lock(c);
1700 __scan(c);
1701 dm_bufio_unlock(c);
1702 }
1703
1704 static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1705 {
1706 struct dm_bufio_client *c;
1707 bool bypass = false;
1708
1709 trace_android_vh_dm_bufio_shrink_scan_bypass(
1710 dm_bufio_current_allocated,
1711 &bypass);
1712 if (bypass)
1713 return 0;
1714
1715 c = container_of(shrink, struct dm_bufio_client, shrinker);
1716 atomic_long_add(sc->nr_to_scan, &c->need_shrink);
1717 queue_work(dm_bufio_wq, &c->shrink_work);
1718
1719 return sc->nr_to_scan;
1720 }
1721
1722 static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1723 {
1724 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1725 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1726 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1727 unsigned long retain_target = get_retain_buffers(c);
1728 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
1729
1730 if (unlikely(count < retain_target))
1731 count = 0;
1732 else
1733 count -= retain_target;
1734
1735 if (unlikely(count < queued_for_cleanup))
1736 count = 0;
1737 else
1738 count -= queued_for_cleanup;
1739
1740 return count;
1741 }
1742
1743 /*
1744 * Create the buffering interface
1745 */
1746 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
1747 unsigned int reserved_buffers, unsigned int aux_size,
1748 void (*alloc_callback)(struct dm_buffer *),
1749 void (*write_callback)(struct dm_buffer *),
1750 unsigned int flags)
1751 {
1752 int r;
1753 struct dm_bufio_client *c;
1754 unsigned int i;
1755 char slab_name[27];
1756
1757 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
1758 DMERR("%s: block size not specified or is not multiple of 512b", __func__);
1759 r = -EINVAL;
1760 goto bad_client;
1761 }
1762
1763 c = kzalloc(sizeof(*c), GFP_KERNEL);
1764 if (!c) {
1765 r = -ENOMEM;
1766 goto bad_client;
1767 }
1768 c->buffer_tree = RB_ROOT;
1769
1770 c->bdev = bdev;
1771 c->block_size = block_size;
1772 if (is_power_of_2(block_size))
1773 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1774 else
1775 c->sectors_per_block_bits = -1;
1776
1777 c->alloc_callback = alloc_callback;
1778 c->write_callback = write_callback;
1779
1780 if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
1781 c->no_sleep = true;
1782 static_branch_inc(&no_sleep_enabled);
1783 }
1784
1785 for (i = 0; i < LIST_SIZE; i++) {
1786 INIT_LIST_HEAD(&c->lru[i]);
1787 c->n_buffers[i] = 0;
1788 }
1789
1790 mutex_init(&c->lock);
1791 spin_lock_init(&c->spinlock);
1792 INIT_LIST_HEAD(&c->reserved_buffers);
1793 c->need_reserved_buffers = reserved_buffers;
1794
1795 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
1796
1797 init_waitqueue_head(&c->free_buffer_wait);
1798 c->async_write_error = 0;
1799
1800 c->dm_io = dm_io_client_create();
1801 if (IS_ERR(c->dm_io)) {
1802 r = PTR_ERR(c->dm_io);
1803 goto bad_dm_io;
1804 }
1805
1806 if (block_size <= KMALLOC_MAX_SIZE &&
1807 (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
1808 unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
1809 snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
1810 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
1811 SLAB_RECLAIM_ACCOUNT, NULL);
1812 if (!c->slab_cache) {
1813 r = -ENOMEM;
1814 goto bad;
1815 }
1816 }
1817 if (aux_size)
1818 snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
1819 else
1820 snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
1821 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
1822 0, SLAB_RECLAIM_ACCOUNT, NULL);
1823 if (!c->slab_buffer) {
1824 r = -ENOMEM;
1825 goto bad;
1826 }
1827
1828 while (c->need_reserved_buffers) {
1829 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1830
1831 if (!b) {
1832 r = -ENOMEM;
1833 goto bad;
1834 }
1835 __free_buffer_wake(b);
1836 }
1837
1838 INIT_WORK(&c->shrink_work, shrink_work);
1839 atomic_long_set(&c->need_shrink, 0);
1840
1841 c->shrinker.count_objects = dm_bufio_shrink_count;
1842 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1843 c->shrinker.seeks = 1;
1844 c->shrinker.batch = 0;
1845 r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)",
1846 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
1847 if (r)
1848 goto bad;
1849
1850 mutex_lock(&dm_bufio_clients_lock);
1851 dm_bufio_client_count++;
1852 list_add(&c->client_list, &dm_bufio_all_clients);
1853 __cache_size_refresh();
1854 mutex_unlock(&dm_bufio_clients_lock);
1855
1856 return c;
1857
1858 bad:
1859 while (!list_empty(&c->reserved_buffers)) {
1860 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1861 struct dm_buffer, lru_list);
1862 list_del(&b->lru_list);
1863 free_buffer(b);
1864 }
1865 kmem_cache_destroy(c->slab_cache);
1866 kmem_cache_destroy(c->slab_buffer);
1867 dm_io_client_destroy(c->dm_io);
1868 bad_dm_io:
1869 mutex_destroy(&c->lock);
1870 if (c->no_sleep)
1871 static_branch_dec(&no_sleep_enabled);
1872 kfree(c);
1873 bad_client:
1874 return ERR_PTR(r);
1875 }
1876 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
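/*
 * A minimal client lifecycle sketch (hypothetical values, not part of this
 * file): create a client with 4 KiB blocks, one reserved buffer, no
 * auxiliary data and no callbacks, then tear it down after all buffers
 * have been released:
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */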
1877
1878 /*
1879 * Free the buffering interface.
1880 * It is required that there are no references on any buffers.
1881 */
1882 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1883 {
1884 unsigned int i;
1885
1886 drop_buffers(c);
1887
1888 unregister_shrinker(&c->shrinker);
1889 flush_work(&c->shrink_work);
1890
1891 mutex_lock(&dm_bufio_clients_lock);
1892
1893 list_del(&c->client_list);
1894 dm_bufio_client_count--;
1895 __cache_size_refresh();
1896
1897 mutex_unlock(&dm_bufio_clients_lock);
1898
1899 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1900 BUG_ON(c->need_reserved_buffers);
1901
1902 while (!list_empty(&c->reserved_buffers)) {
1903 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1904 struct dm_buffer, lru_list);
1905 list_del(&b->lru_list);
1906 free_buffer(b);
1907 }
1908
1909 for (i = 0; i < LIST_SIZE; i++)
1910 if (c->n_buffers[i])
1911 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1912
1913 for (i = 0; i < LIST_SIZE; i++)
1914 BUG_ON(c->n_buffers[i]);
1915
1916 kmem_cache_destroy(c->slab_cache);
1917 kmem_cache_destroy(c->slab_buffer);
1918 dm_io_client_destroy(c->dm_io);
1919 mutex_destroy(&c->lock);
1920 if (c->no_sleep)
1921 static_branch_dec(&no_sleep_enabled);
1922 kfree(c);
1923 }
1924 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1925
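/*
 * Drop all cached buffers and wait for any outstanding shrinker work to
 * finish, typically used when the cached contents of the device may have
 * become stale.
 */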
1926 void dm_bufio_client_reset(struct dm_bufio_client *c)
1927 {
1928 drop_buffers(c);
1929 flush_work(&c->shrink_work);
1930 }
1931 EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
1932
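/*
 * Set a sector offset that is added to every buffer's position when it is
 * read from or written to the underlying device.
 */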
1933 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1934 {
1935 c->start = start;
1936 }
1937 EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
1938
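/*
 * Convert the max_age_seconds module parameter to jiffies, clamped so that
 * the multiplication by HZ cannot overflow.
 */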
1939 static unsigned int get_max_age_hz(void)
1940 {
1941 unsigned int max_age = READ_ONCE(dm_bufio_max_age);
1942
1943 if (max_age > UINT_MAX / HZ)
1944 max_age = UINT_MAX / HZ;
1945
1946 return max_age * HZ;
1947 }
1948
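/*
 * Return true if the buffer has not been accessed for at least age_hz jiffies.
 */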
1949 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1950 {
1951 return time_after_eq(jiffies, b->last_accessed + age_hz);
1952 }
1953
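/*
 * Flush dirty buffers if the dirty watermark has been exceeded, then walk the
 * clean LRU from its cold end, evicting buffers that are older than age_hz
 * until the number of cached buffers drops to the retain target.
 */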
1954 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1955 {
1956 struct dm_buffer *b, *tmp;
1957 unsigned long retain_target = get_retain_buffers(c);
1958 unsigned long count;
1959 LIST_HEAD(write_list);
1960
1961 dm_bufio_lock(c);
1962
1963 __check_watermark(c, &write_list);
1964 if (unlikely(!list_empty(&write_list))) {
1965 dm_bufio_unlock(c);
1966 __flush_write_list(&write_list);
1967 dm_bufio_lock(c);
1968 }
1969
1970 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1971 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1972 if (count <= retain_target)
1973 break;
1974
1975 if (!older_than(b, age_hz))
1976 break;
1977
1978 if (__try_evict_buffer(b, 0))
1979 count--;
1980
1981 cond_resched();
1982 }
1983
1984 dm_bufio_unlock(c);
1985 }
1986
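/*
 * Handler for dm_bufio_replacement_work: evict buffers from the global LRU
 * until total allocation drops below the low watermark (cache size minus
 * 1/DM_BUFIO_LOW_WATERMARK_RATIO of it), the queue is exhausted, or the loop
 * budget runs out.  Recently accessed buffers get a second chance, and the
 * owning client's lock is taken (trylock first, because the global spinlock
 * is held) before a buffer is evicted.
 */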
1987 static void do_global_cleanup(struct work_struct *w)
1988 {
1989 struct dm_bufio_client *locked_client = NULL;
1990 struct dm_bufio_client *current_client;
1991 struct dm_buffer *b;
1992 unsigned int spinlock_hold_count;
1993 unsigned long threshold = dm_bufio_cache_size -
1994 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
1995 unsigned long loops = global_num * 2;
1996
1997 mutex_lock(&dm_bufio_clients_lock);
1998
1999 while (1) {
2000 cond_resched();
2001
2002 spin_lock(&global_spinlock);
2003 if (unlikely(dm_bufio_current_allocated <= threshold))
2004 break;
2005
2006 spinlock_hold_count = 0;
2007 get_next:
2008 if (!loops--)
2009 break;
2010 if (unlikely(list_empty(&global_queue)))
2011 break;
2012 b = list_entry(global_queue.prev, struct dm_buffer, global_list);
2013
2014 if (b->accessed) {
2015 b->accessed = 0;
2016 list_move(&b->global_list, &global_queue);
2017 if (likely(++spinlock_hold_count < 16))
2018 goto get_next;
2019 spin_unlock(&global_spinlock);
2020 continue;
2021 }
2022
2023 current_client = b->c;
2024 if (unlikely(current_client != locked_client)) {
2025 if (locked_client)
2026 dm_bufio_unlock(locked_client);
2027
2028 if (!dm_bufio_trylock(current_client)) {
2029 spin_unlock(&global_spinlock);
2030 dm_bufio_lock(current_client);
2031 locked_client = current_client;
2032 continue;
2033 }
2034
2035 locked_client = current_client;
2036 }
2037
2038 spin_unlock(&global_spinlock);
2039
2040 if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
2041 spin_lock(&global_spinlock);
2042 list_move(&b->global_list, &global_queue);
2043 spin_unlock(&global_spinlock);
2044 }
2045 }
2046
2047 spin_unlock(&global_spinlock);
2048
2049 if (locked_client)
2050 dm_bufio_unlock(locked_client);
2051
2052 mutex_unlock(&dm_bufio_clients_lock);
2053 }
2054
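/*
 * Periodic pass over all registered clients, evicting buffers that are older
 * than the max_age_seconds module parameter.  A vendor hook may adjust the
 * age or bypass the pass entirely.
 */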
2055 static void cleanup_old_buffers(void)
2056 {
2057 unsigned long max_age_hz = get_max_age_hz();
2058 struct dm_bufio_client *c;
2059 bool bypass = false;
2060
2061 trace_android_vh_cleanup_old_buffers_bypass(
2062 dm_bufio_current_allocated,
2063 &max_age_hz,
2064 &bypass);
2065 if (bypass)
2066 return;
2067
2068 mutex_lock(&dm_bufio_clients_lock);
2069
2070 __cache_size_refresh();
2071
2072 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2073 __evict_old_buffers(c, max_age_hz);
2074
2075 mutex_unlock(&dm_bufio_clients_lock);
2076 }
2077
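/*
 * Delayed work: clean up old buffers and re-arm itself to run again in
 * DM_BUFIO_WORK_TIMER_SECS seconds.
 */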
2078 static void work_fn(struct work_struct *w)
2079 {
2080 cleanup_old_buffers();
2081
2082 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2083 DM_BUFIO_WORK_TIMER_SECS * HZ);
2084 }
2085
2086 /*----------------------------------------------------------------
2087 * Module setup
2088 *--------------------------------------------------------------*/
2089
2090 /*
2091 * This is called only once for the whole dm_bufio module.
2092  * It initializes the memory limit.
2093 */
2094 static int __init dm_bufio_init(void)
2095 {
2096 __u64 mem;
2097
2098 dm_bufio_allocated_kmem_cache = 0;
2099 dm_bufio_allocated_get_free_pages = 0;
2100 dm_bufio_allocated_vmalloc = 0;
2101 dm_bufio_current_allocated = 0;
2102
2103 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2104 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2105
2106 if (mem > ULONG_MAX)
2107 mem = ULONG_MAX;
2108
2109 #ifdef CONFIG_MMU
2110 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2111 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2112 #endif
2113
2114 dm_bufio_default_cache_size = mem;
2115
2116 mutex_lock(&dm_bufio_clients_lock);
2117 __cache_size_refresh();
2118 mutex_unlock(&dm_bufio_clients_lock);
2119
2120 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2121 if (!dm_bufio_wq)
2122 return -ENOMEM;
2123
2124 INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2125 INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2126 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2127 DM_BUFIO_WORK_TIMER_SECS * HZ);
2128
2129 return 0;
2130 }
2131
2132 /*
2133 * This is called once when unloading the dm_bufio module.
2134 */
2135 static void __exit dm_bufio_exit(void)
2136 {
2137 int bug = 0;
2138
2139 cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2140 destroy_workqueue(dm_bufio_wq);
2141
2142 if (dm_bufio_client_count) {
2143 DMCRIT("%s: dm_bufio_client_count leaked: %d",
2144 __func__, dm_bufio_client_count);
2145 bug = 1;
2146 }
2147
2148 if (dm_bufio_current_allocated) {
2149 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2150 __func__, dm_bufio_current_allocated);
2151 bug = 1;
2152 }
2153
2154 if (dm_bufio_allocated_get_free_pages) {
2155 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2156 __func__, dm_bufio_allocated_get_free_pages);
2157 bug = 1;
2158 }
2159
2160 if (dm_bufio_allocated_vmalloc) {
2161 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
2162 __func__, dm_bufio_allocated_vmalloc);
2163 bug = 1;
2164 }
2165
2166 BUG_ON(bug);
2167 }
2168
2169 module_init(dm_bufio_init)
2170 module_exit(dm_bufio_exit)
2171
2172 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
2173 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2174
2175 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
2176 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2177
2178 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
2179 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2180
2181 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
2182 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2183
2184 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
2185 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2186
2187 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
2188 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2189
2190 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
2191 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2192
2193 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
2194 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2195
2196 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2197 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2198 MODULE_LICENSE("GPL");
2199