/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v2.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *	      either direction
 */

/*----------------------------------------------------------------*/

struct io_tracker {
	spinlock_t lock;

	/*
	 * Sectors of in-flight IO.
	 */
	sector_t in_flight;

	/*
	 * The time, in jiffies, when this device became idle (if it is
	 * indeed idle).
	 */
	unsigned long idle_time;
	unsigned long last_update_time;
};

static void iot_init(struct io_tracker *iot)
{
	spin_lock_init(&iot->lock);
	iot->in_flight = 0ul;
	iot->idle_time = 0ul;
	iot->last_update_time = jiffies;
}

static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
	if (iot->in_flight)
		return false;

	return time_after(jiffies, iot->idle_time + jifs);
}

static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
	bool r;

	spin_lock_irq(&iot->lock);
	r = __iot_idle_for(iot, jifs);
	spin_unlock_irq(&iot->lock);

	return r;
}

static void iot_io_begin(struct io_tracker *iot, sector_t len)
{
	spin_lock_irq(&iot->lock);
	iot->in_flight += len;
	spin_unlock_irq(&iot->lock);
}

static void __iot_io_end(struct io_tracker *iot, sector_t len)
{
	if (!len)
		return;

	iot->in_flight -= len;
	if (!iot->in_flight)
		iot->idle_time = jiffies;
}

static void iot_io_end(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	__iot_io_end(iot, len);
	spin_unlock_irqrestore(&iot->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Represents a chunk of future work. 'input' allows continuations to pass
 * values between themselves, typically error values.
 */
struct continuation {
	struct work_struct ws;
	blk_status_t input;
};

static inline void init_continuation(struct continuation *k,
				     void (*fn)(struct work_struct *))
{
	INIT_WORK(&k->ws, fn);
	k->input = 0;
}

static inline void queue_continuation(struct workqueue_struct *wq,
				      struct continuation *k)
{
	queue_work(wq, &k->ws);
}

/*----------------------------------------------------------------*/

/*
 * The batcher collects together pieces of work that need a particular
 * operation to occur before they can proceed (typically a commit).
 */
struct batcher {
	/*
	 * The operation that everyone is waiting for.
	 */
	blk_status_t (*commit_op)(void *context);
	void *commit_context;

	/*
	 * This is how bios should be issued once the commit op is complete
	 * (accounted_request).
	 */
	void (*issue_op)(struct bio *bio, void *context);
	void *issue_context;

	/*
	 * Queued work gets put on here after commit.
	 */
	struct workqueue_struct *wq;

	spinlock_t lock;
	struct list_head work_items;
	struct bio_list bios;
	struct work_struct commit_work;

	bool commit_scheduled;
};

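/*
 * Runs on the batcher's workqueue. Grabs the currently batched work
 * items and bios, performs the commit, then hands the result to the
 * waiting continuations and either issues or errors the held bios.
 */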
static void __commit(struct work_struct *_ws)
{
	struct batcher *b = container_of(_ws, struct batcher, commit_work);
	blk_status_t r;
	struct list_head work_items;
	struct work_struct *ws, *tmp;
	struct continuation *k;
	struct bio *bio;
	struct bio_list bios;

	INIT_LIST_HEAD(&work_items);
	bio_list_init(&bios);

	/*
	 * We have to grab these before the commit_op to avoid a race
	 * condition.
	 */
	spin_lock_irq(&b->lock);
	list_splice_init(&b->work_items, &work_items);
	bio_list_merge(&bios, &b->bios);
	bio_list_init(&b->bios);
	b->commit_scheduled = false;
	spin_unlock_irq(&b->lock);

	r = b->commit_op(b->commit_context);

	list_for_each_entry_safe(ws, tmp, &work_items, entry) {
		k = container_of(ws, struct continuation, ws);
		k->input = r;
		INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */
		queue_work(b->wq, ws);
	}

	while ((bio = bio_list_pop(&bios))) {
		if (r) {
			bio->bi_status = r;
			bio_endio(bio);
		} else
			b->issue_op(bio, b->issue_context);
	}
}

static void batcher_init(struct batcher *b,
			 blk_status_t (*commit_op)(void *),
			 void *commit_context,
			 void (*issue_op)(struct bio *bio, void *),
			 void *issue_context,
			 struct workqueue_struct *wq)
{
	b->commit_op = commit_op;
	b->commit_context = commit_context;
	b->issue_op = issue_op;
	b->issue_context = issue_context;
	b->wq = wq;

	spin_lock_init(&b->lock);
	INIT_LIST_HEAD(&b->work_items);
	bio_list_init(&b->bios);
	INIT_WORK(&b->commit_work, __commit);
	b->commit_scheduled = false;
}

static void async_commit(struct batcher *b)
{
	queue_work(b->wq, &b->commit_work);
}

static void continue_after_commit(struct batcher *b, struct continuation *k)
{
	bool commit_scheduled;

	spin_lock_irq(&b->lock);
	commit_scheduled = b->commit_scheduled;
	list_add_tail(&k->ws.entry, &b->work_items);
	spin_unlock_irq(&b->lock);

	if (commit_scheduled)
		async_commit(b);
}

/*
 * Bios are errored if commit failed.
 */
static void issue_after_commit(struct batcher *b, struct bio *bio)
{
	bool commit_scheduled;

	spin_lock_irq(&b->lock);
	commit_scheduled = b->commit_scheduled;
	bio_list_add(&b->bios, bio);
	spin_unlock_irq(&b->lock);

	if (commit_scheduled)
		async_commit(b);
}

/*
 * Call this if some urgent work is waiting for the commit to complete.
 */
static void schedule_commit(struct batcher *b)
{
	bool immediate;

	spin_lock_irq(&b->lock);
	immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
	b->commit_scheduled = true;
	spin_unlock_irq(&b->lock);

	if (immediate)
		async_commit(b);
}

/*
 * There are a couple of places where we let a bio run, but want to do some
 * work before calling its endio function. We do this by temporarily
 * changing the endio fn.
 */
struct dm_hook_info {
	bio_end_io_t *bi_end_io;
};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
			bio_end_io_t *bi_end_io, void *bi_private)
{
	h->bi_end_io = bio->bi_end_io;

	bio->bi_end_io = bi_end_io;
	bio->bi_private = bi_private;
}

static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;
}

/*----------------------------------------------------------------*/

#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

enum cache_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
	CM_FAIL
};

enum cache_io_mode {
	/*
	 * Data is written to cached blocks only. These blocks are marked
	 * dirty. If you lose the cache device you will lose data.
	 * Potential performance increase for both reads and writes.
	 */
	CM_IO_WRITEBACK,

	/*
	 * Data is written to both cache and origin. Blocks are never
	 * dirty. Potential performance benefit for reads only.
	 */
	CM_IO_WRITETHROUGH,

	/*
	 * A degraded mode useful for various cache coherency situations
	 * (eg, rolling back snapshots). Reads and writes always go to the
	 * origin. If a write goes to a cached oblock, then the cache
	 * block is invalidated.
	 */
	CM_IO_PASSTHROUGH
};

struct cache_features {
	enum cache_metadata_mode mode;
	enum cache_io_mode io_mode;
	unsigned metadata_version;
	bool discard_passdown:1;
};

struct cache_stats {
	atomic_t read_hit;
	atomic_t read_miss;
	atomic_t write_hit;
	atomic_t write_miss;
	atomic_t demotion;
	atomic_t promotion;
	atomic_t writeback;
	atomic_t copies_avoided;
	atomic_t cache_cell_clash;
	atomic_t commit_count;
	atomic_t discard_count;
};

struct cache {
	struct dm_target *ti;
	spinlock_t lock;

	/*
	 * Fields for converting from sectors to blocks.
	 */
	int sectors_per_block_shift;
	sector_t sectors_per_block;

	struct dm_cache_metadata *cmd;

	/*
	 * Metadata is written to this device.
	 */
	struct dm_dev *metadata_dev;

	/*
	 * The slower of the two data devices. Typically a spindle.
	 */
	struct dm_dev *origin_dev;

	/*
	 * The faster of the two data devices. Typically an SSD.
	 */
	struct dm_dev *cache_dev;

	/*
	 * Size of the origin device in _complete_ blocks and native sectors.
	 */
	dm_oblock_t origin_blocks;
	sector_t origin_sectors;

	/*
	 * Size of the cache device in blocks.
	 */
	dm_cblock_t cache_size;

	/*
	 * Invalidation fields.
	 */
	spinlock_t invalidation_lock;
	struct list_head invalidation_requests;

	sector_t migration_threshold;
	wait_queue_head_t migration_wait;
	atomic_t nr_allocated_migrations;

	/*
	 * The number of in flight migrations that are performing
	 * background io. eg, promotion, writeback.
	 */
	atomic_t nr_io_migrations;

	struct bio_list deferred_bios;

	struct rw_semaphore quiesce_lock;

	/*
	 * origin_blocks entries, discarded if set.
	 */
	dm_dblock_t discard_nr_blocks;
	unsigned long *discard_bitset;
	uint32_t discard_block_size; /* a power of 2 times sectors per block */

	/*
	 * Rather than reconstructing the table line for the status we just
	 * save it and regurgitate.
	 */
	unsigned nr_ctr_args;
	const char **ctr_args;

	struct dm_kcopyd_client *copier;
	struct work_struct deferred_bio_worker;
	struct work_struct migration_worker;
	struct workqueue_struct *wq;
	struct delayed_work waker;
	struct dm_bio_prison_v2 *prison;

	/*
	 * cache_size entries, dirty if set
	 */
	unsigned long *dirty_bitset;
	atomic_t nr_dirty;

	unsigned policy_nr_args;
	struct dm_cache_policy *policy;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	struct cache_stats stats;

	bool need_tick_bio:1;
	bool sized:1;
	bool invalidate:1;
	bool commit_requested:1;
	bool loaded_mappings:1;
	bool loaded_discards:1;

	struct rw_semaphore background_work_lock;

	struct batcher committer;
	struct work_struct commit_ws;

	struct io_tracker tracker;

	mempool_t migration_pool;

	struct bio_set bs;
};

struct per_bio_data {
	bool tick:1;
	unsigned req_nr:2;
	struct dm_bio_prison_cell_v2 *cell;
	struct dm_hook_info hook_info;
	sector_t len;
};

struct dm_cache_migration {
	struct continuation k;
	struct cache *cache;

	struct policy_work *op;
	struct bio *overwrite_bio;
	struct dm_bio_prison_cell_v2 *cell;

	dm_cblock_t invalidate_cblock;
	dm_oblock_t invalidate_oblock;
};

/*----------------------------------------------------------------*/

static bool writethrough_mode(struct cache *cache)
{
	return cache->features.io_mode == CM_IO_WRITETHROUGH;
}

static bool writeback_mode(struct cache *cache)
{
	return cache->features.io_mode == CM_IO_WRITEBACK;
}

static inline bool passthrough_mode(struct cache *cache)
{
	return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
}

/*----------------------------------------------------------------*/

static void wake_deferred_bio_worker(struct cache *cache)
{
	queue_work(cache->wq, &cache->deferred_bio_worker);
}

static void wake_migration_worker(struct cache *cache)
{
	if (passthrough_mode(cache))
		return;

	queue_work(cache->wq, &cache->migration_worker);
}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
{
	return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
{
	dm_bio_prison_free_cell_v2(cache->prison, cell);
}

static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
	struct dm_cache_migration *mg;

	mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);

	memset(mg, 0, sizeof(*mg));

	mg->cache = cache;
	atomic_inc(&cache->nr_allocated_migrations);

	return mg;
}

static void free_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	if (atomic_dec_and_test(&cache->nr_allocated_migrations))
		wake_up(&cache->migration_wait);

	mempool_free(mg, &cache->migration_pool);
}

/*----------------------------------------------------------------*/

static inline dm_oblock_t oblock_succ(dm_oblock_t b)
{
	return to_oblock(from_oblock(b) + 1ull);
}

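/*
 * Keys describe a run of origin blocks; callers pass [begin, end) with
 * end = oblock_succ(begin) to cover a single block.
 */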
static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2 *key)
{
	key->virtual = 0;
	key->dev = 0;
	key->block_begin = from_oblock(begin);
	key->block_end = from_oblock(end);
}

/*
 * We have two lock levels. Level 0, which is used to prevent WRITEs, and
 * level 1 which prevents *both* READs and WRITEs.
 */
#define WRITE_LOCK_LEVEL 0
#define READ_WRITE_LOCK_LEVEL 1

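/*
 * A write bio takes its shared lock at level 0, so any exclusive lock
 * blocks it; a read bio takes level 1, so only a full read+write
 * exclusive lock holds it up.
 */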
static unsigned lock_level(struct bio *bio)
{
	return bio_data_dir(bio) == WRITE ?
		WRITE_LOCK_LEVEL :
		READ_WRITE_LOCK_LEVEL;
}

/*----------------------------------------------------------------
 * Per bio data
 *--------------------------------------------------------------*/

static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	BUG_ON(!pb);
	return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio)
{
	struct per_bio_data *pb = get_per_bio_data(bio);

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	pb->cell = NULL;
	pb->len = 0;

	return pb;
}

/*----------------------------------------------------------------*/

static void defer_bio(struct cache *cache, struct bio *bio)
{
	spin_lock_irq(&cache->lock);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irq(&cache->lock);

	wake_deferred_bio_worker(cache);
}

static void defer_bios(struct cache *cache, struct bio_list *bios)
{
	spin_lock_irq(&cache->lock);
	bio_list_merge(&cache->deferred_bios, bios);
	bio_list_init(bios);
	spin_unlock_irq(&cache->lock);

	wake_deferred_bio_worker(cache);
}

/*----------------------------------------------------------------*/

static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
{
	bool r;
	struct per_bio_data *pb;
	struct dm_cell_key_v2 key;
	dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
	struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;

	cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */

	build_key(oblock, end, &key);
	r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
	if (!r) {
		/*
		 * Failed to get the lock.
		 */
		free_prison_cell(cache, cell_prealloc);
		return r;
	}

	if (cell != cell_prealloc)
		free_prison_cell(cache, cell_prealloc);

	pb = get_per_bio_data(bio);
	pb->cell = cell;

	return r;
}

/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
	return test_bit(from_cblock(b), cache->dirty_bitset);
}

static void set_dirty(struct cache *cache, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		atomic_inc(&cache->nr_dirty);
		policy_set_dirty(cache->policy, cblock);
	}
}

/*
 * These two are called after migrations, to force the policy and the
 * dirty bitset back into sync.
 */
static void force_set_dirty(struct cache *cache, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset))
		atomic_inc(&cache->nr_dirty);
	policy_set_dirty(cache->policy, cblock);
}

static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock)
{
	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
		if (atomic_dec_return(&cache->nr_dirty) == 0)
			dm_table_event(cache->ti->table);
	}

	policy_clear_dirty(cache->policy, cblock);
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
	return cache->sectors_per_block_shift >= 0;
}

static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);

	return b;
}

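/*
 * The discard block size is an exact multiple of the cache block size
 * (see the comment on discard_block_size), so this division is exact:
 * it yields the number of origin blocks per discard block.
 */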
static dm_block_t oblocks_per_dblock(struct cache *cache)
{
	dm_block_t oblocks = cache->discard_block_size;

	if (block_size_is_power_of_two(cache))
		oblocks >>= cache->sectors_per_block_shift;
	else
		oblocks = block_div(oblocks, cache->sectors_per_block);

	return oblocks;
}

static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
	return to_dblock(block_div(from_oblock(oblock),
				   oblocks_per_dblock(cache)));
}

static void set_discard(struct cache *cache, dm_dblock_t b)
{
	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
	atomic_inc(&cache->stats.discard_count);

	spin_lock_irq(&cache->lock);
	set_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irq(&cache->lock);
}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{
	spin_lock_irq(&cache->lock);
	clear_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irq(&cache->lock);
}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
	int r;

	spin_lock_irq(&cache->lock);
	r = test_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irq(&cache->lock);

	return r;
}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
	int r;

	spin_lock_irq(&cache->lock);
	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
		     cache->discard_bitset);
	spin_unlock_irq(&cache->lock);

	return r;
}

/*----------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
	bio_set_dev(bio, cache->origin_dev->bdev);
}

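/*
 * Remap to the cache device, preserving the bio's offset within the
 * block: a shift/mask when the block size is a power of two, otherwise
 * a full division.
 */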
static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{
	sector_t bi_sector = bio->bi_iter.bi_sector;
	sector_t block = from_cblock(cblock);

	bio_set_dev(bio, cache->cache_dev->bdev);
	if (!block_size_is_power_of_two(cache))
		bio->bi_iter.bi_sector =
			(block * cache->sectors_per_block) +
			sector_div(bi_sector, cache->sectors_per_block);
	else
		bio->bi_iter.bi_sector =
			(block << cache->sectors_per_block_shift) |
			(bi_sector & (cache->sectors_per_block - 1));
}

static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
	struct per_bio_data *pb;

	spin_lock_irq(&cache->lock);
	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
	    bio_op(bio) != REQ_OP_DISCARD) {
		pb = get_per_bio_data(bio);
		pb->tick = true;
		cache->need_tick_bio = false;
	}
	spin_unlock_irq(&cache->lock);
}

static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					    dm_oblock_t oblock, bool bio_has_pbd)
{
	if (bio_has_pbd)
		check_if_tick_bio_needed(cache, bio);
	remap_to_origin(cache, bio);
	if (bio_data_dir(bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					  dm_oblock_t oblock)
{
	// FIXME: check_if_tick_bio_needed() is called way too much through this interface
	__remap_to_origin_clear_discard(cache, bio, oblock, true);
}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_cache(cache, bio, cblock);
	if (bio_data_dir(bio) == WRITE) {
		set_dirty(cache, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}
}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (!block_size_is_power_of_two(cache))
		(void) sector_div(block_nr, cache->sectors_per_block);
	else
		block_nr >>= cache->sectors_per_block_shift;

	return to_oblock(block_nr);
}

static bool accountable_bio(struct cache *cache, struct bio *bio)
{
	return bio_op(bio) != REQ_OP_DISCARD;
}

static void accounted_begin(struct cache *cache, struct bio *bio)
{
	struct per_bio_data *pb;

	if (accountable_bio(cache, bio)) {
		pb = get_per_bio_data(bio);
		pb->len = bio_sectors(bio);
		iot_io_begin(&cache->tracker, pb->len);
	}
}

static void accounted_complete(struct cache *cache, struct bio *bio)
{
	struct per_bio_data *pb = get_per_bio_data(bio);

	iot_io_end(&cache->tracker, pb->len);
}

static void accounted_request(struct cache *cache, struct bio *bio)
{
	accounted_begin(cache, bio);
	submit_bio_noacct(bio);
}

static void issue_op(struct bio *bio, void *context)
{
	struct cache *cache = context;

	accounted_request(cache, bio);
}

/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices. Clone the bio and send the two in
 * parallel.
 */
static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
				      dm_oblock_t oblock, dm_cblock_t cblock)
{
	struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);

	BUG_ON(!origin_bio);

	bio_chain(origin_bio, bio);
	/*
	 * Passing false to __remap_to_origin_clear_discard() skips
	 * all code that might use per_bio_data (since clone doesn't have it)
	 */
	__remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
	submit_bio(origin_bio);

	remap_to_cache(cache, bio, cblock);
}

/*----------------------------------------------------------------
 * Failure modes
 *--------------------------------------------------------------*/
static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{
	return cache->features.mode;
}

static const char *cache_device_name(struct cache *cache)
{
	return dm_table_device_name(cache->ti->table);
}

static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
{
	const char *descs[] = {
		"write",
		"read-only",
		"fail"
	};

	dm_table_event(cache->ti->table);
	DMINFO("%s: switching cache to %s mode",
	       cache_device_name(cache), descs[(int)mode]);
}

static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{
	bool needs_check;
	enum cache_metadata_mode old_mode = get_cache_mode(cache);

	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
		DMERR("%s: unable to read needs_check flag, setting failure mode.",
		      cache_device_name(cache));
		new_mode = CM_FAIL;
	}

	if (new_mode == CM_WRITE && needs_check) {
		DMERR("%s: unable to switch cache to write mode until repaired.",
		      cache_device_name(cache));
		if (old_mode != new_mode)
			new_mode = old_mode;
		else
			new_mode = CM_READ_ONLY;
	}

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_cache_metadata_set_read_only(cache->cmd);
		break;

	case CM_WRITE:
		dm_cache_metadata_set_read_write(cache->cmd);
		break;
	}

	cache->features.mode = new_mode;

	if (new_mode != old_mode)
		notify_mode_switch(cache, new_mode);
}

static void abort_transaction(struct cache *cache)
{
	const char *dev_name = cache_device_name(cache);

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return;

	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
	if (dm_cache_metadata_abort(cache->cmd)) {
		DMERR("%s: failed to abort metadata transaction", dev_name);
		set_cache_mode(cache, CM_FAIL);
	}

	if (dm_cache_metadata_set_needs_check(cache->cmd)) {
		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
		set_cache_mode(cache, CM_FAIL);
	}
}

static void metadata_operation_failed(struct cache *cache, const char *op, int r)
{
	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
		    cache_device_name(cache), op, r);
	abort_transaction(cache);
	set_cache_mode(cache, CM_READ_ONLY);
}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cache->cmd, &stats);
	atomic_set(&cache->stats.read_hit, stats.read_hits);
	atomic_set(&cache->stats.read_miss, stats.read_misses);
	atomic_set(&cache->stats.write_hit, stats.write_hits);
	atomic_set(&cache->stats.write_miss, stats.write_misses);
}

static void save_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return;

	stats.read_hits = atomic_read(&cache->stats.read_hit);
	stats.read_misses = atomic_read(&cache->stats.read_miss);
	stats.write_hits = atomic_read(&cache->stats.write_hit);
	stats.write_misses = atomic_read(&cache->stats.write_miss);

	dm_cache_metadata_set_stats(cache->cmd, &stats);
}

static void update_stats(struct cache_stats *stats, enum policy_operation op)
{
	switch (op) {
	case POLICY_PROMOTE:
		atomic_inc(&stats->promotion);
		break;

	case POLICY_DEMOTE:
		atomic_inc(&stats->demotion);
		break;

	case POLICY_WRITEBACK:
		atomic_inc(&stats->writeback);
		break;
	}
}

/*----------------------------------------------------------------
 * Migration processing
 *
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 *--------------------------------------------------------------*/

static void inc_io_migrations(struct cache *cache)
{
	atomic_inc(&cache->nr_io_migrations);
}

static void dec_io_migrations(struct cache *cache)
{
	atomic_dec(&cache->nr_io_migrations);
}

static bool discard_or_flush(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
}

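/*
 * Round the start of the discard up, and the end down, to whole discard
 * blocks, so we never mark a partially discarded block as discarded.
 */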
static void calc_discard_block_range(struct cache *cache, struct bio *bio,
				     dm_dblock_t *b, dm_dblock_t *e)
{
	sector_t sb = bio->bi_iter.bi_sector;
	sector_t se = bio_end_sector(bio);

	*b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));

	if (se - sb < cache->discard_block_size)
		*e = *b;
	else
		*e = to_dblock(block_div(se, cache->discard_block_size));
}

/*----------------------------------------------------------------*/

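/*
 * The background_work_lock is taken and released from different
 * contexts (eg, a read lock taken when a migration starts is dropped by
 * its final continuation), so lockdep tracking is suppressed around it
 * to avoid false positives.
 */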
static void prevent_background_work(struct cache *cache)
{
	lockdep_off();
	down_write(&cache->background_work_lock);
	lockdep_on();
}

static void allow_background_work(struct cache *cache)
{
	lockdep_off();
	up_write(&cache->background_work_lock);
	lockdep_on();
}

static bool background_work_begin(struct cache *cache)
{
	bool r;

	lockdep_off();
	r = down_read_trylock(&cache->background_work_lock);
	lockdep_on();

	return r;
}

static void background_work_end(struct cache *cache)
{
	lockdep_off();
	up_read(&cache->background_work_lock);
	lockdep_on();
}

/*----------------------------------------------------------------*/

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}

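/*
 * A bio is 'optimisable' if it can stand in for the promotion copy:
 * either it overwrites the complete block, or the block is discarded so
 * its current contents don't matter. Only relevant in writeback mode.
 */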
static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
{
	return writeback_mode(cache) &&
		(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
}

static void quiesce(struct dm_cache_migration *mg,
		    void (*continuation)(struct work_struct *))
{
	init_continuation(&mg->k, continuation);
	dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
}

static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
{
	struct continuation *k = container_of(ws, struct continuation, ws);

	return container_of(k, struct dm_cache_migration, k);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);

	if (read_err || write_err)
		mg->k.input = BLK_STS_IOERR;

	queue_continuation(mg->cache->wq, &mg->k);
}

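/*
 * Kick off an async kcopyd copy of a single block: origin -> cache for
 * a promotion, cache -> origin otherwise. copy_complete() requeues the
 * migration's continuation when the copy finishes.
 */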
static void copy(struct dm_cache_migration *mg, bool promote)
{
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;

	o_region.bdev = cache->origin_dev->bdev;
	o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (promote)
		dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
	else
		dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
}

static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
{
	struct per_bio_data *pb = get_per_bio_data(bio);

	if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
		free_prison_cell(cache, pb->cell);
	pb->cell = NULL;
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_cache_migration *mg = bio->bi_private;
	struct cache *cache = mg->cache;
	struct per_bio_data *pb = get_per_bio_data(bio);

	dm_unhook_bio(&pb->hook_info, bio);

	if (bio->bi_status)
		mg->k.input = bio->bi_status;

	queue_continuation(cache->wq, &mg->k);
}

static void overwrite(struct dm_cache_migration *mg,
		      void (*continuation)(struct work_struct *))
{
	struct bio *bio = mg->overwrite_bio;
	struct per_bio_data *pb = get_per_bio_data(bio);

	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);

	/*
	 * The overwrite bio is part of the copy operation, as such it does
	 * not set/clear discard or dirty flags.
	 */
	if (mg->op->op == POLICY_PROMOTE)
		remap_to_cache(mg->cache, bio, mg->op->cblock);
	else
		remap_to_origin(mg->cache, bio);

	init_continuation(&mg->k, continuation);
	accounted_request(mg->cache, bio);
}

/*
 * Migration steps:
 *
 * 1) exclusive lock preventing WRITEs
 * 2) quiesce
 * 3) copy or issue overwrite bio
 * 4) upgrade to exclusive lock preventing READs and WRITEs
 * 5) quiesce
 * 6) update metadata and commit
 * 7) unlock
 */
static void mg_complete(struct dm_cache_migration *mg, bool success)
{
	struct bio_list bios;
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;
	dm_cblock_t cblock = op->cblock;

	if (success)
		update_stats(&cache->stats, op->op);

	switch (op->op) {
	case POLICY_PROMOTE:
		clear_discard(cache, oblock_to_dblock(cache, op->oblock));
		policy_complete_background_work(cache->policy, op, success);

		if (mg->overwrite_bio) {
			if (success)
				force_set_dirty(cache, cblock);
			else if (mg->k.input)
				mg->overwrite_bio->bi_status = mg->k.input;
			else
				mg->overwrite_bio->bi_status = BLK_STS_IOERR;
			bio_endio(mg->overwrite_bio);
		} else {
			if (success)
				force_clear_dirty(cache, cblock);
			dec_io_migrations(cache);
		}
		break;

	case POLICY_DEMOTE:
		/*
		 * We clear dirty here to update the nr_dirty counter.
		 */
		if (success)
			force_clear_dirty(cache, cblock);
		policy_complete_background_work(cache->policy, op, success);
		dec_io_migrations(cache);
		break;

	case POLICY_WRITEBACK:
		if (success)
			force_clear_dirty(cache, cblock);
		policy_complete_background_work(cache->policy, op, success);
		dec_io_migrations(cache);
		break;
	}

	bio_list_init(&bios);
	if (mg->cell) {
		if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
			free_prison_cell(cache, mg->cell);
	}

	free_migration(mg);
	defer_bios(cache, &bios);
	wake_migration_worker(cache);

	background_work_end(cache);
}

static void mg_success(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	mg_complete(mg, mg->k.input == 0);
}

static void mg_update_metadata(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;

	switch (op->op) {
	case POLICY_PROMOTE:
		r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock);
		if (r) {
			DMERR_LIMIT("%s: migration failed; couldn't insert mapping",
				    cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_insert_mapping", r);

			mg_complete(mg, false);
			return;
		}
		mg_complete(mg, true);
		break;

	case POLICY_DEMOTE:
		r = dm_cache_remove_mapping(cache->cmd, op->cblock);
		if (r) {
			DMERR_LIMIT("%s: migration failed; couldn't update on disk metadata",
				    cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);

			mg_complete(mg, false);
			return;
		}

		/*
		 * It would be nice if we only had to commit when a REQ_FLUSH
		 * comes through. But there's one scenario that we have to
		 * look out for:
		 *
		 * - vblock x in a cache block
		 * - demotion occurs
		 * - cache block gets reallocated and overwritten
		 * - crash
		 *
		 * When we recover, because there was no commit the cache will
		 * rollback to having the data for vblock x in the cache block.
		 * But the cache block has since been overwritten, so it'll end
		 * up pointing to data that was never in 'x' during the history
		 * of the device.
		 *
		 * To avoid this issue we require a commit as part of the
		 * demotion operation.
		 */
		init_continuation(&mg->k, mg_success);
		continue_after_commit(&cache->committer, &mg->k);
		schedule_commit(&cache->committer);
		break;

	case POLICY_WRITEBACK:
		mg_complete(mg, true);
		break;
	}
}

static void mg_update_metadata_after_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	/*
	 * Did the copy succeed?
	 */
	if (mg->k.input)
		mg_complete(mg, false);
	else
		mg_update_metadata(ws);
}

static void mg_upgrade_lock(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);

	/*
	 * Did the copy succeed?
	 */
	if (mg->k.input)
		mg_complete(mg, false);

	else {
		/*
		 * Now we want the lock to prevent both reads and writes.
		 */
		r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
					    READ_WRITE_LOCK_LEVEL);
		if (r < 0)
			mg_complete(mg, false);

		else if (r)
			quiesce(mg, mg_update_metadata);

		else
			mg_update_metadata(ws);
	}
}

static void mg_full_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;
	bool is_policy_promote = (op->op == POLICY_PROMOTE);

	if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
	    is_discarded_oblock(cache, op->oblock)) {
		mg_upgrade_lock(ws);
		return;
	}

	init_continuation(&mg->k, mg_upgrade_lock);
	copy(mg, is_policy_promote);
}

static void mg_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	if (mg->overwrite_bio) {
		/*
		 * No exclusive lock was held when we last checked if the bio
		 * was optimisable. So we have to check again in case things
		 * have changed (eg, the block may no longer be discarded).
		 */
		if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
			/*
			 * Fall back to a real full copy after doing some tidying up.
			 */
			bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);

			BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
			mg->overwrite_bio = NULL;
			inc_io_migrations(mg->cache);
			mg_full_copy(ws);
			return;
		}

		/*
		 * It's safe to do this here, even though it's new data
		 * because all IO has been locked out of the block.
		 *
		 * mg_lock_writes() already took READ_WRITE_LOCK_LEVEL
		 * so _not_ using mg_upgrade_lock() as continuation.
		 */
		overwrite(mg, mg_update_metadata_after_copy);

	} else
		mg_full_copy(ws);
}

static int mg_lock_writes(struct dm_cache_migration *mg)
{
	int r;
	struct dm_cell_key_v2 key;
	struct cache *cache = mg->cache;
	struct dm_bio_prison_cell_v2 *prealloc;

	prealloc = alloc_prison_cell(cache);

	/*
	 * Prevent writes to the block, but allow reads to continue.
	 * Unless we're using an overwrite bio, in which case we lock
	 * everything.
	 */
	build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
	r = dm_cell_lock_v2(cache->prison, &key,
			    mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
			    prealloc, &mg->cell);
	if (r < 0) {
		free_prison_cell(cache, prealloc);
		mg_complete(mg, false);
		return r;
	}

	if (mg->cell != prealloc)
		free_prison_cell(cache, prealloc);

	if (r == 0)
		mg_copy(&mg->k.ws);
	else
		quiesce(mg, mg_copy);

	return 0;
}

static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
{
	struct dm_cache_migration *mg;

	if (!background_work_begin(cache)) {
		policy_complete_background_work(cache->policy, op, false);
		return -EPERM;
	}

	mg = alloc_migration(cache);

	mg->op = op;
	mg->overwrite_bio = bio;

	if (!bio)
		inc_io_migrations(cache);

	return mg_lock_writes(mg);
}

/*----------------------------------------------------------------
 * invalidation processing
 *--------------------------------------------------------------*/

static void invalidate_complete(struct dm_cache_migration *mg, bool success)
{
	struct bio_list bios;
	struct cache *cache = mg->cache;

	bio_list_init(&bios);
	if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
		free_prison_cell(cache, mg->cell);

	if (!success && mg->overwrite_bio)
		bio_io_error(mg->overwrite_bio);

	free_migration(mg);
	defer_bios(cache, &bios);

	background_work_end(cache);
}

static void invalidate_completed(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	invalidate_complete(mg, !mg->k.input);
}

static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
{
	int r = policy_invalidate_mapping(cache->policy, cblock);

	if (!r) {
		r = dm_cache_remove_mapping(cache->cmd, cblock);
		if (r) {
			DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
				    cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
		}

	} else if (r == -ENODATA) {
		/*
		 * Harmless, already unmapped.
		 */
		r = 0;

	} else
		DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache));

	return r;
}

static void invalidate_remove(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;

	r = invalidate_cblock(cache, mg->invalidate_cblock);
	if (r) {
		invalidate_complete(mg, false);
		return;
	}

	init_continuation(&mg->k, invalidate_completed);
	continue_after_commit(&cache->committer, &mg->k);
	remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
	mg->overwrite_bio = NULL;
	schedule_commit(&cache->committer);
}

static int invalidate_lock(struct dm_cache_migration *mg)
{
	int r;
	struct dm_cell_key_v2 key;
	struct cache *cache = mg->cache;
	struct dm_bio_prison_cell_v2 *prealloc;

	prealloc = alloc_prison_cell(cache);

	build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
	r = dm_cell_lock_v2(cache->prison, &key,
			    READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
	if (r < 0) {
		free_prison_cell(cache, prealloc);
		invalidate_complete(mg, false);
		return r;
	}

	if (mg->cell != prealloc)
		free_prison_cell(cache, prealloc);

	if (r)
		quiesce(mg, invalidate_remove);

	else {
		/*
		 * We can't call invalidate_remove() directly here because we
		 * might still be in request context.
		 */
		init_continuation(&mg->k, invalidate_remove);
		queue_work(cache->wq, &mg->k.ws);
	}

	return 0;
}

static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
			    dm_oblock_t oblock, struct bio *bio)
{
	struct dm_cache_migration *mg;

	if (!background_work_begin(cache))
		return -EPERM;

	mg = alloc_migration(cache);

	mg->overwrite_bio = bio;
	mg->invalidate_cblock = cblock;
	mg->invalidate_oblock = oblock;

	return invalidate_lock(mg);
}

/*----------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------*/

enum busy {
	IDLE,
	BUSY
};

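/*
 * There is spare bandwidth for background migrations if the device has
 * been idle for a second and the volume of in-flight migration IO,
 * including the one we are about to start, is under the user-set
 * migration_threshold.
 */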
static enum busy spare_migration_bandwidth(struct cache *cache)
{
	bool idle = iot_idle_for(&cache->tracker, HZ);
	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
		cache->sectors_per_block;

	if (idle && current_volume <= cache->migration_threshold)
		return IDLE;
	else
		return BUSY;
}

static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_hit : &cache->stats.write_hit);
}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_miss : &cache->stats.write_miss);
}

/*----------------------------------------------------------------*/

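/*
 * Decide where a bio should go, taking a shared lock on the block and
 * consulting the policy. Returns DM_MAPIO_REMAPPED if the caller should
 * submit the bio, or DM_MAPIO_SUBMITTED if it has been queued, errored
 * or completed here.
 */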
map_bio(struct cache * cache,struct bio * bio,dm_oblock_t block,bool * commit_needed)1669 static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
1670 bool *commit_needed)
1671 {
1672 int r, data_dir;
1673 bool rb, background_queued;
1674 dm_cblock_t cblock;
1675
1676 *commit_needed = false;
1677
1678 rb = bio_detain_shared(cache, block, bio);
1679 if (!rb) {
1680 /*
1681 * An exclusive lock is held for this block, so we have to
1682 * wait. We set the commit_needed flag so the current
1683 * transaction will be committed asap, allowing this lock
1684 * to be dropped.
1685 */
1686 *commit_needed = true;
1687 return DM_MAPIO_SUBMITTED;
1688 }
1689
1690 data_dir = bio_data_dir(bio);
1691
1692 if (optimisable_bio(cache, bio, block)) {
1693 struct policy_work *op = NULL;
1694
1695 r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op);
1696 if (unlikely(r && r != -ENOENT)) {
1697 DMERR_LIMIT("%s: policy_lookup_with_work() failed with r = %d",
1698 cache_device_name(cache), r);
1699 bio_io_error(bio);
1700 return DM_MAPIO_SUBMITTED;
1701 }
1702
1703 if (r == -ENOENT && op) {
1704 bio_drop_shared_lock(cache, bio);
1705 BUG_ON(op->op != POLICY_PROMOTE);
1706 mg_start(cache, op, bio);
1707 return DM_MAPIO_SUBMITTED;
1708 }
1709 } else {
1710 r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued);
1711 if (unlikely(r && r != -ENOENT)) {
1712 DMERR_LIMIT("%s: policy_lookup() failed with r = %d",
1713 cache_device_name(cache), r);
1714 bio_io_error(bio);
1715 return DM_MAPIO_SUBMITTED;
1716 }
1717
1718 if (background_queued)
1719 wake_migration_worker(cache);
1720 }
1721
1722 if (r == -ENOENT) {
1723 struct per_bio_data *pb = get_per_bio_data(bio);
1724
1725 /*
1726 * Miss.
1727 */
1728 inc_miss_counter(cache, bio);
1729 if (pb->req_nr == 0) {
1730 accounted_begin(cache, bio);
1731 remap_to_origin_clear_discard(cache, bio, block);
1732 } else {
1733 /*
1734 * This is a duplicate writethrough io that is no
1735 * longer needed because the block has been demoted.
1736 */
1737 bio_endio(bio);
1738 return DM_MAPIO_SUBMITTED;
1739 }
1740 } else {
1741 /*
1742 * Hit.
1743 */
1744 inc_hit_counter(cache, bio);
1745
1746 /*
1747 * Passthrough always maps to the origin, invalidating any
1748 * cache blocks that are written to.
1749 */
1750 if (passthrough_mode(cache)) {
1751 if (bio_data_dir(bio) == WRITE) {
1752 bio_drop_shared_lock(cache, bio);
1753 atomic_inc(&cache->stats.demotion);
1754 invalidate_start(cache, cblock, block, bio);
1755 } else
1756 remap_to_origin_clear_discard(cache, bio, block);
1757 } else {
1758 if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
1759 !is_dirty(cache, cblock)) {
1760 remap_to_origin_and_cache(cache, bio, block, cblock);
1761 accounted_begin(cache, bio);
1762 } else
1763 remap_to_cache_dirty(cache, bio, block, cblock);
1764 }
1765 }
1766
1767 /*
1768 * dm core turns FUA requests into a separate payload and FLUSH req.
1769 */
1770 if (bio->bi_opf & REQ_FUA) {
1771 /*
1772 * issue_after_commit will call accounted_begin a second time. So
1773 * we call accounted_complete() to avoid double accounting.
1774 */
1775 accounted_complete(cache, bio);
1776 issue_after_commit(&cache->committer, bio);
1777 *commit_needed = true;
1778 return DM_MAPIO_SUBMITTED;
1779 }
1780
1781 return DM_MAPIO_REMAPPED;
1782 }
1783
process_bio(struct cache * cache,struct bio * bio)1784 static bool process_bio(struct cache *cache, struct bio *bio)
1785 {
1786 bool commit_needed;
1787
1788 if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
1789 submit_bio_noacct(bio);
1790
1791 return commit_needed;
1792 }
1793
1794 /*
1795 * A non-zero return indicates read_only or fail_io mode.
1796 */
commit(struct cache * cache,bool clean_shutdown)1797 static int commit(struct cache *cache, bool clean_shutdown)
1798 {
1799 int r;
1800
1801 if (get_cache_mode(cache) >= CM_READ_ONLY)
1802 return -EINVAL;
1803
1804 atomic_inc(&cache->stats.commit_count);
1805 r = dm_cache_commit(cache->cmd, clean_shutdown);
1806 if (r)
1807 metadata_operation_failed(cache, "dm_cache_commit", r);
1808
1809 return r;
1810 }
1811
1812 /*
1813 * Used by the batcher.
1814 */
commit_op(void * context)1815 static blk_status_t commit_op(void *context)
1816 {
1817 struct cache *cache = context;
1818
1819 if (dm_cache_changed_this_transaction(cache->cmd))
1820 return errno_to_blk_status(commit(cache, false));
1821
1822 return 0;
1823 }
1824
1825 /*----------------------------------------------------------------*/
1826
process_flush_bio(struct cache * cache,struct bio * bio)1827 static bool process_flush_bio(struct cache *cache, struct bio *bio)
1828 {
1829 struct per_bio_data *pb = get_per_bio_data(bio);
1830
1831 if (!pb->req_nr)
1832 remap_to_origin(cache, bio);
1833 else
1834 remap_to_cache(cache, bio, 0);
1835
1836 issue_after_commit(&cache->committer, bio);
1837 return true;
1838 }
1839
process_discard_bio(struct cache * cache,struct bio * bio)1840 static bool process_discard_bio(struct cache *cache, struct bio *bio)
1841 {
1842 dm_dblock_t b, e;
1843
1844 // FIXME: do we need to lock the region? Or can we just assume the
1845 // user wont be so foolish as to issue discard concurrently with
1846 // other IO?
1847 calc_discard_block_range(cache, bio, &b, &e);
1848 while (b != e) {
1849 set_discard(cache, b);
1850 b = to_dblock(from_dblock(b) + 1);
1851 }
1852
1853 if (cache->features.discard_passdown) {
1854 remap_to_origin(cache, bio);
1855 submit_bio_noacct(bio);
1856 } else
1857 bio_endio(bio);
1858
1859 return false;
1860 }
1861
process_deferred_bios(struct work_struct * ws)1862 static void process_deferred_bios(struct work_struct *ws)
1863 {
1864 struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
1865
1866 bool commit_needed = false;
1867 struct bio_list bios;
1868 struct bio *bio;
1869
1870 bio_list_init(&bios);
1871
1872 spin_lock_irq(&cache->lock);
1873 bio_list_merge(&bios, &cache->deferred_bios);
1874 bio_list_init(&cache->deferred_bios);
1875 spin_unlock_irq(&cache->lock);
1876
1877 while ((bio = bio_list_pop(&bios))) {
1878 if (bio->bi_opf & REQ_PREFLUSH)
1879 commit_needed = process_flush_bio(cache, bio) || commit_needed;
1880
1881 else if (bio_op(bio) == REQ_OP_DISCARD)
1882 commit_needed = process_discard_bio(cache, bio) || commit_needed;
1883
1884 else
1885 commit_needed = process_bio(cache, bio) || commit_needed;
1886 }
1887
1888 if (commit_needed)
1889 schedule_commit(&cache->committer);
1890 }
1891
1892 /*----------------------------------------------------------------
1893 * Main worker loop
1894 *--------------------------------------------------------------*/
1895
1896 static void requeue_deferred_bios(struct cache *cache)
1897 {
1898 struct bio *bio;
1899 struct bio_list bios;
1900
1901 bio_list_init(&bios);
1902 bio_list_merge(&bios, &cache->deferred_bios);
1903 bio_list_init(&cache->deferred_bios);
1904
1905 while ((bio = bio_list_pop(&bios))) {
1906 bio->bi_status = BLK_STS_DM_REQUEUE;
1907 bio_endio(bio);
1908 }
1909 }
1910
1911 /*
1912 * We want to commit periodically so that not too much
1913 * unwritten metadata builds up.
1914 */
1915 static void do_waker(struct work_struct *ws)
1916 {
1917 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1918
1919 policy_tick(cache->policy, true);
1920 wake_migration_worker(cache);
1921 schedule_commit(&cache->committer);
1922 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1923 }
1924
1925 static void check_migrations(struct work_struct *ws)
1926 {
1927 int r;
1928 struct policy_work *op;
1929 struct cache *cache = container_of(ws, struct cache, migration_worker);
1930 enum busy b;
1931
1932 for (;;) {
1933 b = spare_migration_bandwidth(cache);
1934
1935 r = policy_get_background_work(cache->policy, b == IDLE, &op);
1936 if (r == -ENODATA)
1937 break;
1938
1939 if (r) {
1940 DMERR_LIMIT("%s: policy_background_work failed",
1941 cache_device_name(cache));
1942 break;
1943 }
1944
1945 r = mg_start(cache, op, NULL);
1946 if (r)
1947 break;
1948 }
1949 }
1950
1951 /*----------------------------------------------------------------
1952 * Target methods
1953 *--------------------------------------------------------------*/
1954
1955 /*
1956 * This function gets called on the error paths of the constructor, so we
1957 * have to cope with a partially initialised struct.
1958 */
1959 static void destroy(struct cache *cache)
1960 {
1961 unsigned i;
1962
1963 mempool_exit(&cache->migration_pool);
1964
1965 if (cache->prison)
1966 dm_bio_prison_destroy_v2(cache->prison);
1967
1968 cancel_delayed_work_sync(&cache->waker);
1969 if (cache->wq)
1970 destroy_workqueue(cache->wq);
1971
1972 if (cache->dirty_bitset)
1973 free_bitset(cache->dirty_bitset);
1974
1975 if (cache->discard_bitset)
1976 free_bitset(cache->discard_bitset);
1977
1978 if (cache->copier)
1979 dm_kcopyd_client_destroy(cache->copier);
1980
1981 if (cache->cmd)
1982 dm_cache_metadata_close(cache->cmd);
1983
1984 if (cache->metadata_dev)
1985 dm_put_device(cache->ti, cache->metadata_dev);
1986
1987 if (cache->origin_dev)
1988 dm_put_device(cache->ti, cache->origin_dev);
1989
1990 if (cache->cache_dev)
1991 dm_put_device(cache->ti, cache->cache_dev);
1992
1993 if (cache->policy)
1994 dm_cache_policy_destroy(cache->policy);
1995
1996 for (i = 0; i < cache->nr_ctr_args ; i++)
1997 kfree(cache->ctr_args[i]);
1998 kfree(cache->ctr_args);
1999
2000 bioset_exit(&cache->bs);
2001
2002 kfree(cache);
2003 }
2004
2005 static void cache_dtr(struct dm_target *ti)
2006 {
2007 struct cache *cache = ti->private;
2008
2009 destroy(cache);
2010 }
2011
2012 static sector_t get_dev_size(struct dm_dev *dev)
2013 {
2014 return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
2015 }
2016
2017 /*----------------------------------------------------------------*/
2018
2019 /*
2020 * Construct a cache device mapping.
2021 *
2022 * cache <metadata dev> <cache dev> <origin dev> <block size>
2023 * <#feature args> [<feature arg>]*
2024 * <policy> <#policy args> [<policy arg>]*
2025 *
2026 * metadata dev : fast device holding the persistent metadata
2027 * cache dev : fast device holding cached data blocks
2028 * origin dev : slow device holding original data blocks
2029 * block size : cache unit size in sectors
2030 *
2031 * #feature args : number of feature arguments passed
2032 * feature args : writethrough. (The default is writeback.)
2033 *
2034 * policy : the replacement policy to use
2035 * #policy args : an even number of policy arguments corresponding
2036 * to key/value pairs passed to the policy
2037 * policy args : key/value pairs passed to the policy
2038 * E.g. 'sequential_threshold 1024'
2039 * See cache-policies.txt for details.
2040 *
2041 * Optional feature arguments are:
2042 * writethrough : write through caching that prohibits cache block
2043 * content from being different from origin block content.
2044 * Without this argument, the default behaviour is to write
2045 * back cache block contents later for performance reasons,
2046 * so they may differ from the corresponding origin blocks.
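*
* A hypothetical example table line (device names and sizes are
* illustrative only, not taken from this file): this maps a
* 41943040-sector (20 GiB) origin using 512-sector (256 KiB) cache
* blocks, writeback mode, the default policy and no policy arguments:
*
*   dmsetup create cached --table \
*     '0 41943040 cache /dev/mapper/meta /dev/mapper/ssd /dev/mapper/origin \
*      512 1 writeback default 0'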
2047 */
2048 struct cache_args {
2049 struct dm_target *ti;
2050
2051 struct dm_dev *metadata_dev;
2052
2053 struct dm_dev *cache_dev;
2054 sector_t cache_sectors;
2055
2056 struct dm_dev *origin_dev;
2057 sector_t origin_sectors;
2058
2059 uint32_t block_size;
2060
2061 const char *policy_name;
2062 int policy_argc;
2063 const char **policy_argv;
2064
2065 struct cache_features features;
2066 };
2067
2068 static void destroy_cache_args(struct cache_args *ca)
2069 {
2070 if (ca->metadata_dev)
2071 dm_put_device(ca->ti, ca->metadata_dev);
2072
2073 if (ca->cache_dev)
2074 dm_put_device(ca->ti, ca->cache_dev);
2075
2076 if (ca->origin_dev)
2077 dm_put_device(ca->ti, ca->origin_dev);
2078
2079 kfree(ca);
2080 }
2081
2082 static bool at_least_one_arg(struct dm_arg_set *as, char **error)
2083 {
2084 if (!as->argc) {
2085 *error = "Insufficient args";
2086 return false;
2087 }
2088
2089 return true;
2090 }
2091
2092 static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
2093 char **error)
2094 {
2095 int r;
2096 sector_t metadata_dev_size;
2097 char b[BDEVNAME_SIZE];
2098
2099 if (!at_least_one_arg(as, error))
2100 return -EINVAL;
2101
2102 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2103 &ca->metadata_dev);
2104 if (r) {
2105 *error = "Error opening metadata device";
2106 return r;
2107 }
2108
2109 metadata_dev_size = get_dev_size(ca->metadata_dev);
2110 if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
2111 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2112 bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS_WARNING);
2113
2114 return 0;
2115 }
2116
2117 static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
2118 char **error)
2119 {
2120 int r;
2121
2122 if (!at_least_one_arg(as, error))
2123 return -EINVAL;
2124
2125 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2126 &ca->cache_dev);
2127 if (r) {
2128 *error = "Error opening cache device";
2129 return r;
2130 }
2131 ca->cache_sectors = get_dev_size(ca->cache_dev);
2132
2133 return 0;
2134 }
2135
2136 static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
2137 char **error)
2138 {
2139 int r;
2140
2141 if (!at_least_one_arg(as, error))
2142 return -EINVAL;
2143
2144 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2145 &ca->origin_dev);
2146 if (r) {
2147 *error = "Error opening origin device";
2148 return r;
2149 }
2150
2151 ca->origin_sectors = get_dev_size(ca->origin_dev);
2152 if (ca->ti->len > ca->origin_sectors) {
2153 *error = "Device size larger than cached device";
2154 return -EINVAL;
2155 }
2156
2157 return 0;
2158 }
2159
2160 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
2161 char **error)
2162 {
2163 unsigned long block_size;
2164
2165 if (!at_least_one_arg(as, error))
2166 return -EINVAL;
2167
2168 if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
2169 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2170 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2171 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2172 *error = "Invalid data block size";
2173 return -EINVAL;
2174 }
2175
2176 if (block_size > ca->cache_sectors) {
2177 *error = "Data block size is larger than the cache device";
2178 return -EINVAL;
2179 }
2180
2181 ca->block_size = block_size;
2182
2183 return 0;
2184 }
2185
2186 static void init_features(struct cache_features *cf)
2187 {
2188 cf->mode = CM_WRITE;
2189 cf->io_mode = CM_IO_WRITEBACK;
2190 cf->metadata_version = 1;
2191 cf->discard_passdown = true;
2192 }
2193
2194 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2195 char **error)
2196 {
2197 static const struct dm_arg _args[] = {
2198 {0, 3, "Invalid number of cache feature arguments"},
2199 };
2200
2201 int r, mode_ctr = 0;
2202 unsigned argc;
2203 const char *arg;
2204 struct cache_features *cf = &ca->features;
2205
2206 init_features(cf);
2207
2208 r = dm_read_arg_group(_args, as, &argc, error);
2209 if (r)
2210 return -EINVAL;
2211
2212 while (argc--) {
2213 arg = dm_shift_arg(as);
2214
2215 if (!strcasecmp(arg, "writeback")) {
2216 cf->io_mode = CM_IO_WRITEBACK;
2217 mode_ctr++;
2218 }
2219
2220 else if (!strcasecmp(arg, "writethrough")) {
2221 cf->io_mode = CM_IO_WRITETHROUGH;
2222 mode_ctr++;
2223 }
2224
2225 else if (!strcasecmp(arg, "passthrough")) {
2226 cf->io_mode = CM_IO_PASSTHROUGH;
2227 mode_ctr++;
2228 }
2229
2230 else if (!strcasecmp(arg, "metadata2"))
2231 cf->metadata_version = 2;
2232
2233 else if (!strcasecmp(arg, "no_discard_passdown"))
2234 cf->discard_passdown = false;
2235
2236 else {
2237 *error = "Unrecognised cache feature requested";
2238 return -EINVAL;
2239 }
2240 }
2241
2242 if (mode_ctr > 1) {
2243 *error = "Duplicate cache io_mode features requested";
2244 return -EINVAL;
2245 }
2246
2247 return 0;
2248 }
2249
2250 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
2251 char **error)
2252 {
2253 static const struct dm_arg _args[] = {
2254 {0, 1024, "Invalid number of policy arguments"},
2255 };
2256
2257 int r;
2258
2259 if (!at_least_one_arg(as, error))
2260 return -EINVAL;
2261
2262 ca->policy_name = dm_shift_arg(as);
2263
2264 r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
2265 if (r)
2266 return -EINVAL;
2267
2268 ca->policy_argv = (const char **)as->argv;
2269 dm_consume_args(as, ca->policy_argc);
2270
2271 return 0;
2272 }
2273
2274 static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
2275 char **error)
2276 {
2277 int r;
2278 struct dm_arg_set as;
2279
2280 as.argc = argc;
2281 as.argv = argv;
2282
2283 r = parse_metadata_dev(ca, &as, error);
2284 if (r)
2285 return r;
2286
2287 r = parse_cache_dev(ca, &as, error);
2288 if (r)
2289 return r;
2290
2291 r = parse_origin_dev(ca, &as, error);
2292 if (r)
2293 return r;
2294
2295 r = parse_block_size(ca, &as, error);
2296 if (r)
2297 return r;
2298
2299 r = parse_features(ca, &as, error);
2300 if (r)
2301 return r;
2302
2303 r = parse_policy(ca, &as, error);
2304 if (r)
2305 return r;
2306
2307 return 0;
2308 }
2309
2310 /*----------------------------------------------------------------*/
2311
2312 static struct kmem_cache *migration_cache;
2313
2314 #define NOT_CORE_OPTION 1
2315
2316 static int process_config_option(struct cache *cache, const char *key, const char *value)
2317 {
2318 unsigned long tmp;
2319
2320 if (!strcasecmp(key, "migration_threshold")) {
2321 if (kstrtoul(value, 10, &tmp))
2322 return -EINVAL;
2323
2324 cache->migration_threshold = tmp;
2325 return 0;
2326 }
2327
2328 return NOT_CORE_OPTION;
2329 }
2330
2331 static int set_config_value(struct cache *cache, const char *key, const char *value)
2332 {
2333 int r = process_config_option(cache, key, value);
2334
2335 if (r == NOT_CORE_OPTION)
2336 r = policy_set_config_value(cache->policy, key, value);
2337
2338 if (r)
2339 DMWARN("bad config value for %s: %s", key, value);
2340
2341 return r;
2342 }
2343
2344 static int set_config_values(struct cache *cache, int argc, const char **argv)
2345 {
2346 int r = 0;
2347
2348 if (argc & 1) {
2349 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
2350 return -EINVAL;
2351 }
2352
2353 while (argc) {
2354 r = set_config_value(cache, argv[0], argv[1]);
2355 if (r)
2356 break;
2357
2358 argc -= 2;
2359 argv += 2;
2360 }
2361
2362 return r;
2363 }
2364
2365 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2366 char **error)
2367 {
2368 struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
2369 cache->cache_size,
2370 cache->origin_sectors,
2371 cache->sectors_per_block);
2372 if (IS_ERR(p)) {
2373 *error = "Error creating cache's policy";
2374 return PTR_ERR(p);
2375 }
2376 cache->policy = p;
2377 BUG_ON(!cache->policy);
2378
2379 return 0;
2380 }
2381
2382 /*
2383 * We want the discard block size to be at least as large as the cache
2384 * block size, and no more than 2^14 discard blocks across the origin.
2385 */
2386 #define MAX_DISCARD_BLOCKS (1 << 14)
2387
2388 static bool too_many_discard_blocks(sector_t discard_block_size,
2389 sector_t origin_size)
2390 {
2391 (void) sector_div(origin_size, discard_block_size);
2392
2393 return origin_size > MAX_DISCARD_BLOCKS;
2394 }
2395
2396 static sector_t calculate_discard_block_size(sector_t cache_block_size,
2397 sector_t origin_size)
2398 {
2399 sector_t discard_block_size = cache_block_size;
2400
2401 if (origin_size)
2402 while (too_many_discard_blocks(discard_block_size, origin_size))
2403 discard_block_size *= 2;
2404
2405 return discard_block_size;
2406 }
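
/*
 * Worked example (numbers are illustrative): with 512-sector cache blocks
 * and a 2^32-sector (2 TiB) origin, 512-sector discard blocks would give
 * 2^23 discard blocks.  The size is doubled until no more than 2^14 blocks
 * remain across the origin, yielding a discard block size of 2^18 sectors
 * (128 MiB).
 */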
2407
2408 static void set_cache_size(struct cache *cache, dm_cblock_t size)
2409 {
2410 dm_block_t nr_blocks = from_cblock(size);
2411
2412 if (nr_blocks > (1 << 20) && cache->cache_size != size)
2413 DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
2414 "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
2415 "Please consider increasing the cache block size to reduce the overall cache block count.",
2416 (unsigned long long) nr_blocks);
2417
2418 cache->cache_size = size;
2419 }
2420
2421 #define DEFAULT_MIGRATION_THRESHOLD 2048
2422
2423 static int cache_create(struct cache_args *ca, struct cache **result)
2424 {
2425 int r = 0;
2426 char **error = &ca->ti->error;
2427 struct cache *cache;
2428 struct dm_target *ti = ca->ti;
2429 dm_block_t origin_blocks;
2430 struct dm_cache_metadata *cmd;
2431 bool may_format = ca->features.mode == CM_WRITE;
2432
2433 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2434 if (!cache)
2435 return -ENOMEM;
2436
2437 cache->ti = ca->ti;
2438 ti->private = cache;
2439 ti->num_flush_bios = 2;
2440 ti->flush_supported = true;
2441
2442 ti->num_discard_bios = 1;
2443 ti->discards_supported = true;
2444
2445 ti->per_io_data_size = sizeof(struct per_bio_data);
2446
2447 cache->features = ca->features;
2448 if (writethrough_mode(cache)) {
2449 /* Create bioset for writethrough bios issued to origin */
2450 r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
2451 if (r)
2452 goto bad;
2453 }
2454
2455 cache->metadata_dev = ca->metadata_dev;
2456 cache->origin_dev = ca->origin_dev;
2457 cache->cache_dev = ca->cache_dev;
2458
2459 ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2460
2461 origin_blocks = cache->origin_sectors = ca->origin_sectors;
2462 origin_blocks = block_div(origin_blocks, ca->block_size);
2463 cache->origin_blocks = to_oblock(origin_blocks);
2464
2465 cache->sectors_per_block = ca->block_size;
2466 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2467 r = -EINVAL;
2468 goto bad;
2469 }
2470
2471 if (ca->block_size & (ca->block_size - 1)) {
2472 dm_block_t cache_size = ca->cache_sectors;
2473
2474 cache->sectors_per_block_shift = -1;
2475 cache_size = block_div(cache_size, ca->block_size);
2476 set_cache_size(cache, to_cblock(cache_size));
2477 } else {
2478 cache->sectors_per_block_shift = __ffs(ca->block_size);
2479 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
2480 }
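
/*
 * For example (illustrative): a power-of-two block size of 512 sectors
 * gives sectors_per_block_shift == 9, so later code can use shifts in
 * place of divisions; non-power-of-two sizes (shift == -1) fall back to
 * block_div().
 */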
2481
2482 r = create_cache_policy(cache, ca, error);
2483 if (r)
2484 goto bad;
2485
2486 cache->policy_nr_args = ca->policy_argc;
2487 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2488
2489 r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2490 if (r) {
2491 *error = "Error setting cache policy's config values";
2492 goto bad;
2493 }
2494
2495 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2496 ca->block_size, may_format,
2497 dm_cache_policy_get_hint_size(cache->policy),
2498 ca->features.metadata_version);
2499 if (IS_ERR(cmd)) {
2500 *error = "Error creating metadata object";
2501 r = PTR_ERR(cmd);
2502 goto bad;
2503 }
2504 cache->cmd = cmd;
2505 set_cache_mode(cache, CM_WRITE);
2506 if (get_cache_mode(cache) != CM_WRITE) {
2507 *error = "Unable to get write access to metadata, please check/repair metadata.";
2508 r = -EINVAL;
2509 goto bad;
2510 }
2511
2512 if (passthrough_mode(cache)) {
2513 bool all_clean;
2514
2515 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2516 if (r) {
2517 *error = "dm_cache_metadata_all_clean() failed";
2518 goto bad;
2519 }
2520
2521 if (!all_clean) {
2522 *error = "Cannot enter passthrough mode unless all blocks are clean";
2523 r = -EINVAL;
2524 goto bad;
2525 }
2526
2527 policy_allow_migrations(cache->policy, false);
2528 }
2529
2530 spin_lock_init(&cache->lock);
2531 bio_list_init(&cache->deferred_bios);
2532 atomic_set(&cache->nr_allocated_migrations, 0);
2533 atomic_set(&cache->nr_io_migrations, 0);
2534 init_waitqueue_head(&cache->migration_wait);
2535
2536 r = -ENOMEM;
2537 atomic_set(&cache->nr_dirty, 0);
2538 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2539 if (!cache->dirty_bitset) {
2540 *error = "could not allocate dirty bitset";
2541 goto bad;
2542 }
2543 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2544
2545 cache->discard_block_size =
2546 calculate_discard_block_size(cache->sectors_per_block,
2547 cache->origin_sectors);
2548 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
2549 cache->discard_block_size));
2550 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2551 if (!cache->discard_bitset) {
2552 *error = "could not allocate discard bitset";
2553 goto bad;
2554 }
2555 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2556
2557 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2558 if (IS_ERR(cache->copier)) {
2559 *error = "could not create kcopyd client";
2560 r = PTR_ERR(cache->copier);
2561 goto bad;
2562 }
2563
2564 cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
2565 if (!cache->wq) {
2566 *error = "could not create workqueue for metadata object";
2567 goto bad;
2568 }
2569 INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
2570 INIT_WORK(&cache->migration_worker, check_migrations);
2571 INIT_DELAYED_WORK(&cache->waker, do_waker);
2572
2573 cache->prison = dm_bio_prison_create_v2(cache->wq);
2574 if (!cache->prison) {
2575 *error = "could not create bio prison";
2576 goto bad;
2577 }
2578
2579 r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
2580 migration_cache);
2581 if (r) {
2582 *error = "Error creating cache's migration mempool";
2583 goto bad;
2584 }
2585
2586 cache->need_tick_bio = true;
2587 cache->sized = false;
2588 cache->invalidate = false;
2589 cache->commit_requested = false;
2590 cache->loaded_mappings = false;
2591 cache->loaded_discards = false;
2592
2593 load_stats(cache);
2594
2595 atomic_set(&cache->stats.demotion, 0);
2596 atomic_set(&cache->stats.promotion, 0);
2597 atomic_set(&cache->stats.copies_avoided, 0);
2598 atomic_set(&cache->stats.cache_cell_clash, 0);
2599 atomic_set(&cache->stats.commit_count, 0);
2600 atomic_set(&cache->stats.discard_count, 0);
2601
2602 spin_lock_init(&cache->invalidation_lock);
2603 INIT_LIST_HEAD(&cache->invalidation_requests);
2604
2605 batcher_init(&cache->committer, commit_op, cache,
2606 issue_op, cache, cache->wq);
2607 iot_init(&cache->tracker);
2608
2609 init_rwsem(&cache->background_work_lock);
2610 prevent_background_work(cache);
2611
2612 *result = cache;
2613 return 0;
2614 bad:
2615 destroy(cache);
2616 return r;
2617 }
2618
2619 static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2620 {
2621 unsigned i;
2622 const char **copy;
2623
2624 copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2625 if (!copy)
2626 return -ENOMEM;
2627 for (i = 0; i < argc; i++) {
2628 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2629 if (!copy[i]) {
2630 while (i--)
2631 kfree(copy[i]);
2632 kfree(copy);
2633 return -ENOMEM;
2634 }
2635 }
2636
2637 cache->nr_ctr_args = argc;
2638 cache->ctr_args = copy;
2639
2640 return 0;
2641 }
2642
2643 static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2644 {
2645 int r = -EINVAL;
2646 struct cache_args *ca;
2647 struct cache *cache = NULL;
2648
2649 ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2650 if (!ca) {
2651 ti->error = "Error allocating memory for cache";
2652 return -ENOMEM;
2653 }
2654 ca->ti = ti;
2655
2656 r = parse_cache_args(ca, argc, argv, &ti->error);
2657 if (r)
2658 goto out;
2659
2660 r = cache_create(ca, &cache);
2661 if (r)
2662 goto out;
2663
2664 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2665 if (r) {
2666 destroy(cache);
2667 goto out;
2668 }
2669
2670 ti->private = cache;
2671 out:
2672 destroy_cache_args(ca);
2673 return r;
2674 }
2675
2676 /*----------------------------------------------------------------*/
2677
2678 static int cache_map(struct dm_target *ti, struct bio *bio)
2679 {
2680 struct cache *cache = ti->private;
2681
2682 int r;
2683 bool commit_needed;
2684 dm_oblock_t block = get_bio_block(cache, bio);
2685
2686 init_per_bio_data(bio);
2687 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2688 /*
2689 * This can only occur if the io goes to a partial block at
2690 * the end of the origin device. We don't cache these.
2691 * Just remap to the origin and carry on.
2692 */
2693 remap_to_origin(cache, bio);
2694 accounted_begin(cache, bio);
2695 return DM_MAPIO_REMAPPED;
2696 }
2697
2698 if (discard_or_flush(bio)) {
2699 defer_bio(cache, bio);
2700 return DM_MAPIO_SUBMITTED;
2701 }
2702
2703 r = map_bio(cache, bio, block, &commit_needed);
2704 if (commit_needed)
2705 schedule_commit(&cache->committer);
2706
2707 return r;
2708 }
2709
2710 static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
2711 {
2712 struct cache *cache = ti->private;
2713 unsigned long flags;
2714 struct per_bio_data *pb = get_per_bio_data(bio);
2715
2716 if (pb->tick) {
2717 policy_tick(cache->policy, false);
2718
2719 spin_lock_irqsave(&cache->lock, flags);
2720 cache->need_tick_bio = true;
2721 spin_unlock_irqrestore(&cache->lock, flags);
2722 }
2723
2724 bio_drop_shared_lock(cache, bio);
2725 accounted_complete(cache, bio);
2726
2727 return DM_ENDIO_DONE;
2728 }
2729
2730 static int write_dirty_bitset(struct cache *cache)
2731 {
2732 int r;
2733
2734 if (get_cache_mode(cache) >= CM_READ_ONLY)
2735 return -EINVAL;
2736
2737 r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
2738 if (r)
2739 metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);
2740
2741 return r;
2742 }
2743
2744 static int write_discard_bitset(struct cache *cache)
2745 {
2746 int r;
2747 unsigned i;

2748 if (get_cache_mode(cache) >= CM_READ_ONLY)
2749 return -EINVAL;
2750
2751 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2752 cache->discard_nr_blocks);
2753 if (r) {
2754 DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
2755 metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
2756 return r;
2757 }
2758
2759 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2760 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2761 is_discarded(cache, to_dblock(i)));
2762 if (r) {
2763 metadata_operation_failed(cache, "dm_cache_set_discard", r);
2764 return r;
2765 }
2766 }
2767
2768 return 0;
2769 }
2770
2771 static int write_hints(struct cache *cache)
2772 {
2773 int r;
2774
2775 if (get_cache_mode(cache) >= CM_READ_ONLY)
2776 return -EINVAL;
2777
2778 r = dm_cache_write_hints(cache->cmd, cache->policy);
2779 if (r) {
2780 metadata_operation_failed(cache, "dm_cache_write_hints", r);
2781 return r;
2782 }
2783
2784 return 0;
2785 }
2786
2787 /*
2788 * returns true on success
2789 */
2790 static bool sync_metadata(struct cache *cache)
2791 {
2792 int r1, r2, r3, r4;
2793
2794 r1 = write_dirty_bitset(cache);
2795 if (r1)
2796 DMERR("%s: could not write dirty bitset", cache_device_name(cache));
2797
2798 r2 = write_discard_bitset(cache);
2799 if (r2)
2800 DMERR("%s: could not write discard bitset", cache_device_name(cache));
2801
2802 save_stats(cache);
2803
2804 r3 = write_hints(cache);
2805 if (r3)
2806 DMERR("%s: could not write hints", cache_device_name(cache));
2807
2808 /*
2809 * If writing the above metadata failed, we still commit, but don't
2810 * set the clean shutdown flag. This will effectively force every
2811 * dirty bit to be set on reload.
2812 */
2813 r4 = commit(cache, !r1 && !r2 && !r3);
2814 if (r4)
2815 DMERR("%s: could not write cache metadata", cache_device_name(cache));
2816
2817 return !r1 && !r2 && !r3 && !r4;
2818 }
2819
2820 static void cache_postsuspend(struct dm_target *ti)
2821 {
2822 struct cache *cache = ti->private;
2823
2824 prevent_background_work(cache);
2825 BUG_ON(atomic_read(&cache->nr_io_migrations));
2826
2827 cancel_delayed_work_sync(&cache->waker);
2828 drain_workqueue(cache->wq);
2829 WARN_ON(cache->tracker.in_flight);
2830
2831 /*
2832 * If it's a flush suspend there won't be any deferred bios, so this
2833 * call is harmless.
2834 */
2835 requeue_deferred_bios(cache);
2836
2837 if (get_cache_mode(cache) == CM_WRITE)
2838 (void) sync_metadata(cache);
2839 }
2840
2841 static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2842 bool dirty, uint32_t hint, bool hint_valid)
2843 {
2844 int r;
2845 struct cache *cache = context;
2846
2847 if (dirty) {
2848 set_bit(from_cblock(cblock), cache->dirty_bitset);
2849 atomic_inc(&cache->nr_dirty);
2850 } else
2851 clear_bit(from_cblock(cblock), cache->dirty_bitset);
2852
2853 r = policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
2854 if (r)
2855 return r;
2856
2857 return 0;
2858 }
2859
2860 /*
2861 * The discard block size in the on-disk metadata is not
2862 * necessarily the same as the one we're currently using. So we have to
2863 * be careful to only set the discarded attribute if we know it
2864 * covers a complete block of the new size.
2865 */
2866 struct discard_load_info {
2867 struct cache *cache;
2868
2869 /*
2870 * These blocks are sized using the on disk dblock size, rather
2871 * than the current one.
2872 */
2873 dm_block_t block_size;
2874 dm_block_t discard_begin, discard_end;
2875 };
2876
2877 static void discard_load_info_init(struct cache *cache,
2878 struct discard_load_info *li)
2879 {
2880 li->cache = cache;
2881 li->discard_begin = li->discard_end = 0;
2882 }
2883
2884 static void set_discard_range(struct discard_load_info *li)
2885 {
2886 sector_t b, e;
2887
2888 if (li->discard_begin == li->discard_end)
2889 return;
2890
2891 /*
2892 * Convert to sectors.
2893 */
2894 b = li->discard_begin * li->block_size;
2895 e = li->discard_end * li->block_size;
2896
2897 /*
2898 * Then convert back to the current dblock size.
2899 */
2900 b = dm_sector_div_up(b, li->cache->discard_block_size);
2901 sector_div(e, li->cache->discard_block_size);
2902
2903 /*
2904 * The origin may have shrunk, so we need to check we're still in
2905 * bounds.
2906 */
2907 if (e > from_dblock(li->cache->discard_nr_blocks))
2908 e = from_dblock(li->cache->discard_nr_blocks);
2909
2910 for (; b < e; b++)
2911 set_discard(li->cache, to_dblock(b));
2912 }
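
/*
 * Rounding example (numbers are illustrative): if the on-disk dblock size
 * was 1024 sectors and the current one is 2048, an on-disk range [3, 7)
 * covers sectors [3072, 7168).  Rounding begin up and end down gives
 * current dblocks [2, 3), so only block 2, which the range fully covers,
 * gets its discard bit set.
 */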
2913
2914 static int load_discard(void *context, sector_t discard_block_size,
2915 dm_dblock_t dblock, bool discard)
2916 {
2917 struct discard_load_info *li = context;
2918
2919 li->block_size = discard_block_size;
2920
2921 if (discard) {
2922 if (from_dblock(dblock) == li->discard_end)
2923 /*
2924 * We're already in a discard range, just extend it.
2925 */
2926 li->discard_end = li->discard_end + 1ULL;
2927
2928 else {
2929 /*
2930 * Emit the old range and start a new one.
2931 */
2932 set_discard_range(li);
2933 li->discard_begin = from_dblock(dblock);
2934 li->discard_end = li->discard_begin + 1ULL;
2935 }
2936 } else {
2937 set_discard_range(li);
2938 li->discard_begin = li->discard_end = 0;
2939 }
2940
2941 return 0;
2942 }
2943
2944 static dm_cblock_t get_cache_dev_size(struct cache *cache)
2945 {
2946 sector_t size = get_dev_size(cache->cache_dev);
2947 (void) sector_div(size, cache->sectors_per_block);
2948 return to_cblock(size);
2949 }
2950
2951 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2952 {
2953 if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
2954 if (cache->sized) {
2955 DMERR("%s: unable to extend cache due to missing cache table reload",
2956 cache_device_name(cache));
2957 return false;
2958 }
2959 }
2960
2961 /*
2962 * We can't drop a dirty block when shrinking the cache.
2963 */
2964 while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
2965 if (is_dirty(cache, new_size)) {
2966 DMERR("%s: unable to shrink cache; cache block %llu is dirty",
2967 cache_device_name(cache),
2968 (unsigned long long) from_cblock(new_size));
2969 return false;
2970 }
2971 new_size = to_cblock(from_cblock(new_size) + 1);
2972 }
2973
2974 return true;
2975 }
2976
2977 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2978 {
2979 int r;
2980
2981 r = dm_cache_resize(cache->cmd, new_size);
2982 if (r) {
2983 DMERR("%s: could not resize cache metadata", cache_device_name(cache));
2984 metadata_operation_failed(cache, "dm_cache_resize", r);
2985 return r;
2986 }
2987
2988 set_cache_size(cache, new_size);
2989
2990 return 0;
2991 }
2992
2993 static int cache_preresume(struct dm_target *ti)
2994 {
2995 int r = 0;
2996 struct cache *cache = ti->private;
2997 dm_cblock_t csize = get_cache_dev_size(cache);
2998
2999 /*
3000 * Check to see if the cache device has been resized.
3001 */
3002 if (!cache->sized) {
3003 r = resize_cache_dev(cache, csize);
3004 if (r)
3005 return r;
3006
3007 cache->sized = true;
3008
3009 } else if (csize != cache->cache_size) {
3010 if (!can_resize(cache, csize))
3011 return -EINVAL;
3012
3013 r = resize_cache_dev(cache, csize);
3014 if (r)
3015 return r;
3016 }
3017
3018 if (!cache->loaded_mappings) {
3019 r = dm_cache_load_mappings(cache->cmd, cache->policy,
3020 load_mapping, cache);
3021 if (r) {
3022 DMERR("%s: could not load cache mappings", cache_device_name(cache));
3023 metadata_operation_failed(cache, "dm_cache_load_mappings", r);
3024 return r;
3025 }
3026
3027 cache->loaded_mappings = true;
3028 }
3029
3030 if (!cache->loaded_discards) {
3031 struct discard_load_info li;
3032
3033 /*
3034 * The discard bitset could have been resized, or the
3035 * discard block size changed. To be safe we start by
3036 * setting every dblock to not discarded.
3037 */
3038 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
3039
3040 discard_load_info_init(cache, &li);
3041 r = dm_cache_load_discards(cache->cmd, load_discard, &li);
3042 if (r) {
3043 DMERR("%s: could not load origin discards", cache_device_name(cache));
3044 metadata_operation_failed(cache, "dm_cache_load_discards", r);
3045 return r;
3046 }
3047 set_discard_range(&li);
3048
3049 cache->loaded_discards = true;
3050 }
3051
3052 return r;
3053 }
3054
3055 static void cache_resume(struct dm_target *ti)
3056 {
3057 struct cache *cache = ti->private;
3058
3059 cache->need_tick_bio = true;
3060 allow_background_work(cache);
3061 do_waker(&cache->waker.work);
3062 }
3063
3064 static void emit_flags(struct cache *cache, char *result,
3065 unsigned maxlen, ssize_t *sz_ptr)
3066 {
3067 ssize_t sz = *sz_ptr;
3068 struct cache_features *cf = &cache->features;
3069 unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
3070
3071 DMEMIT("%u ", count);
3072
3073 if (cf->metadata_version == 2)
3074 DMEMIT("metadata2 ");
3075
3076 if (writethrough_mode(cache))
3077 DMEMIT("writethrough ");
3078
3079 else if (passthrough_mode(cache))
3080 DMEMIT("passthrough ");
3081
3082 else if (writeback_mode(cache))
3083 DMEMIT("writeback ");
3084
3085 else {
3086 DMEMIT("unknown ");
3087 DMERR("%s: internal error: unknown io mode: %d",
3088 cache_device_name(cache), (int) cf->io_mode);
3089 }
3090
3091 if (!cf->discard_passdown)
3092 DMEMIT("no_discard_passdown ");
3093
3094 *sz_ptr = sz;
3095 }
3096
3097 /*
3098 * Status format:
3099 *
3100 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
3101 * <cache block size> <#used cache blocks>/<#total cache blocks>
3102 * <#read hits> <#read misses> <#write hits> <#write misses>
3103 * <#demotions> <#promotions> <#dirty>
3104 * <#features> <features>*
3105 * <#core args> <core args>
3106 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
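*
* A hypothetical STATUSTYPE_INFO line in the format above (all values are
* invented for illustration; metadata2 + writeback features, core arg
* migration_threshold, smq policy with no emitted args, writable, no
* needs_check):
*
*   8 72/1024 512 1024/16384 27 43 92 61 0 5 18 2 metadata2 writeback
*   2 migration_threshold 2048 smq 0 rw -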
3107 */
3108 static void cache_status(struct dm_target *ti, status_type_t type,
3109 unsigned status_flags, char *result, unsigned maxlen)
3110 {
3111 int r = 0;
3112 unsigned i;
3113 ssize_t sz = 0;
3114 dm_block_t nr_free_blocks_metadata = 0;
3115 dm_block_t nr_blocks_metadata = 0;
3116 char buf[BDEVNAME_SIZE];
3117 struct cache *cache = ti->private;
3118 dm_cblock_t residency;
3119 bool needs_check;
3120
3121 switch (type) {
3122 case STATUSTYPE_INFO:
3123 if (get_cache_mode(cache) == CM_FAIL) {
3124 DMEMIT("Fail");
3125 break;
3126 }
3127
3128 /* Commit to ensure statistics aren't out-of-date */
3129 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3130 (void) commit(cache, false);
3131
3132 r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
3133 if (r) {
3134 DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
3135 cache_device_name(cache), r);
3136 goto err;
3137 }
3138
3139 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
3140 if (r) {
3141 DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
3142 cache_device_name(cache), r);
3143 goto err;
3144 }
3145
3146 residency = policy_residency(cache->policy);
3147
3148 DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
3149 (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
3150 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3151 (unsigned long long)nr_blocks_metadata,
3152 (unsigned long long)cache->sectors_per_block,
3153 (unsigned long long) from_cblock(residency),
3154 (unsigned long long) from_cblock(cache->cache_size),
3155 (unsigned) atomic_read(&cache->stats.read_hit),
3156 (unsigned) atomic_read(&cache->stats.read_miss),
3157 (unsigned) atomic_read(&cache->stats.write_hit),
3158 (unsigned) atomic_read(&cache->stats.write_miss),
3159 (unsigned) atomic_read(&cache->stats.demotion),
3160 (unsigned) atomic_read(&cache->stats.promotion),
3161 (unsigned long) atomic_read(&cache->nr_dirty));
3162
3163 emit_flags(cache, result, maxlen, &sz);
3164
3165 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
3166
3167 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
3168 if (sz < maxlen) {
3169 r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
3170 if (r)
3171 DMERR("%s: policy_emit_config_values returned %d",
3172 cache_device_name(cache), r);
3173 }
3174
3175 if (get_cache_mode(cache) == CM_READ_ONLY)
3176 DMEMIT("ro ");
3177 else
3178 DMEMIT("rw ");
3179
3180 r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
3181
3182 if (r || needs_check)
3183 DMEMIT("needs_check ");
3184 else
3185 DMEMIT("- ");
3186
3187 break;
3188
3189 case STATUSTYPE_TABLE:
3190 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
3191 DMEMIT("%s ", buf);
3192 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
3193 DMEMIT("%s ", buf);
3194 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
3195 DMEMIT("%s", buf);
3196
3197 for (i = 0; i < cache->nr_ctr_args - 1; i++)
3198 DMEMIT(" %s", cache->ctr_args[i]);
3199 if (cache->nr_ctr_args)
3200 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
3201 }
3202
3203 return;
3204
3205 err:
3206 DMEMIT("Error");
3207 }
3208
3209 /*
3210 * Defines a range of cblocks: begin to (end - 1) are in the range; end is
3211 * the one-past-the-end value.
3212 */
3213 struct cblock_range {
3214 dm_cblock_t begin;
3215 dm_cblock_t end;
3216 };
3217
3218 /*
3219 * A cache block range can take two forms:
3220 *
3221 * i) A single cblock, e.g. '3456'
3222 * ii) A begin and end cblock with a dash between, e.g. '123-234'
3223 */
3224 static int parse_cblock_range(struct cache *cache, const char *str,
3225 struct cblock_range *result)
3226 {
3227 char dummy;
3228 uint64_t b, e;
3229 int r;
3230
3231 /*
3232 * Try and parse form (ii) first.
3233 */
3234 r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
3235 if (r < 0)
3236 return r;
3237
3238 if (r == 2) {
3239 result->begin = to_cblock(b);
3240 result->end = to_cblock(e);
3241 return 0;
3242 }
3243
3244 /*
3245 * That didn't work, try form (i).
3246 */
3247 r = sscanf(str, "%llu%c", &b, &dummy);
3248 if (r < 0)
3249 return r;
3250
3251 if (r == 1) {
3252 result->begin = to_cblock(b);
3253 result->end = to_cblock(from_cblock(result->begin) + 1u);
3254 return 0;
3255 }
3256
3257 DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
3258 return -EINVAL;
3259 }
3260
3261 static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
3262 {
3263 uint64_t b = from_cblock(range->begin);
3264 uint64_t e = from_cblock(range->end);
3265 uint64_t n = from_cblock(cache->cache_size);
3266
3267 if (b >= n) {
3268 DMERR("%s: begin cblock out of range: %llu >= %llu",
3269 cache_device_name(cache), b, n);
3270 return -EINVAL;
3271 }
3272
3273 if (e > n) {
3274 DMERR("%s: end cblock out of range: %llu > %llu",
3275 cache_device_name(cache), e, n);
3276 return -EINVAL;
3277 }
3278
3279 if (b >= e) {
3280 DMERR("%s: invalid cblock range: %llu >= %llu",
3281 cache_device_name(cache), b, e);
3282 return -EINVAL;
3283 }
3284
3285 return 0;
3286 }
3287
3288 static inline dm_cblock_t cblock_succ(dm_cblock_t b)
3289 {
3290 return to_cblock(from_cblock(b) + 1);
3291 }
3292
3293 static int request_invalidation(struct cache *cache, struct cblock_range *range)
3294 {
3295 int r = 0;
3296
3297 /*
3298 * We don't need to do any locking here because we know we're in
3299 * passthrough mode. There is potential for a race between an
3300 * invalidation triggered by an io and an invalidation message. This
3301 * is harmless; we needn't worry if the policy call fails.
3302 */
3303 while (range->begin != range->end) {
3304 r = invalidate_cblock(cache, range->begin);
3305 if (r)
3306 return r;
3307
3308 range->begin = cblock_succ(range->begin);
3309 }
3310
3311 cache->commit_requested = true;
3312 return r;
3313 }
3314
3315 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
3316 const char **cblock_ranges)
3317 {
3318 int r = 0;
3319 unsigned i;
3320 struct cblock_range range;
3321
3322 if (!passthrough_mode(cache)) {
3323 DMERR("%s: cache has to be in passthrough mode for invalidation",
3324 cache_device_name(cache));
3325 return -EPERM;
3326 }
3327
3328 for (i = 0; i < count; i++) {
3329 r = parse_cblock_range(cache, cblock_ranges[i], &range);
3330 if (r)
3331 break;
3332
3333 r = validate_cblock_range(cache, &range);
3334 if (r)
3335 break;
3336
3337 /*
3338 * Pass begin and end origin blocks to the worker and wake it.
3339 */
3340 r = request_invalidation(cache, &range);
3341 if (r)
3342 break;
3343 }
3344
3345 return r;
3346 }
3347
3348 /*
3349 * Supports
3350 * "<key> <value>"
3351 * and
3352 * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*
3353 *
3354 * The key migration_threshold is supported by the cache target core.
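*
* Hypothetical usage (device name is illustrative):
*
*   dmsetup message cached 0 migration_threshold 4096
*   dmsetup message cached 0 invalidate_cblocks 2345 3000-3100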
3355 */
3356 static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
3357 char *result, unsigned maxlen)
3358 {
3359 struct cache *cache = ti->private;
3360
3361 if (!argc)
3362 return -EINVAL;
3363
3364 if (get_cache_mode(cache) >= CM_READ_ONLY) {
3365 DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
3366 cache_device_name(cache));
3367 return -EOPNOTSUPP;
3368 }
3369
3370 if (!strcasecmp(argv[0], "invalidate_cblocks"))
3371 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3372
3373 if (argc != 2)
3374 return -EINVAL;
3375
3376 return set_config_value(cache, argv[0], argv[1]);
3377 }
3378
3379 static int cache_iterate_devices(struct dm_target *ti,
3380 iterate_devices_callout_fn fn, void *data)
3381 {
3382 int r = 0;
3383 struct cache *cache = ti->private;
3384
3385 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
3386 if (!r)
3387 r = fn(ti, cache->origin_dev, 0, ti->len, data);
3388
3389 return r;
3390 }
3391
3392 static bool origin_dev_supports_discard(struct block_device *origin_bdev)
3393 {
3394 struct request_queue *q = bdev_get_queue(origin_bdev);
3395
3396 return q && blk_queue_discard(q);
3397 }
3398
3399 /*
3400 * If discard_passdown was enabled verify that the origin device
3401 * supports discards. Disable discard_passdown if not.
3402 */
3403 static void disable_passdown_if_not_supported(struct cache *cache)
3404 {
3405 struct block_device *origin_bdev = cache->origin_dev->bdev;
3406 struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
3407 const char *reason = NULL;
3408 char buf[BDEVNAME_SIZE];
3409
3410 if (!cache->features.discard_passdown)
3411 return;
3412
3413 if (!origin_dev_supports_discard(origin_bdev))
3414 reason = "discard unsupported";
3415
3416 else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
3417 reason = "max discard sectors smaller than a block";
3418
3419 if (reason) {
3420 DMWARN("Origin device (%s) %s: Disabling discard passdown.",
3421 bdevname(origin_bdev, buf), reason);
3422 cache->features.discard_passdown = false;
3423 }
3424 }
3425
3426 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3427 {
3428 struct block_device *origin_bdev = cache->origin_dev->bdev;
3429 struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
3430
3431 if (!cache->features.discard_passdown) {
3432 /* No passdown is done, so set our own virtual limits. */
3433 limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
3434 cache->origin_sectors);
3435 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
3436 return;
3437 }
3438
3439 /*
3440 * cache_iterate_devices() is stacking both origin and fast device limits
3441 * but discards aren't passed to the fast device, so inherit origin's limits.
3442 */
3443 limits->max_discard_sectors = origin_limits->max_discard_sectors;
3444 limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
3445 limits->discard_granularity = origin_limits->discard_granularity;
3446 limits->discard_alignment = origin_limits->discard_alignment;
3447 limits->discard_misaligned = origin_limits->discard_misaligned;
3448 }
3449
3450 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3451 {
3452 struct cache *cache = ti->private;
3453 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3454
3455 /*
3456 * If the system-determined stacked limits are compatible with the
3457 * cache's block size (io_opt is a factor), do not override them.
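*
* E.g. (illustrative): with 512-sector cache blocks, a stacked io_opt of
* 1024 sectors is kept, while 768 sectors is not a multiple of the block
* size and causes both io_min and io_opt to be overridden to it.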
3458 */
3459 if (io_opt_sectors < cache->sectors_per_block ||
3460 do_div(io_opt_sectors, cache->sectors_per_block)) {
3461 blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
3462 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
3463 }
3464
3465 disable_passdown_if_not_supported(cache);
3466 set_discard_limits(cache, limits);
3467 }
3468
3469 /*----------------------------------------------------------------*/
3470
3471 static struct target_type cache_target = {
3472 .name = "cache",
3473 .version = {2, 2, 0},
3474 .module = THIS_MODULE,
3475 .ctr = cache_ctr,
3476 .dtr = cache_dtr,
3477 .map = cache_map,
3478 .end_io = cache_end_io,
3479 .postsuspend = cache_postsuspend,
3480 .preresume = cache_preresume,
3481 .resume = cache_resume,
3482 .status = cache_status,
3483 .message = cache_message,
3484 .iterate_devices = cache_iterate_devices,
3485 .io_hints = cache_io_hints,
3486 };
3487
3488 static int __init dm_cache_init(void)
3489 {
3490 int r;
3491
3492 migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3493 if (!migration_cache)
3494 return -ENOMEM;
3495
3496 r = dm_register_target(&cache_target);
3497 if (r) {
3498 DMERR("cache target registration failed: %d", r);
3499 kmem_cache_destroy(migration_cache);
3500 return r;
3501 }
3502
3503 return 0;
3504 }
3505
3506 static void __exit dm_cache_exit(void)
3507 {
3508 dm_unregister_target(&cache_target);
3509 kmem_cache_destroy(migration_cache);
3510 }
3511
3512 module_init(dm_cache_init);
3513 module_exit(dm_cache_exit);
3514
3515 MODULE_DESCRIPTION(DM_NAME " cache target");
3516 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
3517 MODULE_LICENSE("GPL");
3518